# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Shows how to use the AWS SDK for Python (Boto3) with Amazon Polly to synthesize
speech and manage custom lexicons.
"""
import io
import json
import logging
import time
from botocore.exceptions import ClientError
logger = logging.getLogger(__name__)
# snippet-start:[python.example_code.polly.helper.PollyWrapper]
class PollyWrapper:
"""Encapsulates Amazon Polly functions."""
def __init__(self, polly_client, s3_resource):
"""
:param polly_client: A Boto3 Amazon Polly client.
:param s3_resource: A Boto3 Amazon Simple Storage Service (Amazon S3) resource.
"""
self.polly_client = polly_client
self.s3_resource = s3_resource
self.voice_metadata = None
# snippet-end:[python.example_code.polly.helper.PollyWrapper]
# snippet-start:[python.example_code.polly.DescribeVoices]
def describe_voices(self):
"""
Gets metadata about available voices.
:return: The list of voice metadata.
"""
try:
response = self.polly_client.describe_voices()
self.voice_metadata = response['Voices']
logger.info("Got metadata about %s voices.", len(self.voice_metadata))
except ClientError:
logger.exception("Couldn't get voice metadata.")
raise
else:
return self.voice_metadata
# snippet-end:[python.example_code.polly.DescribeVoices]
# snippet-start:[python.example_code.polly.Synthesize]
def synthesize(
self, text, engine, voice, audio_format, lang_code=None,
include_visemes=False):
"""
Synthesizes speech or speech marks from text, using the specified voice.
:param text: The text to synthesize.
:param engine: The kind of engine used. Can be standard or neural.
:param voice: The ID of the voice to use.
:param audio_format: The audio format to return for synthesized speech. When
speech marks are synthesized, the output format is JSON.
:param lang_code: The language code of the voice to use. This has an effect
only when a bilingual voice is selected.
:param include_visemes: When True, a second request is made to Amazon Polly
to synthesize a list of visemes, using the specified
text and voice. A viseme represents the visual position
of the face and mouth when saying part of a word.
:return: The audio stream that contains the synthesized speech and a list
of visemes that are associated with the speech audio.
"""
try:
kwargs = {
'Engine': engine,
'OutputFormat': audio_format,
'Text': text,
'VoiceId': voice}
if lang_code is not None:
kwargs['LanguageCode'] = lang_code
response = self.polly_client.synthesize_speech(**kwargs)
audio_stream = response['AudioStream']
logger.info("Got audio stream spoken by %s.", voice)
visemes = None
if include_visemes:
kwargs['OutputFormat'] = 'json'
kwargs['SpeechMarkTypes'] = ['viseme']
response = self.polly_client.synthesize_speech(**kwargs)
visemes = [json.loads(v) for v in
response['AudioStream'].read().decode().split() if v]
logger.info("Got %s visemes.", len(visemes))
except ClientError:
logger.exception("Couldn't get audio stream.")
raise
else:
return audio_stream, visemes
# snippet-end:[python.example_code.polly.Synthesize]
def _wait_for_task(self, tries, task_id, task_type, wait_callback, output_bucket):
"""
Waits for an asynchronous speech synthesis task to complete. This function
polls Amazon Polly for data about the specified task until a completion
status is returned or the number of tries is exceeded.
When the task successfully completes, the task output is retrieved from the
output Amazon S3 bucket and the output object is deleted.
:param tries: The number of times to poll for status.
:param task_id: The ID of the task to wait for.
:param task_type: The type of task. This is passed to the `wait_callback`
function to display status.
:param wait_callback: A callback function that is called after each poll,
to give the caller an opportunity to take action, such
as to display status.
:param output_bucket: The Amazon S3 bucket where task output is located.
:return: The output from the task in a byte stream.
"""
task = None
while tries > 0:
task = self.get_speech_synthesis_task(task_id)
task_status = task['TaskStatus']
logger.info("Task %s status %s.", task_id, task_status)
if wait_callback is not None:
wait_callback(task_type, task_status)
if task_status in ('completed', 'failed'):
break
time.sleep(5)
tries -= 1
output_stream = io.BytesIO()
if task is not None:
output_key = task['OutputUri'].split('/')[-1]
output_bucket.download_fileobj(output_key, output_stream)
output_bucket.Object(output_key).delete()
logger.info("Downloaded output for task %s.", task_id)
output_stream.seek(0)
return output_stream
# snippet-start:[python.example_code.polly.StartSpeechSynthesisTask]
def do_synthesis_task(
self, text, engine, voice, audio_format, s3_bucket, lang_code=None,
include_visemes=False, wait_callback=None):
"""
Start an asynchronous task to synthesize speech or speech marks, wait for
the task to complete, retrieve the output from Amazon S3, and return the
data.
An asynchronous task is required when the text is too long for near-real time
synthesis.
:param text: The text to synthesize.
:param engine: The kind of engine used. Can be standard or neural.
:param voice: The ID of the voice to use.
:param audio_format: The audio format to return for synthesized speech. When
speech marks are synthesized, the output format is JSON.
:param s3_bucket: The name of an existing Amazon S3 bucket that you have
write access to. Synthesis output is written to this bucket.
:param lang_code: The language code of the voice to use. This has an effect
only when a bilingual voice is selected.
:param include_visemes: When True, a second request is made to Amazon Polly
to synthesize a list of visemes, using the specified
text and voice. A viseme represents the visual position
of the face and mouth when saying part of a word.
:param wait_callback: A callback function that is called periodically during
task processing, to give the caller an opportunity to
take action, such as to display status.
:return: The audio stream that contains the synthesized speech and a list
of visemes that are associated with the speech audio.
"""
try:
kwargs = {
'Engine': engine,
'OutputFormat': audio_format,
'OutputS3BucketName': s3_bucket,
'Text': text,
'VoiceId': voice}
if lang_code is not None:
kwargs['LanguageCode'] = lang_code
response = self.polly_client.start_speech_synthesis_task(**kwargs)
speech_task = response['SynthesisTask']
logger.info("Started speech synthesis task %s.", speech_task['TaskId'])
viseme_task = None
if include_visemes:
kwargs['OutputFormat'] = 'json'
kwargs['SpeechMarkTypes'] = ['viseme']
response = self.polly_client.start_speech_synthesis_task(**kwargs)
viseme_task = response['SynthesisTask']
logger.info("Started viseme synthesis task %s.", viseme_task['TaskId'])
except ClientError:
logger.exception("Couldn't start synthesis task.")
raise
else:
bucket = self.s3_resource.Bucket(s3_bucket)
audio_stream = self._wait_for_task(
10, speech_task['TaskId'], 'speech', wait_callback, bucket)
visemes = None
if include_visemes:
viseme_data = self._wait_for_task(
10, viseme_task['TaskId'], 'viseme', wait_callback, bucket)
visemes = [json.loads(v) for v in
viseme_data.read().decode().split() if v]
return audio_stream, visemes
# snippet-end:[python.example_code.polly.StartSpeechSynthesisTask]
# snippet-start:[python.example_code.polly.GetSpeechSynthesisTask]
def get_speech_synthesis_task(self, task_id):
"""
Gets metadata about an asynchronous speech synthesis task, such as its status.
:param task_id: The ID of the task to retrieve.
:return: Metadata about the task.
"""
try:
response = self.polly_client.get_speech_synthesis_task(TaskId=task_id)
task = response['SynthesisTask']
logger.info("Got synthesis task. Status is %s.", task['TaskStatus'])
except ClientError:
logger.exception("Couldn't get synthesis task %s.", task_id)
raise
else:
return task
# snippet-end:[python.example_code.polly.GetSpeechSynthesisTask]
# snippet-start:[python.example_code.polly.PutLexicon]
def create_lexicon(self, name, content):
"""
Creates a lexicon with the specified content. A lexicon contains custom
pronunciations.
:param name: The name of the lexicon.
:param content: The content of the lexicon.
"""
try:
self.polly_client.put_lexicon(Name=name, Content=content)
logger.info("Created lexicon %s.", name)
except ClientError:
logger.exception("Couldn't create lexicon %s.")
raise
# snippet-end:[python.example_code.polly.PutLexicon]
# snippet-start:[python.example_code.polly.GetLexicon]
def get_lexicon(self, name):
"""
Gets metadata and contents of an existing lexicon.
:param name: The name of the lexicon to retrieve.
:return: The retrieved lexicon.
"""
try:
response = self.polly_client.get_lexicon(Name=name)
logger.info("Got lexicon %s.", name)
except ClientError:
logger.exception("Couldn't get lexicon %s.", name)
raise
else:
return response
# snippet-end:[python.example_code.polly.GetLexicon]
# snippet-start:[python.example_code.polly.ListLexicons]
def list_lexicons(self):
"""
Lists lexicons in the current account.
:return: The list of lexicons.
"""
try:
response = self.polly_client.list_lexicons()
lexicons = response['Lexicons']
logger.info("Got %s lexicons.", len(lexicons))
except ClientError:
logger.exception("Couldn't get %s.", )
raise
else:
return lexicons
# snippet-end:[python.example_code.polly.ListLexicons]
def get_voice_engines(self):
"""
Extracts the set of available voice engine types from the full list of
voice metadata.
:return: The set of voice engine types.
"""
if self.voice_metadata is None:
self.describe_voices()
engines = set()
for voice in self.voice_metadata:
for engine in voice['SupportedEngines']:
engines.add(engine)
return engines
def get_languages(self, engine):
"""
Extracts the set of available languages for the specified engine from the
full list of voice metadata.
:param engine: The engine type to filter on.
:return: The set of languages available for the specified engine type.
"""
if self.voice_metadata is None:
self.describe_voices()
return {vo['LanguageName']: vo['LanguageCode'] for vo
in self.voice_metadata
if engine in vo['SupportedEngines']}
def get_voices(self, engine, language_code):
"""
Extracts the set of voices that are available for the specified engine type
and language from the full list of voice metadata.
:param engine: The engine type to filter on.
:param language_code: The language to filter on.
:return: The set of voices available for the specified engine type and language.
"""
if self.voice_metadata is None:
self.describe_voices()
return {vo['Name']: vo['Id'] for vo in self.voice_metadata
if engine in vo['SupportedEngines']
and language_code == vo['LanguageCode']}
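# --- Usage sketch (illustrative; not part of the original example code) ---
# A minimal driver for PollyWrapper, assuming default AWS credentials are
# configured. The voice ID 'Joanna' and the output file name are assumptions
# made for this sketch; any valid voice and path work the same way. For text
# that is too long for synthesize(), do_synthesis_task() with an S3 bucket
# you own follows the same calling pattern.
if __name__ == '__main__':
    import boto3

    logging.basicConfig(level=logging.INFO)
    polly = PollyWrapper(boto3.client('polly'), boto3.resource('s3'))
    audio, visemes = polly.synthesize(
        'Hello from Amazon Polly!', 'standard', 'Joanna', 'mp3',
        include_visemes=True)
    with open('hello.mp3', 'wb') as out_file:
        out_file.write(audio.read())
    print('Wrote hello.mp3 with %d visemes.' % len(visemes))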
import time
import threading
import uuid
from nose.tools import eq_
from nose.tools import raises
from kazoo.exceptions import KazooException
from kazoo.protocol.states import EventType
from kazoo.testing import KazooTestCase
class KazooDataWatcherTests(KazooTestCase):
def setUp(self):
super(KazooDataWatcherTests, self).setUp()
self.path = "/" + uuid.uuid4().hex
self.client.ensure_path(self.path)
def test_data_watcher(self):
update = threading.Event()
data = [True]
# Make it a non-existent path
self.path += 'f'
@self.client.DataWatch(self.path)
def changed(d, stat):
data.pop()
data.append(d)
update.set()
update.wait(10)
eq_(data, [None])
update.clear()
self.client.create(self.path, b'fred')
update.wait(10)
eq_(data[0], b'fred')
update.clear()
def test_data_watcher_once(self):
update = threading.Event()
data = [True]
# Make it a non-existent path
self.path += 'f'
dwatcher = self.client.DataWatch(self.path)
@dwatcher
def changed(d, stat):
data.pop()
data.append(d)
update.set()
update.wait(10)
eq_(data, [None])
update.clear()
@raises(KazooException)
def test_it():
@dwatcher
def func(d, stat):
data.pop()
test_it()
def test_data_watcher_with_event(self):
# Test that the data watcher gets passed the event, if it
# accepts three arguments
update = threading.Event()
data = [True]
# Make it a non-existent path
self.path += 'f'
@self.client.DataWatch(self.path)
def changed(d, stat, event):
data.pop()
data.append(event)
update.set()
update.wait(10)
eq_(data, [None])
update.clear()
self.client.create(self.path, b'fred')
update.wait(10)
eq_(data[0].type, EventType.CREATED)
update.clear()
def test_func_style_data_watch(self):
update = threading.Event()
data = [True]
# Make it a non-existent path
path = self.path + 'f'
def changed(d, stat):
data.pop()
data.append(d)
update.set()
self.client.DataWatch(path, changed)
update.wait(10)
eq_(data, [None])
update.clear()
self.client.create(path, b'fred')
update.wait(10)
eq_(data[0], b'fred')
update.clear()
def test_datawatch_across_session_expire(self):
update = threading.Event()
data = [True]
@self.client.DataWatch(self.path)
def changed(d, stat):
data.pop()
data.append(d)
update.set()
update.wait(10)
eq_(data, [b""])
update.clear()
self.expire_session(threading.Event)
self.client.retry(self.client.set, self.path, b'fred')
update.wait(25)
eq_(data[0], b'fred')
def test_func_stops(self):
update = threading.Event()
data = [True]
self.path += "f"
fail_through = []
@self.client.DataWatch(self.path)
def changed(d, stat):
data.pop()
data.append(d)
update.set()
if fail_through:
return False
update.wait(10)
eq_(data, [None])
update.clear()
fail_through.append(True)
self.client.create(self.path, b'fred')
update.wait(10)
eq_(data[0], b'fred')
update.clear()
self.client.set(self.path, b'asdfasdf')
update.wait(0.2)
eq_(data[0], b'fred')
d, stat = self.client.get(self.path)
eq_(d, b'asdfasdf')
def test_no_such_node(self):
args = []
@self.client.DataWatch("/some/path")
def changed(d, stat):
args.extend([d, stat])
eq_(args, [None, None])
def test_no_such_node_for_children_watch(self):
args = []
path = self.path + '/test_no_such_node_for_children_watch'
update = threading.Event()
def changed(children):
args.append(children)
update.set()
# watch a node which does not exist
children_watch = self.client.ChildrenWatch(path, changed)
eq_(update.is_set(), False)
eq_(children_watch._stopped, True)
eq_(args, [])
# watch a node which exists
self.client.create(path, b'')
children_watch = self.client.ChildrenWatch(path, changed)
update.wait(3)
eq_(args, [[]])
update.clear()
# watch changes
self.client.create(path + '/fred', b'')
update.wait(3)
eq_(args, [[], ['fred']])
update.clear()
# delete children
self.client.delete(path + '/fred')
update.wait(3)
eq_(args, [[], ['fred'], []])
update.clear()
# delete watching
self.client.delete(path)
# a hack to wait for the watcher to stop
for retry in range(5):
if children_watch._stopped:
break
children_watch._run_lock.acquire()
children_watch._run_lock.release()
time.sleep(retry / 10.0)
eq_(update.is_set(), False)
eq_(children_watch._stopped, True)
def test_bad_watch_func2(self):
counter = 0
@self.client.DataWatch(self.path)
def changed(d, stat):
if counter > 0:
raise Exception("oops")
raises(Exception)(changed)
counter += 1
self.client.set(self.path, b'asdfasdf')
def test_watcher_evaluating_to_false(self):
class WeirdWatcher(list):
def __call__(self, *args):
self.called = True
watcher = WeirdWatcher()
self.client.DataWatch(self.path, watcher)
self.client.set(self.path, b'mwahaha')
self.assertTrue(watcher.called)
def test_watcher_repeat_delete(self):
a = []
ev = threading.Event()
self.client.delete(self.path)
@self.client.DataWatch(self.path)
def changed(val, stat):
a.append(val)
ev.set()
eq_(a, [None])
ev.wait(10)
ev.clear()
self.client.create(self.path, b'blah')
ev.wait(10)
eq_(ev.is_set(), True)
ev.clear()
eq_(a, [None, b'blah'])
self.client.delete(self.path)
ev.wait(10)
eq_(ev.is_set(), True)
ev.clear()
eq_(a, [None, b'blah', None])
self.client.create(self.path, b'blah')
ev.wait(10)
eq_(ev.is_set(), True)
ev.clear()
eq_(a, [None, b'blah', None, b'blah'])
def test_watcher_with_closing(self):
a = []
ev = threading.Event()
self.client.delete(self.path)
@self.client.DataWatch(self.path)
def changed(val, stat):
a.append(val)
ev.set()
eq_(a, [None])
b = False
try:
self.client.stop()
except:
b = True
eq_(b, False)
class KazooChildrenWatcherTests(KazooTestCase):
def setUp(self):
super(KazooChildrenWatcherTests, self).setUp()
self.path = "/" + uuid.uuid4().hex
self.client.ensure_path(self.path)
def test_child_watcher(self):
update = threading.Event()
all_children = ['fred']
@self.client.ChildrenWatch(self.path)
def changed(children):
while all_children:
all_children.pop()
all_children.extend(children)
update.set()
update.wait(10)
eq_(all_children, [])
update.clear()
self.client.create(self.path + '/' + 'smith')
update.wait(10)
eq_(all_children, ['smith'])
update.clear()
self.client.create(self.path + '/' + 'george')
update.wait(10)
eq_(sorted(all_children), ['george', 'smith'])
def test_child_watcher_once(self):
update = threading.Event()
all_children = ['fred']
cwatch = self.client.ChildrenWatch(self.path)
@cwatch
def changed(children):
while all_children:
all_children.pop()
all_children.extend(children)
update.set()
update.wait(10)
eq_(all_children, [])
update.clear()
@raises(KazooException)
def test_it():
@cwatch
def changed_again(children):
update.set()
test_it()
def test_child_watcher_with_event(self):
update = threading.Event()
events = [True]
@self.client.ChildrenWatch(self.path, send_event=True)
def changed(children, event):
events.pop()
events.append(event)
update.set()
update.wait(10)
eq_(events, [None])
update.clear()
self.client.create(self.path + '/' + 'smith')
update.wait(10)
eq_(events[0].type, EventType.CHILD)
update.clear()
def test_func_style_child_watcher(self):
update = threading.Event()
all_children = ['fred']
def changed(children):
while all_children:
all_children.pop()
all_children.extend(children)
update.set()
self.client.ChildrenWatch(self.path, changed)
update.wait(10)
eq_(all_children, [])
update.clear()
self.client.create(self.path + '/' + 'smith')
update.wait(10)
eq_(all_children, ['smith'])
update.clear()
self.client.create(self.path + '/' + 'george')
update.wait(10)
eq_(sorted(all_children), ['george', 'smith'])
def test_func_stops(self):
update = threading.Event()
all_children = ['fred']
fail_through = []
@self.client.ChildrenWatch(self.path)
def changed(children):
while all_children:
all_children.pop()
all_children.extend(children)
update.set()
if fail_through:
return False
update.wait(10)
eq_(all_children, [])
update.clear()
fail_through.append(True)
self.client.create(self.path + '/' + 'smith')
update.wait(10)
eq_(all_children, ['smith'])
update.clear()
self.client.create(self.path + '/' + 'george')
update.wait(0.5)
eq_(all_children, ['smith'])
def test_child_watch_session_loss(self):
update = threading.Event()
all_children = ['fred']
@self.client.ChildrenWatch(self.path)
def changed(children):
while all_children:
all_children.pop()
all_children.extend(children)
update.set()
update.wait(10)
eq_(all_children, [])
update.clear()
self.client.create(self.path + '/' + 'smith')
update.wait(10)
eq_(all_children, ['smith'])
update.clear()
self.expire_session(threading.Event)
self.client.retry(self.client.create,
self.path + '/' + 'george')
update.wait(20)
eq_(sorted(all_children), ['george', 'smith'])
def test_child_stop_on_session_loss(self):
update = threading.Event()
all_children = ['fred']
@self.client.ChildrenWatch(self.path, allow_session_lost=False)
def changed(children):
while all_children:
all_children.pop()
all_children.extend(children)
update.set()
update.wait(10)
eq_(all_children, [])
update.clear()
self.client.create(self.path + '/' + 'smith')
update.wait(10)
eq_(all_children, ['smith'])
update.clear()
self.expire_session(threading.Event)
self.client.retry(self.client.create,
self.path + '/' + 'george')
update.wait(4)
eq_(update.is_set(), False)
eq_(all_children, ['smith'])
children = self.client.get_children(self.path)
eq_(sorted(children), ['george', 'smith'])
def test_bad_children_watch_func(self):
counter = 0
@self.client.ChildrenWatch(self.path)
def changed(children):
if counter > 0:
raise Exception("oops")
raises(Exception)(changed)
counter += 1
self.client.create(self.path + '/' + 'smith')
class KazooPatientChildrenWatcherTests(KazooTestCase):
def setUp(self):
super(KazooPatientChildrenWatcherTests, self).setUp()
self.path = "/" + uuid.uuid4().hex
def _makeOne(self, *args, **kwargs):
from kazoo.recipe.watchers import PatientChildrenWatch
return PatientChildrenWatch(*args, **kwargs)
def test_watch(self):
self.client.ensure_path(self.path)
watcher = self._makeOne(self.client, self.path, 0.1)
result = watcher.start()
children, asy = result.get()
eq_(len(children), 0)
eq_(asy.ready(), False)
self.client.create(self.path + '/' + 'fred')
asy.get(timeout=1)
eq_(asy.ready(), True)
def test_exception(self):
from kazoo.exceptions import NoNodeError
watcher = self._makeOne(self.client, self.path, 0.1)
result = watcher.start()
@raises(NoNodeError)
def testit():
result.get()
testit()
def test_watch_iterations(self):
self.client.ensure_path(self.path)
watcher = self._makeOne(self.client, self.path, 0.5)
result = watcher.start()
eq_(result.ready(), False)
time.sleep(0.08)
self.client.create(self.path + '/' + uuid.uuid4().hex)
eq_(result.ready(), False)
time.sleep(0.08)
eq_(result.ready(), False)
self.client.create(self.path + '/' + uuid.uuid4().hex)
time.sleep(0.08)
eq_(result.ready(), False)
children, asy = result.get()
eq_(len(children), 2)
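# --- Usage sketch (illustrative; not part of the test suite) ---
# The decorator pattern exercised in the tests above, shown outside the test
# harness. The ZooKeeper host address is an assumption for this sketch.
def demo_data_watch():
    from kazoo.client import KazooClient

    zk = KazooClient(hosts='127.0.0.1:2181')
    zk.start()

    @zk.DataWatch('/demo/node')
    def watch_node(data, stat):
        # Called once immediately, then again on every change; returning
        # False stops the watch (compare test_func_stops above).
        print('Data: %r, version: %s'
              % (data, stat.version if stat else None))

    zk.stop()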
import calendar
import time
import uuid
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import Http404
import commonware.log
from jingo.helpers import urlparams
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.generics import GenericAPIView
from rest_framework.mixins import ListModelMixin, RetrieveModelMixin
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
import mkt
from lib.cef_loggers import app_pay_cef
from mkt.api.authentication import (RestAnonymousAuthentication,
RestOAuthAuthentication,
RestSharedSecretAuthentication)
from mkt.api.base import CORSMixin, MarketplaceView
from mkt.api.permissions import AllowReadOnly, AnyOf, GroupPermission
from mkt.constants.regions import RESTOFWORLD
from mkt.purchase.models import Contribution
from mkt.receipts.utils import create_inapp_receipt
from mkt.site.mail import send_mail_jinja
from mkt.site.helpers import absolutify
from mkt.webpay.forms import FailureForm, PrepareInAppForm, PrepareWebAppForm
from mkt.webpay.models import ProductIcon
from mkt.webpay.serializers import (ContributionSerializer,
ProductIconSerializer)
from mkt.webpay.webpay_jwt import (get_product_jwt, InAppProduct,
sign_webpay_jwt, SimulatedInAppProduct,
WebAppProduct)
from . import tasks
log = commonware.log.getLogger('z.webpay')
class PreparePayWebAppView(CORSMixin, MarketplaceView, GenericAPIView):
authentication_classes = [RestOAuthAuthentication,
RestSharedSecretAuthentication]
permission_classes = [IsAuthenticated]
cors_allowed_methods = ['post']
cors_allowed_headers = ('content-type', 'accept', 'x-fxpay-version')
serializer_class = ContributionSerializer
def post(self, request, *args, **kwargs):
form = PrepareWebAppForm(request.DATA)
if not form.is_valid():
return Response(form.errors, status=status.HTTP_400_BAD_REQUEST)
app = form.cleaned_data['app']
region = getattr(request, 'REGION', None)
if region:
enabled_regions = app.get_price_region_ids()
region_can_purchase = region.id in enabled_regions
restofworld_can_purchase = RESTOFWORLD.id in enabled_regions
if not region_can_purchase and not restofworld_can_purchase:
log.info('Region {0} is not in {1}; '
'restofworld purchases are inactive'
.format(region.id, enabled_regions))
return Response(
{'reason': 'Payments are restricted for this region'},
status=status.HTTP_403_FORBIDDEN)
if app.is_premium() and app.has_purchased(request._request.user):
log.info('Already purchased: {0}'.format(app.pk))
return Response({'reason': u'Already purchased app.'},
status=status.HTTP_409_CONFLICT)
app_pay_cef.log(request._request, 'Preparing JWT', 'preparing_jwt',
'Preparing JWT for: {0}'.format(app.pk), severity=3)
log.debug('Starting purchase of app: {0} by user: {1}'.format(
app.pk, request._request.user))
contribution = Contribution.objects.create(
addon_id=app.pk,
amount=app.get_price(region=request._request.REGION.id),
paykey=None,
price_tier=app.premium.price,
source=request._request.GET.get('src', ''),
source_locale=request._request.LANG,
type=mkt.CONTRIB_PENDING,
user=request._request.user,
uuid=str(uuid.uuid4()),
)
log.debug('Storing contrib for uuid: {0}'.format(contribution.uuid))
token = get_product_jwt(WebAppProduct(app), contribution)
return Response(token, status=status.HTTP_201_CREATED)
class PreparePayInAppView(CORSMixin, MarketplaceView, GenericAPIView):
authentication_classes = []
permission_classes = []
cors_allowed_methods = ['post']
cors_allowed_headers = ('content-type', 'accept', 'x-fxpay-version')
serializer_class = ContributionSerializer
def post(self, request, *args, **kwargs):
form = PrepareInAppForm(request.DATA)
if not form.is_valid():
app_pay_cef.log(
request._request,
'Preparing InApp JWT Failed',
'preparing_inapp_jwt_failed',
'Preparing InApp JWT Failed error: {0}'.format(form.errors),
severity=3
)
return Response(form.errors, status=status.HTTP_400_BAD_REQUEST)
inapp = form.cleaned_data['inapp']
app_pay_cef.log(
request._request,
'Preparing InApp JWT',
'preparing_inapp_jwt',
'Preparing InApp JWT for: {0}'.format(inapp.pk), severity=3
)
log.debug('Starting purchase of in app: {0}'.format(inapp.pk))
contribution = Contribution.objects.create(
addon_id=inapp.webapp and inapp.webapp.pk,
inapp_product=inapp,
# In-App payments are unauthenticated so we have no user
# and therefore can't determine a meaningful region.
amount=None,
paykey=None,
price_tier=inapp.price,
source=request._request.GET.get('src', ''),
source_locale=request._request.LANG,
type=mkt.CONTRIB_PENDING,
user=None,
uuid=str(uuid.uuid4()),
)
log.info('Storing contrib for uuid: {0}'.format(contribution.uuid))
if inapp.simulate:
log.info('Preparing in-app JWT simulation for {i}'
.format(i=inapp))
product = SimulatedInAppProduct(inapp)
else:
log.info('Preparing in-app JWT for {i}'.format(i=inapp))
product = InAppProduct(inapp)
token = get_product_jwt(product, contribution)
return Response(token, status=status.HTTP_201_CREATED)
class StatusPayView(CORSMixin, MarketplaceView, GenericAPIView):
"""
Get the status of a contribution (transaction) by UUID.
This is used by the Marketplace or third party apps to check
the fulfillment of a purchase. It does not require authentication
so that in-app payments can work from third party apps.
"""
authentication_classes = []
permission_classes = []
cors_allowed_methods = ['get']
cors_allowed_headers = ('content-type', 'accept', 'x-fxpay-version')
queryset = Contribution.objects.filter(type=mkt.CONTRIB_PURCHASE)
lookup_field = 'uuid'
def get_object(self):
try:
obj = super(StatusPayView, self).get_object()
except Http404:
# Anything that's not correct will be raised as a 404 so that it's
# harder to iterate over contribution values.
log.info('Contribution not found')
return None
return obj
def get(self, request, *args, **kwargs):
self.object = contrib = self.get_object()
data = {'status': 'complete' if self.object else 'incomplete',
'receipt': None}
if getattr(contrib, 'inapp_product', None):
data['receipt'] = create_inapp_receipt(contrib)
return Response(data)
class FailureNotificationView(MarketplaceView, GenericAPIView):
authentication_classes = [RestOAuthAuthentication,
RestSharedSecretAuthentication]
permission_classes = [GroupPermission('Transaction', 'NotifyFailure')]
queryset = Contribution.objects.filter(uuid__isnull=False)
def patch(self, request, *args, **kwargs):
form = FailureForm(request.DATA)
if not form.is_valid():
return Response(form.errors, status=status.HTTP_400_BAD_REQUEST)
obj = self.get_object()
data = {
'transaction_id': obj,
'transaction_url': absolutify(
urlparams(reverse('mkt.developers.transactions'),
transaction_id=obj.uuid)),
'url': form.cleaned_data['url'],
'retries': form.cleaned_data['attempts']}
owners = obj.addon.authors.values_list('email', flat=True)
send_mail_jinja('Payment notification failure.',
'webpay/failure.txt',
data, recipient_list=owners)
return Response(status=status.HTTP_202_ACCEPTED)
class ProductIconViewSet(CORSMixin, MarketplaceView, ListModelMixin,
RetrieveModelMixin, GenericViewSet):
authentication_classes = [RestOAuthAuthentication,
RestSharedSecretAuthentication,
RestAnonymousAuthentication]
permission_classes = [AnyOf(AllowReadOnly,
GroupPermission('ProductIcon', 'Create'))]
queryset = ProductIcon.objects.all()
serializer_class = ProductIconSerializer
cors_allowed_methods = ['get', 'post']
filter_fields = ('ext_url', 'ext_size', 'size')
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.DATA)
if serializer.is_valid():
log.info('Resizing product icon %s @ %s to %s for webpay' % (
serializer.data['ext_url'],
serializer.data['ext_size'],
serializer.data['size']))
tasks.fetch_product_icon.delay(serializer.data['ext_url'],
serializer.data['ext_size'],
serializer.data['size'])
return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['POST'])
@permission_classes((AllowAny,))
def sig_check(request):
"""
Returns a signed JWT to use for signature checking.
This is for Nagios checks to ensure that Marketplace's
signed tokens are valid when processed by Webpay.
"""
issued_at = calendar.timegm(time.gmtime())
req = {
'iss': settings.APP_PURCHASE_KEY,
'typ': settings.SIG_CHECK_TYP,
'aud': settings.APP_PURCHASE_AUD,
'iat': issued_at,
'exp': issued_at + 3600, # expires in 1 hour
'request': {}
}
return Response({'sig_check_jwt': sign_webpay_jwt(req)},
status=201)
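# --- Verification sketch (illustrative; not part of this module) ---
# How a monitoring check might validate the token returned by sig_check,
# assuming the PyJWT library and HS256 signing; the `secret` argument stands
# in for whatever key Webpay shares with the Marketplace (an assumption made
# for this sketch).
def verify_sig_check_jwt(token, secret):
    import jwt  # PyJWT, assumed available

    claims = jwt.decode(token, secret, algorithms=['HS256'],
                        audience=settings.APP_PURCHASE_AUD)
    assert claims['typ'] == settings.SIG_CHECK_TYP, claims['typ']
    return claims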
'''Tests for the functionality of ANN_simulation.py.
'''
import sys, os, math, subprocess, matplotlib
from functools import reduce
matplotlib.use('agg')
sys.path.append('../src/') # add the source file folder
from ANN_simulation import *
from numpy.testing import assert_almost_equal, assert_equal
class test_Sutils(object):
@staticmethod
def test_mark_and_modify_pdb_for_calculating_RMSD_for_plumed():
temp_out = 'temp_out.pdb'
Sutils.mark_and_modify_pdb_for_calculating_RMSD_for_plumed('../resources/1l2y.pdb', temp_out,
get_index_list_with_selection_statement('../resources/1l2y.pdb', 'name CA'))
a = Universe(temp_out)
b = a.select_atoms('name CA')
assert np.all(b.tempfactors) and np.all(b.occupancies)
b = a.select_atoms('not name CA')
assert not (np.any(b.tempfactors) or np.any(b.occupancies))
subprocess.check_output(['rm', temp_out])
return
@staticmethod
def test_write_some_frames_into_a_new_file():
input_pdb = '../tests/dependency/temp_output_0.pdb'
output_pdb = "../tests/dependency/temp_output_0_interval_3.pdb"
output_coor = output_pdb.replace('.pdb', '_coordinates.npy')
actual_output_coor = '../tests/dependency/temp_output_0_coor.npy'
for interval in range(3, 10):
Sutils.write_some_frames_into_a_new_file(input_pdb, 0, 0, interval, output_pdb)
if os.path.exists(output_coor):
subprocess.check_output(['rm', output_coor])
Alanine_dipeptide.generate_coordinates_from_pdb_files(output_pdb)
assert_almost_equal(np.load(output_coor), np.load(actual_output_coor)[::interval])
subprocess.check_output(['rm', output_coor, output_pdb])
return
@staticmethod
def test_get_boundary_points():
"""generate plotting for tests"""
cov = [[0.1, 0], [0, 0.1]] # diagonal covariance
get_points = lambda mean: np.random.multivariate_normal(mean, cov, 50)
points = reduce(lambda x, y: np.concatenate((x, y)), list(map(get_points, [[0, 1], [0, -1]])))
boundary_points = Sutils.get_boundary_points(points, preprocessing=True)
x, y = list(zip(*points))
x1, y1 = list(zip(*boundary_points))
fig, ax = plt.subplots()
ax.scatter(x, y, c='b')
ax.scatter(x1, y1, c='r')
fig.savefig('test_get_boundary_points_noncircular.png')
points = reduce(lambda x, y: np.concatenate((x, y)), list(map(get_points, [[-.8, -.8]])))
boundary_points = Sutils.get_boundary_points(points, preprocessing=True, is_circular_boundary=True,
range_of_PCs=[[-1, 1], [-1, 1]])
x, y = list(zip(*points))
x1, y1 = list(zip(*boundary_points))
fig, ax = plt.subplots()
ax.scatter(x, y, c='b')
ax.scatter(x1, y1, c='r')
fig.savefig('test_get_boundary_points_circular.png')
return
@staticmethod
def test_get_boundary_points_2_diagram():
"""diagram for the find_boundary algorithm"""
dimensionality = 2
fig, axes = plt.subplots(2, 2)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.3)
fig.set_size_inches(15, 15)
# hist_matrix = np.random.randint(1, 10, size=(size_of_grid, size_of_grid))
hist_matrix = [
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 3, 5, 3, 2, 1, 0],
[0, 0, 2, 9, 6, 2, 0, 0],
[0, 0, 5, 1, 7, 2, 0, 0],
[0, 1, 2, 9, 8, 1, 0, 0],
[0, 0, 0, 1, 4, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
hist_matrix = np.array(hist_matrix)
hist_matrix_processed = [[- np.exp(- y) for y in x] for x in hist_matrix] # preprocessing step: p_i = -exp(-n_i)
diff_with_neighbors = hist_matrix_processed - 1.0 / (2 * dimensionality) * sum(
[np.roll(hist_matrix_processed, 1, axis=x)
+ np.roll(hist_matrix_processed, -1, axis=x) for x in range(dimensionality)]
)
temp_fontsize = 25
sns.heatmap(hist_matrix, ax=axes[0][0], annot=True, cbar=False)
sns.heatmap(hist_matrix_processed, ax=axes[0][1], annot=True, cbar=False)
sns.heatmap(diff_with_neighbors, ax=axes[1][0], annot=True, cbar=False)
sns.heatmap(diff_with_neighbors < 0, ax=axes[1][1], annot=False, cbar=False)
axes[0][0].set_title(r'number of data points $n_i$', fontsize=temp_fontsize)
axes[0][1].set_title(r'$p_i = -\exp{(-n_i)}$', fontsize=temp_fontsize)
axes[1][0].text(2, 8.5, r'$v_i = p_i-\frac{1}{| K_i |}\sum_{j \in K_i} p_j$', fontsize=temp_fontsize)
axes[1][1].set_title('locations of selected cells', fontsize=temp_fontsize)
temp_annotation = ['(a)', '(b)', '(c)', '(d)']
index = 0
for _1 in axes:
for ax in _1:
ax.set_xlabel('$\\xi_1$', fontsize=temp_fontsize)
ax.set_ylabel('$\\xi_2$', fontsize=temp_fontsize)
ax.text(-0.5, 8.4, temp_annotation[index], fontsize=temp_fontsize - 5)
index += 1
# fig.tight_layout()
fig.savefig('diagram_of_finding_boundary.pdf', format='pdf', bbox_inches='tight')
return
@staticmethod
def test_L_method():
evaluation_values = [0, 0.1, 0.5, 0.85, 0.9, 0.93]
nums = list(range(len(evaluation_values)))
opt_num, x_data, y_data_left, y_data_right = Sutils.L_method(evaluation_values, nums)
fig, ax = plt.subplots()
ax.plot(x_data, y_data_left)
ax.plot(x_data, y_data_right)
ax.scatter(nums, evaluation_values)
fig.savefig("L_method.png")
assert (opt_num == 4), opt_num
return
@staticmethod
def test_rotating_coordinates():
data = np.loadtxt('../tests/dependency/temp_Trp_cage_data/1l2y_coordinates.txt').reshape((38, 60, 3))[0]
actual = Sutils.rotating_coordinates(data, [0,0,0], [0,0,1], np.pi / 2)
expected = np.array([data[:, 1], - data[:,0], data[:,2]]).T
assert_almost_equal(expected, actual)
return
@staticmethod
def test__get_expression_script_for_plumed():
with open('dependency/expected_plumed_Trp_script.txt', 'r') as my_f:
expected = my_f.read().strip()
actual = Trp_cage.get_expression_script_for_plumed(scaling_factor=2.0).strip()
assert (expected == actual), actual
return
# class test_Alanine_dipeptide(object):
# @staticmethod
# def test_get_many_cossin_from_coordinates_in_list_of_files():
# list_of_files = ['../tests/dependency/biased_output_fc_1000_x1_0.7_x2_-1.07_coordinates.txt']
# actual = Alanine_dipeptide().get_many_cossin_from_coordinates_in_list_of_files(list_of_files)
# assert_equal(100, len(actual))
# assert_equal(8, len(actual[0]))
# expected = np.loadtxt('../tests/dependency/output_cossin.txt')
# assert_almost_equal(expected, actual)
# return
#
# @staticmethod
# def test_get_many_dihedrals_from_cossin():
# angle = [.4, -.7, math.pi, -.45]
# cossin = [[1, 0, -1, 0, 1, 0, -1, 0], [0, 1, 0, -1, 0, 1, 0, -1],
# reduce(lambda x, y: x + y, [[cos(x), sin(x)] for x in angle])
# ]
# actual = Alanine_dipeptide().get_many_dihedrals_from_cossin(cossin)
# expected = [[0, 0, 0, 0], [math.pi / 2, -math.pi / 2, math.pi / 2, -math.pi / 2], angle]
# for item in range(len(actual)):
# for index in range(4):
# assert_almost_equal(actual[item][index], expected[item][index], 4)
# return
#
# @staticmethod
# def test_get_many_dihedrals_from_coordinates_in_file():
# list_of_files = ['../tests/dependency/biased_output_fc_1000_x1_0.7_x2_-1.07_coordinates.txt']
# actual = Alanine_dipeptide.get_many_dihedrals_from_coordinates_in_file(list_of_files)
# expected = np.loadtxt('../tests/dependency/output_dihedrals.txt')
# assert_almost_equal(actual, expected)
# return
# @staticmethod
# def test_generate_coordinates_from_pdb_files():
# pdb_file_name = '../tests/dependency/temp_output_0.pdb'
# actual_output_file = pdb_file_name.replace('.pdb', '_coordinates.txt')
# expected_output_files = '../tests/dependency/temp_output_0_coor.txt'
# for interval in range(1, 10):
# if interval != 1:
# actual_output_file = pdb_file_name.replace('.pdb', '_int_%d_coordinates.txt' % interval)
# if os.path.exists(actual_output_file):
# subprocess.check_output(['rm', actual_output_file])
# Alanine_dipeptide.generate_coordinates_from_pdb_files(pdb_file_name, step_interval=interval)
# assert_equal(np.loadtxt(actual_output_file), np.loadtxt(expected_output_files)[::interval])
# subprocess.check_output(['rm', actual_output_file])
# return
class test_Trp_cage(object):
@staticmethod
def test_get_non_repeated_pairwise_distance_as_list_of_alpha_carbon():
pdb_file_list = ['../tests/dependency/temp_Trp_cage_data/1l2y.pdb']
a = Trp_cage.get_pairwise_distance_matrices_of_selected_atoms(pdb_file_list)
a = [item.reshape(400, 1) for item in a]
b = Trp_cage.get_non_repeated_pairwise_distance(pdb_file_list)
assert (len(a) == len(b))
for _1 in range(len(b)):
for _2 in b[_1]:
assert (_2 in a[_1])
return
@staticmethod
def test_get_pairwise_distance_matrices_of_alpha_carbon():
actual = Trp_cage.get_pairwise_distance_matrices_of_selected_atoms(['../tests/dependency/temp_Trp_cage_data/1l2y.pdb'])[0]
expected = np.loadtxt("../tests/dependency/test_get_pairwise_distance_matrices_of_alpha_carbon.txt")
assert_almost_equal(actual, expected)
return
@staticmethod
def test_rotating_dihedral_angles_and_save_to_pdb():
pdb_file = '../tests/dependency/temp_Trp_cage_data/1l2y.pdb'
output = 'temp_rotating_out.pdb'
target_dihedrals_list = [np.ones((38, 38)), np.zeros((38, 38))]
for target_dihedrals in target_dihedrals_list:
Trp_cage.rotating_dihedral_angles_and_save_to_pdb(pdb_file, target_dihedrals, output)
out_coor_file_list = Trp_cage.generate_coordinates_from_pdb_files(output)
actual_dihedrals = Trp_cage.get_many_dihedrals_from_coordinates_in_file(out_coor_file_list)
print(out_coor_file_list)
# print np.max(np.abs(actual_dihedrals - target_dihedrals))
assert_almost_equal(actual_dihedrals, target_dihedrals, decimal=2)
return
class test_coordinates_data_files_list(object):
@staticmethod
def test__init__():
folder = '../tests/dependency/temp_data'
num_of_coor_files = len(subprocess.check_output(['find', folder, '-name', "*_coordinates.npy"]).strip().split())
a = coordinates_data_files_list([folder])
assert len(a.get_list_of_coor_data_files()) == num_of_coor_files
assert a._list_num_frames == [100 for _ in range(num_of_coor_files)]
assert sorted(a.get_list_of_coor_data_files()) == a.get_list_of_coor_data_files()
assert len(a.get_list_of_corresponding_pdb_dcd()) == num_of_coor_files
assert sorted(a.get_list_of_corresponding_pdb_dcd()) == a.get_list_of_corresponding_pdb_dcd()
@staticmethod
def test_create_sub_coor_data_files_list_using_filter_conditional():
folder = '../tests/dependency/temp_data'
a = coordinates_data_files_list([folder])
a_sub = a.create_sub_coor_data_files_list_using_filter_conditional(lambda x: '0.7' in x)
for item in a_sub.get_list_of_coor_data_files():
assert ('0.7' in item)
return
@staticmethod
def test_get_pdb_name_and_corresponding_frame_index_with_global_coor_index():
_1 = coordinates_data_files_list(['../tests/dependency/temp_data/'])
pdb_files = _1.get_list_of_corresponding_pdb_dcd()
for item in range(1, 602, 100):
assert (_1.get_pdb_name_and_corresponding_frame_index_with_global_coor_index(item) == (pdb_files[item // 100], 1))
return
class test_autoencoder_Keras(object):
def __init__(self):
my_file_list = coordinates_data_files_list(['../tests/dependency/noncircular_alanine_exploration_data/'])
self._data = np.array(Alanine_dipeptide.get_many_cossin_from_coordinates_in_list_of_files(
my_file_list.get_list_of_coor_data_files()))
self._dihedrals = Alanine_dipeptide.get_many_dihedrals_from_cossin(self._data)
def test_train(self):
data, dihedrals = self._data, self._dihedrals
hidden_layers_list = [["Tanh", "Tanh", "Tanh", "Tanh", "Tanh", "Tanh", "Tanh"],
["Sigmoid", "Sigmoid", "Sigmoid", "Sigmoid", "Tanh", "Sigmoid", "Tanh"]]
model_type_list = [autoencoder_Keras, autoencoder_torch]
reg_list = [0.001, 0]
for item_activation in range(2):
for is_hi, hier_var in [(0, 0), (1,1), (1,2)]: # do not test variant 0 for now
for type_index, model_type in enumerate(model_type_list):
model = model_type(1447, data,
data_files=['/tmp/train_in.npy', '/tmp/train_out.npy'],
node_num=[8, 8, 15, 8, 2, 15, 8, 8, 8],
hidden_layers_types=hidden_layers_list[item_activation],
network_parameters = [0.02, 0.9,0, True, [reg_list[item_activation]]* 8],
batch_size=100, hierarchical=is_hi, hi_variant=hier_var,
epochs=50
)
model.train()
PCs = model.get_PCs()
[x, y] = list(zip(*PCs))
psi = [item[2] for item in dihedrals]
fig, ax = plt.subplots()
ax.scatter(x, y, c=psi, cmap='gist_rainbow')
ax.set_title("FVE = %f" % model.get_fraction_of_variance_explained())
file_name = 'try_model_type_%d_hierarchical_%d_%d_act_%d.pkl' % (
type_index, is_hi, hier_var, item_activation)
model.save_into_file(file_name)
fig.savefig(file_name.replace('.pkl', '.png'))
return
def test_train_with_different_mse_weights(self):
data, dihedrals = self._data, self._dihedrals
for _1, weights in enumerate([None, np.array([1,1,0,0,0,0,1,1]),
np.array([0,0,1,1,1,1,0,0]), np.array([1,1,1,1,0,0,0,0])]):
model = autoencoder_Keras(1447, data,
node_num=[8, 8, 15, 8, 2, 15, 8, 8, 8],
hidden_layers_types=["Tanh", "Tanh", "Tanh", "Tanh", "Tanh", "Tanh", "Tanh"],
network_parameters=[0.02, 0.9, 0, True, [0.001] * 8],
batch_size=100, hierarchical=0,
mse_weights=weights
)
_, history = model.train()
PCs = model.get_PCs()
[x, y] = list(zip(*PCs))
psi = [item[2] for item in dihedrals]
fig, ax = plt.subplots()
ax.scatter(x, y, c=psi, cmap='gist_rainbow')
model.save_into_file('try_diff_weights_%02d.pkl' % _1)
fig.savefig('try_diff_weights_%02d.png' % _1)
return
def test_train_2(self):
data, dihedrals = self._data, self._dihedrals
model = autoencoder_Keras(1447, data,
node_num=[8, 15, 4, 15, 8],
hidden_layers_types=["Tanh", "Circular", "Tanh"],
network_parameters = [0.1, 0.4,0, True, [0.001]* 4],
hierarchical=False
)
model.train()
PCs = model.get_PCs()
[x, y] = list(zip(*PCs))
psi = [item[2] for item in dihedrals]
fig, ax = plt.subplots()
ax.scatter(x, y, c=psi, cmap='gist_rainbow')
fig.savefig('try_keras_circular.png')
return
def test_save_into_file_and_load(self):
data = self._data
model = autoencoder_Keras(1447, data,
node_num=[8, 15, 2, 15, 8],
hidden_layers_types=["Tanh", "Tanh", "Tanh"],
network_parameters=[0.02, 0.9,0, True, [0.001]* 4],
batch_size=50,
data_files=['test_save_into_file.npy', 'test_save_into_file.npy']
)
model.train()
model.save_into_file('test_save_into_file.pkl')
model.save_into_file('test_save_into_file_fraction.pkl', fraction_of_data_to_be_saved=0.5)
model.save_into_file('temp_save/complicated/path/temp.pkl')
_ = autoencoder.load_from_pkl_file('test_save_into_file.pkl')
return
@staticmethod
def check_two_plumed_strings_containing_floats(string_1, string_2):
"""due to precision issue, string literals may not be exactly the same for two plumed strings, so we
need to explicitly compare the float values"""
def is_float(s):
try:
float(s)
return True
except ValueError:
return False
split_1 = re.split(' |\n|=|,', string_1)
split_2 = re.split(' |\n|=|,', string_2)
assert (len(split_1) == len(split_2)), (len(split_1), len(split_2))
for _1, _2 in zip(split_1, split_2):
if is_float(_1):
assert_almost_equal(float(_1), float(_2), decimal=4)
else:
assert (_1 == _2), (_1, _2)
return
def test_get_plumed_script_for_biased_simulation_with_solute_pairwise_dis_and_solvent_cg_input_and_ANN(self):
scaling_factor_v = 26.9704478916
scaling_factor_u = 29.1703348377
r_high = 5.5
atom_indices = list(range(1, 25))
water_index_string = '75-11421:3'
ae = autoencoder.load_from_pkl_file('../tests/dependency/solute_plus_solvent_AE/temp_alpha_0.5.pkl')
with open('../tests/dependency/solute_plus_solvent_AE/temp_plumed.txt', 'r') as my_f:
expected_plumed = my_f.read().strip()
plumed_string = ae.get_plumed_script_for_biased_simulation_with_solute_pairwise_dis_and_solvent_cg_input_and_ANN(
list(range(1, 25)), scaling_factor_u, water_index_string, atom_indices, -5, r_high, scaling_factor_v)
self.check_two_plumed_strings_containing_floats(plumed_string, expected_plumed)
AE = autoencoder.load_from_pkl_file('../tests/dependency/solvent_AE/solvent_test.pkl')
with open('../tests/dependency/solvent_AE/temp_plumed.txt', 'r') as my_f:
expected_plumed = my_f.read().strip()
plumed_string = AE.get_plumed_script_for_biased_simulation_with_INDUS_cg_input_and_ANN(
water_index_string, atom_indices, -5, r_high, scaling_factor_v).strip()
self.check_two_plumed_strings_containing_floats(plumed_string, expected_plumed)
return
class test_autoencoder_torch(object):
@staticmethod
def test_general_train_save_and_load():
data = np.random.rand(1000, 21)
a = autoencoder_torch(1447, data,
output_data_set=data,
hierarchical=True, hi_variant=2,
batch_size=500,
node_num=[21, 100, 2, 100, 21],
hidden_layers_types=['tanh', 'tanh', 'tanh'], epochs=10)
a.train(lag_time=10)
a.save_into_file('/tmp/temp_save.pkl')
torch.save(a._ae, '/tmp/temp.df')
model_1 = torch.load('/tmp/temp.df')
torch.save(a._ae.state_dict(), '/tmp/temp_2.df')
model_2 = AE_net([21, 100, 2], [2, 100, 21], activations=a._hidden_layers_type + ['linear'],
hi_variant=2, hierarchical=True).cuda()
model_2.load_state_dict(torch.load('/tmp/temp_2.df'))
data_in = torch.rand(1000, 21).cuda()
assert_almost_equal(model_1(data_in)[0].cpu().data.numpy(), a._ae(data_in)[0].cpu().data.numpy())
assert_almost_equal(model_2(data_in)[0].cpu().data.numpy(), a._ae(data_in)[0].cpu().data.numpy())
_ = autoencoder_torch.load_from_pkl_file('/tmp/temp_save.pkl')
return
@staticmethod
def test_time_lagged_AE_stored_data_saving():
data = np.random.rand(1000, 21)
a = autoencoder_torch(1447, data,
output_data_set=data,
hierarchical=True, hi_variant=2,
rec_loss_type=1,
batch_size=500,
node_num=[21, 100, 2, 100, 21],
hidden_layers_types=['tanh', 'tanh', 'tanh'], epochs=10)
a.train(lag_time=10)
assert_almost_equal(a._data_set, data[:-10])
assert_almost_equal(a._output_data_set, data[10:])
return
@staticmethod
def test_save_and_load_data():
data = np.random.rand(1000, 21)
a = autoencoder_torch(1447, data,
output_data_set=data,
hierarchical=True,
batch_size=500,
node_num=[21, 100, 2, 100, 21], epochs=10,
data_files=['data.npy', 'data.npy'])
a.train(lag_time=0)
a.save_into_file('temp_save_pytorch/temp_save.pkl')
assert (os.path.isfile('temp_save_pytorch/data.npy'))
b = autoencoder_torch.load_from_pkl_file('temp_save_pytorch/temp_save.pkl')
assert_almost_equal(a._data_set, b._data_set)
assert_almost_equal(a._output_data_set, b._output_data_set)
return
class test_biased_simulation(object):
@staticmethod
def helper_biased_simulation_alanine_dipeptide(potential_center):
autoencoder_coeff_file = 'autoencoder_info_9.npy'
autoencoder_pkl_file = '../tests/dependency/test_biased_simulation/network_5.pkl'
my_network = autoencoder.load_from_pkl_file(autoencoder_pkl_file)
assert (isinstance(my_network, autoencoder))
my_network.write_coefficients_of_connections_into_file(autoencoder_coeff_file)
output_folder = 'temp_output_test_biased_simulation'
if os.path.exists(output_folder):
subprocess.check_output(['rm', '-rf', output_folder])
subprocess.check_output(
'python ../src/biased_simulation.py 50 5000 5000 %s %s pc_%s --num_of_nodes %s --layer_types %s --platform CPU --data_type_in_input_layer 1'
% (output_folder, autoencoder_coeff_file, potential_center, "21,40,2", "Tanh,Tanh"),
shell=True)
Alanine_dipeptide.generate_coordinates_from_pdb_files(output_folder)
fig, ax = plt.subplots()
input_data = coordinates_data_files_list([output_folder]).get_coor_data(0.5)
input_data = Sutils.remove_translation(input_data)
PCs = my_network.get_PCs(input_data)
x, y = list(zip(*PCs))
ax.scatter(x, y, c=list(range(len(x))), cmap='gist_rainbow', s=5)
potential_center_num = [float(item_1) for item_1 in potential_center.split(',')]
ax.scatter([potential_center_num[0]], [potential_center_num[1]], marker='X', s=30)
fig.savefig('test_biased_simulation_%s.png' % potential_center)
subprocess.check_output(['rm', '-rf', output_folder])
return
@staticmethod
def test_biased_simulation_alanine_dipeptide():
for item in ['-0.3,-0.7', '-0.3,-0.5', '-0.2,-0.4', '0,-0.4', '-0.1,-0.5']:
test_biased_simulation.helper_biased_simulation_alanine_dipeptide(item.replace(' ',''))
return
@staticmethod
def test_biased_simulation_alanine_dipeptide_with_metadynamics(use_well_tempered=0, biasfactor=-1):
autoencoder_pkl_file = '../tests/dependency/test_biased_simulation/network_5.pkl'
output_folder = 'temp_output_test_biased_simulation'
a = autoencoder.load_from_pkl_file(autoencoder_pkl_file)
a.write_expression_script_for_plumed('temp_info.txt', mode='ANN')
subprocess.check_output(
'python ../src/biased_simulation.py 50 50000 0 %s temp_info.txt pc_0,0 --MTD_pace 100 --platform CPU --bias_method MTD --MTD_biasfactor %f --MTD_WT %d --equilibration_steps 0'
% (output_folder, biasfactor, use_well_tempered), shell=True)
subprocess.check_output(['python', '../src/generate_coordinates.py', 'Alanine_dipeptide', '--path', output_folder])
fig, axes = plt.subplots(1, 3)
data = np.load(
output_folder + '/output_fc_0.000000_pc_[0.0,0.0]_coordinates.npy')
data /= 0.5
data = Sutils.remove_translation(data)
PCs = a.get_PCs(data)
ax = axes[0]
ax.set_xlabel('CV1')
ax.set_ylabel('CV2')
ax.set_title('CV data generated by autoencoder')
im = ax.scatter(PCs[:, 0], PCs[:, 1], c=list(range(PCs.shape[0])), cmap='gist_rainbow', s=4)
fig.colorbar(im, ax=ax)
out_data = np.loadtxt('temp_MTD_out.txt')
ax = axes[1]
im = ax.scatter(out_data[:, 1], out_data[:, 2], c=list(range(out_data.shape[0])), cmap='gist_rainbow', s=4)
ax.set_xlabel('CV1')
ax.set_ylabel('CV2')
ax.set_title('CV data generated by PLUMED')
fig.colorbar(im, ax=ax)
ax = axes[2]
dihedrals = Alanine_dipeptide.get_many_dihedrals_from_cossin(
Alanine_dipeptide.get_many_cossin_from_coordinates(data))
dihedrals = np.array(dihedrals)
im = ax.scatter(dihedrals[:, 1], dihedrals[:, 2], c=list(range(len(dihedrals))), cmap="gist_rainbow", s=4)
ax.set_xlabel('$\phi$')
ax.set_ylabel('$\psi$')
ax.set_title('data in phi-psi space')
fig.colorbar(im, ax=ax)
fig.set_size_inches((15, 5))
fig.savefig('metadynamics_biasfactor_%f.png' % biasfactor)
subprocess.check_output(['rm', '-rf', output_folder])
return
@staticmethod
def test_biased_simulation_alanine_dipeptide_with_metadynamics_multiple():
test_biased_simulation.test_biased_simulation_alanine_dipeptide_with_metadynamics(0, -1)
for item in [5, 20, 100]:
test_biased_simulation.test_biased_simulation_alanine_dipeptide_with_metadynamics(1, item)
return
class test_Helper_func(object):
@staticmethod
def test_compute_distances_min_image_convention():
output_pdb = 'out_for_computing_distances.pdb'
subprocess.check_output(['python', '../src/biased_simulation_general.py', 'Trp_cage', '50', '1000', '0', 'temp_out_12345',
'none', 'pc_0,0', 'explicit', 'NPT', '--platform', 'CUDA', '--device', '0', '--out_traj', output_pdb])
import mdtraj as md
box_length = 4.5 # in nm
temp_t = md.load(output_pdb)
temp_t.unitcell_lengths, temp_t.unitcell_angles = box_length * np.ones((20, 3)), 90 * np.ones((20, 3))
temp_u = Universe(output_pdb)
a_sel = temp_u.select_atoms('name N')
b_sel = temp_u.select_atoms('name O and resname HOH')
absolute_index = b_sel.atoms.indices[30]
b_positions = np.array([b_sel.positions for _ in temp_u.trajectory])
b_positions = b_positions.reshape(20, b_positions.shape[1] * b_positions.shape[2])
a_positions = np.array([a_sel.positions for _ in temp_u.trajectory])
a_positions = a_positions.reshape(20, a_positions.shape[1] * a_positions.shape[2])
result = Helper_func.compute_distances_min_image_convention(a_positions, b_positions, 10 * box_length)
assert_almost_equal(md.compute_distances(temp_t, [[0, absolute_index]]).flatten(), result[:, 0, 30] / 10, decimal=4)
subprocess.check_output(['rm', '-rf', output_pdb, 'temp_out_12345'])
return
@staticmethod
def test_shuffle_multiple_arrays():
a = np.random.rand(10, 2)
b1, b2 = Helper_func.shuffle_multiple_arrays([a[:, 0], a[:, 1]])
for item in range(10):
assert( [b1[item], b2[item]] in a)
return
@staticmethod
def test_attempt_to_save_npy():
import shutil
def get_num_files_in_folder(temp_folder): return len(os.listdir(temp_folder))
a = 2 * np.eye(3, 3)
folder = 'temp_test_save_npy'
if os.path.exists(folder): shutil.rmtree(folder)
os.mkdir(folder)
filename = folder + '/1.npy'
Helper_func.attempt_to_save_npy(filename, a)
assert (os.path.isfile(filename))
assert (get_num_files_in_folder(folder) == 1)
Helper_func.attempt_to_save_npy(filename, a)
assert (get_num_files_in_folder(folder) == 1)
for item in range(10):
Helper_func.attempt_to_save_npy(filename, a+item)
assert (get_num_files_in_folder(folder) == item + 1)
shutil.rmtree(folder)
return
class test_others(object):
@staticmethod
def test_SphSh_INDUS_PLUMED_plugin():
# test for original version of SphSh_INDUS_PLUMED_plugin provided by Prof. Amish Patel
num_frames = 20
potential_center = np.random.random(size=3) * 3
with open('temp_plumed.txt', 'w') as my_f:
my_f.write('''
SPHSH ATOMS=306-11390:4 XCEN=%f YCEN=%f ZCEN=%f RLOW=-0.5 RHIGH=0.311 SIGMA=0.01 CUTOFF=0.02 LABEL=sph
SPHSH ATOMS=306-11390:4 XCEN=%f YCEN=%f ZCEN=%f RLOW=0.05 RHIGH=0.311 SIGMA=0.01 CUTOFF=0.02 LABEL=sph_2
RESTRAINT ARG=sph.Ntw AT=5 KAPPA=5 SLOPE=0 LABEL=mypotential
PRINT STRIDE=50 ARG=sph.N,sph.Ntw,sph_2.N,sph_2.Ntw FILE=NDATA''' \
% (potential_center[0], potential_center[1], potential_center[2],
potential_center[0], potential_center[1], potential_center[2])) # a "TER" record separates solute and solvent in the pdb file, so the index should start at 306, not 307
out_pdb = 'temp_plumed/output_fc_0.0_pc_[0.0,0.0]_T_300_explicit_NPT.pdb'
subprocess.check_output(['python', '../src/biased_simulation_general.py', 'Trp_cage', '50', '1000', '0',
'temp_plumed', 'none', 'pc_0,0', 'explicit', 'NPT', '--platform', 'CUDA',
'--bias_method', 'plumed_other', '--plumed_file', 'temp_plumed.txt',
'--out_traj', out_pdb])
temp_u = Universe(out_pdb)
reporter_file = out_pdb.replace('output', 'report').replace('.pdb', '.txt')
box_length_list = Helper_func.get_box_length_list_fom_reporter_file(reporter_file, unit='A')
O_sel = temp_u.select_atoms('name O and resname HOH')
O_coords = np.array([O_sel.positions for _ in temp_u.trajectory]).reshape(num_frames, 2772 * 3)
distances = Helper_func.compute_distances_min_image_convention(
10 * np.array([potential_center for _ in range(num_frames)]), O_coords, box_length_list)
coarse_count, actual_count = Helper_func.get_cg_count_in_sphere(distances, 3.11, 0.2, .1)
plumed_count = np.loadtxt('NDATA')
assert_almost_equal(plumed_count[-num_frames:, 1], actual_count.flatten())
assert_almost_equal(plumed_count[-num_frames:, 2], coarse_count.flatten(), decimal=2)
coarse_count_1, actual_count_1 = Helper_func.get_cg_count_in_shell(distances, 0.5, 3.11, 0.2, .1)
assert_almost_equal(plumed_count[-num_frames:, 3], actual_count_1.flatten())
assert_almost_equal(plumed_count[-num_frames:, 4], coarse_count_1.flatten(), decimal=2)
subprocess.check_output(['rm', '-rf', 'temp_plumed', 'NDATA', 'temp_plumed.txt'])
return
@staticmethod
def test_SphShMod_INDUS_PLUMED_plugin():
num_frames = 20
with open('temp_plumed.txt', 'w') as my_f:
my_f.write('''
SPHSHMOD ATOMS=306-11390:4 ATOMREF=1 RLOW=-0.5 RHIGH=0.311 SIGMA=0.01 CUTOFF=0.02 LABEL=sph
RESTRAINT ARG=sph.Ntw AT=10 KAPPA=5 SLOPE=0 LABEL=mypotential
PRINT STRIDE=50 ARG=sph.N,sph.Ntw FILE=NDATA''' )
out_pdb = 'temp_plumed/output_fc_0.0_pc_[0.0,0.0]_T_300_explicit_NPT.pdb'
subprocess.check_output(['python', '../src/biased_simulation_general.py', 'Trp_cage', '50', '1000', '0',
'temp_plumed', 'none', 'pc_0,0', 'explicit', 'NPT', '--platform', 'CUDA',
'--bias_method', 'plumed_other', '--plumed_file', 'temp_plumed.txt',
'--out_traj', out_pdb])
temp_u = Universe(out_pdb)
reporter_file = out_pdb.replace('output', 'report').replace('.pdb', '.txt')
box_length_list = Helper_func.get_box_length_list_fom_reporter_file(reporter_file, unit='A')
print(box_length_list)
O_sel = temp_u.select_atoms('name O and resname HOH')
N_sel = temp_u.select_atoms('resnum 1 and name N')
O_coords = np.array([O_sel.positions for _ in temp_u.trajectory]).reshape(num_frames, 2772 * 3)
N_coords = np.array([N_sel.positions for _ in temp_u.trajectory]).reshape(num_frames, 3)
distances = Helper_func.compute_distances_min_image_convention(N_coords, O_coords, box_length_list)
coarse_count, actual_count = Helper_func.get_cg_count_in_sphere(distances, 3.11, 0.2, .1)
plumed_count = np.loadtxt('NDATA')
assert_almost_equal(plumed_count[-num_frames:, 1], actual_count.flatten())
assert_almost_equal(plumed_count[-num_frames:, 2], coarse_count.flatten(), decimal=2)
subprocess.check_output(['rm', '-rf', 'temp_plumed', 'NDATA', 'temp_plumed.txt'])
return
|
|
import datetime
import docker
import multiprocessing
import os
import pkg_resources
import platform
import subprocess
from vent.api.plugins import Plugin
from vent.api.templates import Template
from vent.helpers.paths import PathDirs
def Version():
""" Get Vent version """
version = ''
try:
version = "v"+pkg_resources.require("vent")[0].version
except Exception as e: # pragma: no cover
pass
return version
def System():
""" Get system operating system """
return platform.system()
def Docker():
""" Get Docker setup information """
docker_info = {'server':{}, 'env':'', 'type':'', 'os':''}
# get docker server version
try:
d_client = docker.from_env()
docker_info['server'] = d_client.version()
except Exception as e: # pragma: no cover
pass
# get operating system
system = System()
docker_info['os'] = system
# check if native or using docker-machine
if 'DOCKER_MACHINE_NAME' in os.environ:
# using docker-machine
docker_info['env'] = os.environ['DOCKER_MACHINE_NAME']
docker_info['type'] = 'docker-machine'
elif 'DOCKER_HOST' in os.environ:
# not native
docker_info['env'] = os.environ['DOCKER_HOST']
docker_info['type'] = 'remote'
else:
# using "local" server
docker_info['type'] = 'native'
return docker_info
def Containers(vent=True, running=True):
"""
    Get containers that have been created; by default, limit to vent
    containers that are running
"""
containers = []
try:
d_client = docker.from_env()
if vent:
c = d_client.containers.list(all=not running, filters={'label':'vent'})
else:
c = d_client.containers.list(all=not running)
for container in c:
containers.append((container.name, container.status))
except Exception as e: # pragma: no cover
pass
return containers
def Cpu():
cpu = "Unknown"
try:
cpu = str(multiprocessing.cpu_count())
except Exception as e: # pragma: no cover
pass
return cpu
def Gpu():
gpu = ""
try:
d_client = docker.from_env()
nvidia_image = d_client.images.list(name='nvidia/cuda:8.0-runtime')
if len(nvidia_image) > 0:
proc = subprocess.Popen(['nvidia-docker run --rm nvidia/cuda:8.0-runtime nvidia-smi -L'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, close_fds=True)
gpus = proc.stdout.read()
if gpus:
for line in gpus.strip().split("\n"):
gpu += line.split(" (UUID: ")[0] + ", "
gpu = gpu[:-2]
else:
gpu = "None"
else:
gpu = "None"
except Exception as e: # pragma: no cover
gpu = "Unknown"
return gpu
def Images(vent=True):
""" Get images that are build, by default limit to vent images """
images = []
try:
d_client = docker.from_env()
if vent:
i = d_client.images.list(filters={'label':'vent'})
else:
i = d_client.images.list()
for image in i:
images.append((image.tags[0], image.short_id))
except Exception as e: # pragma: no cover
pass
return images
def Jobs():
"""
    Get the number of jobs that are running and finished, and the total
    number of tools running and finished for those jobs
"""
jobs = [0, 0, 0, 0]
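    # Slot layout (a reading of the logic below): jobs[0]=running jobs,
    # jobs[1]=running tools, jobs[2]=finished jobs, jobs[3]=finished tools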
# get running jobs
try:
d_client = docker.from_env()
c = d_client.containers.list(all=False, filters={'label':'vent-plugin'})
files = []
for container in c:
jobs[1] += 1
if 'file' in container.attrs['Config']['Labels']:
if container.attrs['Config']['Labels']['file'] not in files:
files.append(container.attrs['Config']['Labels']['file'])
jobs[0] = len(files)
except Exception as e: # pragma: no cover
pass
# get finished jobs
try:
d_client = docker.from_env()
c = d_client.containers.list(all=True, filters={'label':'vent-plugin'})
files = []
for container in c:
jobs[3] += 1
if 'file' in container.attrs['Config']['Labels']:
if container.attrs['Config']['Labels']['file'] not in files:
files.append(container.attrs['Config']['Labels']['file'])
jobs[2] = len(files)-jobs[0]
jobs[3] = jobs[3]-jobs[1]
except Exception as e: # pragma: no cover
pass
return tuple(jobs)
def Tools(**kargs):
""" Get tools that exist in the manifest """
path_dirs = PathDirs(**kargs)
manifest = os.path.join(path_dirs.meta_dir, "plugin_manifest.cfg")
template = Template(template=manifest)
tools = template.sections()
return tools[1]
def Services(vent=True):
"""
    Get services that have exposed ports; by default, limit to vent containers
"""
services = []
try:
d_client = docker.from_env()
if vent:
containers = d_client.containers.list(filters={'label':'vent'})
else:
containers = d_client.containers.list()
for container in containers:
if vent:
name = container.attrs['Config']['Labels']['vent.name']
else:
name = container.name
ports = container.attrs['NetworkSettings']['Ports']
p = []
for port in ports:
if ports[port]:
p.append(ports[port][0]['HostIp']+":"+ports[port][0]['HostPort'])
if p:
services.append((name, p))
except Exception as e: # pragma: no cover
pass
return services
def Core(branch="master", **kargs):
"""
Get the normal core tools, and the currently installed/built/running ones,
including custom core services
"""
# !! TODO this might need to store namespaces/branches/versions
core = {'built':[], 'running':[], 'installed':[], 'normal':[]}
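    # Illustrative shape of the result (tool names here are hypothetical):
    # {'built': ['file_drop'], 'running': ['file_drop'],
    #  'installed': ['file_drop', 'rq_worker'], 'normal': ['file_drop', 'rq_worker']}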
# get normal core tools
plugins = Plugin(plugins_dir=".internals/plugins")
status, cwd = plugins.clone('https://github.com/cyberreboot/vent')
if status:
plugins.version = 'HEAD'
plugins.branch = branch
response = plugins.checkout()
matches = plugins._available_tools(groups='core')
for match in matches:
core['normal'].append(match[0].split('/')[-1])
else:
core['normal'] = 'failed'
# get core tools that have been installed
path_dirs = PathDirs(**kargs)
manifest = os.path.join(path_dirs.meta_dir, "plugin_manifest.cfg")
template = Template(template=manifest)
tools = template.sections()
if tools[0]:
for tool in tools[1]:
groups = template.option(tool, "groups")
if groups[0] and "core" in groups[1]:
name = template.option(tool, "name")
if name[0]:
core['installed'].append(name[1])
# get core tools that have been built and/or are running
try:
d_client = docker.from_env()
images = d_client.images.list()
for image in images:
try:
if "vent.groups" in image.attrs['Labels'] and 'core' in image.attrs['Labels']['vent.groups']:
if 'vent.name' in image.attrs['Labels']:
core['built'].append(image.attrs['Labels']['vent.name'])
except Exception as err: # pragma: no cover
pass
containers = d_client.containers.list()
for container in containers:
try:
if "vent.groups" in container.attrs['Config']['Labels'] and 'core' in container.attrs['Config']['Labels']['vent.groups']:
if 'vent.name' in container.attrs['Config']['Labels']:
core['running'].append(container.attrs['Config']['Labels']['vent.name'])
except Exception as err: # pragma: no cover
pass
except Exception as e: # pragma: no cover
pass
return core
def Timestamp():
""" Get the current datetime in UTC """
timestamp = ""
try:
        timestamp = str(datetime.datetime.utcnow())+" UTC"
except Exception as e: # pragma: no cover
pass
return timestamp
def Uptime():
""" Get the current uptime information """
uptime = ""
try:
uptime = str(subprocess.check_output(["uptime"], close_fds=True))[1:]
except Exception as e: # pragma: no cover
pass
return uptime
|
|
from django import oldforms, template
from django.conf import settings
from django.contrib.admin.filterspecs import FilterSpec
from django.contrib.admin.views.decorators import staff_member_required
from django.views.decorators.cache import never_cache
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist, PermissionDenied
from django.core.paginator import QuerySetPaginator, InvalidPage
from django.shortcuts import get_object_or_404, render_to_response
from django.db import models
from django.db.models.query import QuerySet
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.utils.html import escape
from django.utils.text import capfirst, get_text_list
from django.utils.encoding import force_unicode, smart_str
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
import operator
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION
if not LogEntry._meta.installed:
raise ImproperlyConfigured, "You'll need to put 'django.contrib.admin' in your INSTALLED_APPS setting before you can use the admin application."
if 'django.core.context_processors.auth' not in settings.TEMPLATE_CONTEXT_PROCESSORS:
raise ImproperlyConfigured, "You'll need to put 'django.core.context_processors.auth' in your TEMPLATE_CONTEXT_PROCESSORS setting before you can use the admin application."
# The system will display a "Show all" link on the change list only if the
# total result count is less than or equal to this setting.
MAX_SHOW_ALL_ALLOWED = 200
# Changelist settings
ALL_VAR = 'all'
ORDER_VAR = 'o'
ORDER_TYPE_VAR = 'ot'
PAGE_VAR = 'p'
SEARCH_VAR = 'q'
IS_POPUP_VAR = 'pop'
ERROR_FLAG = 'e'
# Text to display within change-list table cells if the value is blank.
EMPTY_CHANGELIST_VALUE = '(None)'
use_raw_id_admin = lambda field: isinstance(field.rel, (models.ManyToOneRel, models.ManyToManyRel)) and field.rel.raw_id_admin
class IncorrectLookupParameters(Exception):
pass
def quote(s):
"""
Ensure that primary key values do not confuse the admin URLs by escaping
any '/', '_' and ':' characters. Similar to urllib.quote, except that the
quoting is slightly different so that it doesn't get automatically
unquoted by the Web browser.
"""
if type(s) != type(''):
return s
res = list(s)
for i in range(len(res)):
c = res[i]
if c in ':/_':
res[i] = '_%02X' % ord(c)
return ''.join(res)
def unquote(s):
"""
Undo the effects of quote(). Based heavily on urllib.unquote().
"""
mychr = chr
myatoi = int
list = s.split('_')
res = [list[0]]
myappend = res.append
del list[0]
for item in list:
if item[1:2]:
try:
myappend(mychr(myatoi(item[:2], 16)) + item[2:])
except ValueError:
myappend('_' + item)
else:
myappend('_' + item)
return "".join(res)
def get_javascript_imports(opts, auto_populated_fields, field_sets):
# Put in any necessary JavaScript imports.
js = ['js/core.js', 'js/admin/RelatedObjectLookups.js']
if auto_populated_fields:
js.append('js/urlify.js')
if opts.has_field_type(models.DateTimeField) or opts.has_field_type(models.TimeField) or opts.has_field_type(models.DateField):
js.extend(['js/calendar.js', 'js/admin/DateTimeShortcuts.js'])
if opts.get_ordered_objects():
        js.extend(['js/getElementsBySelector.js', 'js/dom-drag.js', 'js/admin/ordering.js'])
if opts.admin.js:
js.extend(opts.admin.js)
seen_collapse = False
for field_set in field_sets:
if not seen_collapse and 'collapse' in field_set.classes:
seen_collapse = True
js.append('js/admin/CollapsedFieldsets.js')
for field_line in field_set:
try:
for f in field_line:
if f.rel and isinstance(f, models.ManyToManyField) and f.rel.filter_interface:
                        js.extend(['js/SelectBox.js', 'js/SelectFilter2.js'])
raise StopIteration
except StopIteration:
break
return js
class AdminBoundField(object):
def __init__(self, field, field_mapping, original):
self.field = field
self.original = original
self.form_fields = [field_mapping[name] for name in self.field.get_manipulator_field_names('')]
self.element_id = self.form_fields[0].get_id()
self.has_label_first = not isinstance(self.field, models.BooleanField)
self.raw_id_admin = use_raw_id_admin(field)
self.is_date_time = isinstance(field, models.DateTimeField)
self.is_file_field = isinstance(field, models.FileField)
self.needs_add_label = field.rel and (isinstance(field.rel, models.ManyToOneRel) or isinstance(field.rel, models.ManyToManyRel)) and field.rel.to._meta.admin
self.hidden = isinstance(self.field, models.AutoField)
self.first = False
classes = []
if self.raw_id_admin:
classes.append('nowrap')
if max([bool(f.errors()) for f in self.form_fields]):
classes.append('error')
if classes:
self.cell_class_attribute = u' class="%s" ' % ' '.join(classes)
self._repr_filled = False
if field.rel:
self.related_url = mark_safe(u'../../../%s/%s/'
% (field.rel.to._meta.app_label,
field.rel.to._meta.object_name.lower()))
def original_value(self):
if self.original:
return self.original.__dict__[self.field.attname]
def existing_display(self):
try:
return self._display
except AttributeError:
if isinstance(self.field.rel, models.ManyToOneRel):
self._display = force_unicode(getattr(self.original, self.field.name), strings_only=True)
elif isinstance(self.field.rel, models.ManyToManyRel):
self._display = u", ".join([force_unicode(obj) for obj in getattr(self.original, self.field.name).all()])
return self._display
def __repr__(self):
return repr(self.__dict__)
def html_error_list(self):
return mark_safe(" ".join([form_field.html_error_list() for form_field in self.form_fields if form_field.errors]))
def original_url(self):
if self.is_file_field and self.original and self.field.attname:
url_method = getattr(self.original, 'get_%s_url' % self.field.attname)
if callable(url_method):
return url_method()
return ''
class AdminBoundFieldLine(object):
def __init__(self, field_line, field_mapping, original):
self.bound_fields = [field.bind(field_mapping, original, AdminBoundField) for field in field_line]
for bound_field in self:
bound_field.first = True
break
def __iter__(self):
for bound_field in self.bound_fields:
yield bound_field
def __len__(self):
return len(self.bound_fields)
class AdminBoundFieldSet(object):
def __init__(self, field_set, field_mapping, original):
self.name = field_set.name
self.classes = field_set.classes
self.description = field_set.description
self.bound_field_lines = [field_line.bind(field_mapping, original, AdminBoundFieldLine) for field_line in field_set]
def __iter__(self):
for bound_field_line in self.bound_field_lines:
yield bound_field_line
def __len__(self):
return len(self.bound_field_lines)
def render_change_form(model, manipulator, context, add=False, change=False, form_url=''):
opts = model._meta
app_label = opts.app_label
auto_populated_fields = [f for f in opts.fields if f.prepopulate_from]
field_sets = opts.admin.get_field_sets(opts)
original = getattr(manipulator, 'original_object', None)
bound_field_sets = [field_set.bind(context['form'], original, AdminBoundFieldSet) for field_set in field_sets]
    first_form_field_id = bound_field_sets[0].bound_field_lines[0].bound_fields[0].form_fields[0].get_id()
ordered_objects = opts.get_ordered_objects()
inline_related_objects = opts.get_followed_related_objects(manipulator.follow)
extra_context = {
'add': add,
'change': change,
'has_delete_permission': context['perms'][app_label][opts.get_delete_permission()],
'has_change_permission': context['perms'][app_label][opts.get_change_permission()],
'has_file_field': opts.has_field_type(models.FileField),
'has_absolute_url': hasattr(model, 'get_absolute_url'),
'auto_populated_fields': auto_populated_fields,
'bound_field_sets': bound_field_sets,
'first_form_field_id': first_form_field_id,
'javascript_imports': get_javascript_imports(opts, auto_populated_fields, field_sets),
'ordered_objects': ordered_objects,
'inline_related_objects': inline_related_objects,
'form_url': mark_safe(form_url),
'opts': opts,
'content_type_id': ContentType.objects.get_for_model(model).id,
}
context.update(extra_context)
return render_to_response([
"admin/%s/%s/change_form.html" % (app_label, opts.object_name.lower()),
"admin/%s/change_form.html" % app_label,
"admin/change_form.html"], context_instance=context)
def index(request):
return render_to_response('admin/index.html', {'title': _('Site administration')}, context_instance=template.RequestContext(request))
index = staff_member_required(never_cache(index))
def add_stage(request, app_label, model_name, show_delete=False, form_url='', post_url=None, post_url_continue='../%s/', object_id_override=None):
model = models.get_model(app_label, model_name)
if model is None:
raise Http404("App %r, model %r, not found" % (app_label, model_name))
opts = model._meta
if not request.user.has_perm(app_label + '.' + opts.get_add_permission()):
raise PermissionDenied
if post_url is None:
if request.user.has_perm(app_label + '.' + opts.get_change_permission()):
# redirect to list view
post_url = '../'
else:
# Object list will give 'Permission Denied', so go back to admin home
post_url = '../../../'
manipulator = model.AddManipulator()
if request.POST:
new_data = request.POST.copy()
if opts.has_field_type(models.FileField):
new_data.update(request.FILES)
errors = manipulator.get_validation_errors(new_data)
manipulator.do_html2python(new_data)
if not errors:
new_object = manipulator.save(new_data)
pk_value = new_object._get_pk_val()
LogEntry.objects.log_action(request.user.id, ContentType.objects.get_for_model(model).id, pk_value, force_unicode(new_object), ADDITION)
msg = _('The %(name)s "%(obj)s" was added successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(new_object)}
# Here, we distinguish between different save types by checking for
# the presence of keys in request.POST.
if "_continue" in request.POST:
request.user.message_set.create(message=msg + ' ' + _("You may edit it again below."))
if "_popup" in request.POST:
post_url_continue += "?_popup=1"
return HttpResponseRedirect(post_url_continue % pk_value)
if "_popup" in request.POST:
return HttpResponse('<script type="text/javascript">opener.dismissAddAnotherPopup(window, "%s", "%s");</script>' % \
# escape() calls force_unicode.
(escape(pk_value), escape(new_object)))
elif "_addanother" in request.POST:
request.user.message_set.create(message=msg + ' ' + (_("You may add another %s below.") % force_unicode(opts.verbose_name)))
return HttpResponseRedirect(request.path)
else:
request.user.message_set.create(message=msg)
return HttpResponseRedirect(post_url)
else:
# Add default data.
new_data = manipulator.flatten_data()
# Override the defaults with GET params, if they exist.
new_data.update(dict(request.GET.items()))
errors = {}
# Populate the FormWrapper.
form = oldforms.FormWrapper(manipulator, new_data, errors)
c = template.RequestContext(request, {
'title': _('Add %s') % force_unicode(opts.verbose_name),
'form': form,
'is_popup': '_popup' in request.REQUEST,
'show_delete': show_delete,
})
if object_id_override is not None:
c['object_id'] = object_id_override
return render_change_form(model, manipulator, c, add=True)
add_stage = staff_member_required(never_cache(add_stage))
def change_stage(request, app_label, model_name, object_id):
model = models.get_model(app_label, model_name)
object_id = unquote(object_id)
if model is None:
raise Http404("App %r, model %r, not found" % (app_label, model_name))
opts = model._meta
if not request.user.has_perm(app_label + '.' + opts.get_change_permission()):
raise PermissionDenied
if request.POST and "_saveasnew" in request.POST:
return add_stage(request, app_label, model_name, form_url='../../add/')
try:
manipulator = model.ChangeManipulator(object_id)
except model.DoesNotExist:
raise Http404('%s object with primary key %r does not exist' % (model_name, escape(object_id)))
if request.POST:
new_data = request.POST.copy()
if opts.has_field_type(models.FileField):
new_data.update(request.FILES)
errors = manipulator.get_validation_errors(new_data)
manipulator.do_html2python(new_data)
if not errors:
new_object = manipulator.save(new_data)
pk_value = new_object._get_pk_val()
# Construct the change message.
change_message = []
if manipulator.fields_added:
change_message.append(_('Added %s.') % get_text_list(manipulator.fields_added, _('and')))
if manipulator.fields_changed:
change_message.append(_('Changed %s.') % get_text_list(manipulator.fields_changed, _('and')))
if manipulator.fields_deleted:
change_message.append(_('Deleted %s.') % get_text_list(manipulator.fields_deleted, _('and')))
change_message = ' '.join(change_message)
if not change_message:
change_message = _('No fields changed.')
LogEntry.objects.log_action(request.user.id, ContentType.objects.get_for_model(model).id, pk_value, force_unicode(new_object), CHANGE, change_message)
msg = _('The %(name)s "%(obj)s" was changed successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(new_object)}
if "_continue" in request.POST:
request.user.message_set.create(message=msg + ' ' + _("You may edit it again below."))
if '_popup' in request.REQUEST:
return HttpResponseRedirect(request.path + "?_popup=1")
else:
return HttpResponseRedirect(request.path)
elif "_saveasnew" in request.POST:
request.user.message_set.create(message=_('The %(name)s "%(obj)s" was added successfully. You may edit it again below.') % {'name': force_unicode(opts.verbose_name), 'obj': force_unicode(new_object)})
return HttpResponseRedirect("../%s/" % pk_value)
elif "_addanother" in request.POST:
request.user.message_set.create(message=msg + ' ' + (_("You may add another %s below.") % force_unicode(opts.verbose_name)))
return HttpResponseRedirect("../add/")
else:
request.user.message_set.create(message=msg)
return HttpResponseRedirect("../")
else:
# Populate new_data with a "flattened" version of the current data.
new_data = manipulator.flatten_data()
# TODO: do this in flatten_data...
# If the object has ordered objects on its admin page, get the existing
# order and flatten it into a comma-separated list of IDs.
id_order_list = []
for rel_obj in opts.get_ordered_objects():
id_order_list.extend(getattr(manipulator.original_object, 'get_%s_order' % rel_obj.object_name.lower())())
if id_order_list:
new_data['order_'] = ','.join(map(str, id_order_list))
errors = {}
# Populate the FormWrapper.
form = oldforms.FormWrapper(manipulator, new_data, errors)
form.original = manipulator.original_object
form.order_objects = []
#TODO Should be done in flatten_data / FormWrapper construction
for related in opts.get_followed_related_objects():
wrt = related.opts.order_with_respect_to
if wrt and wrt.rel and wrt.rel.to == opts:
func = getattr(manipulator.original_object, 'get_%s_list' %
related.get_accessor_name())
orig_list = func()
form.order_objects.extend(orig_list)
c = template.RequestContext(request, {
'title': _('Change %s') % force_unicode(opts.verbose_name),
'form': form,
'object_id': object_id,
'original': manipulator.original_object,
'is_popup': '_popup' in request.REQUEST,
})
return render_change_form(model, manipulator, c, change=True)
change_stage = staff_member_required(never_cache(change_stage))
def _nest_help(obj, depth, val):
current = obj
for i in range(depth):
current = current[-1]
current.append(val)
def _get_deleted_objects(deleted_objects, perms_needed, user, obj, opts, current_depth):
"Helper function that recursively populates deleted_objects."
nh = _nest_help # Bind to local variable for performance
if current_depth > 16:
return # Avoid recursing too deep.
opts_seen = []
for related in opts.get_all_related_objects():
if related.opts in opts_seen:
continue
opts_seen.append(related.opts)
rel_opts_name = related.get_accessor_name()
if isinstance(related.field.rel, models.OneToOneRel):
try:
sub_obj = getattr(obj, rel_opts_name)
except ObjectDoesNotExist:
pass
else:
if related.opts.admin:
p = '%s.%s' % (related.opts.app_label, related.opts.get_delete_permission())
if not user.has_perm(p):
perms_needed.add(related.opts.verbose_name)
# We don't care about populating deleted_objects now.
continue
if related.field.rel.edit_inline or not related.opts.admin:
# Don't display link to edit, because it either has no
# admin or is edited inline.
nh(deleted_objects, current_depth, [mark_safe(u'%s: %s' % (force_unicode(capfirst(related.opts.verbose_name)), sub_obj)), []])
else:
# Display a link to the admin page.
nh(deleted_objects, current_depth, [mark_safe(u'%s: <a href="../../../../%s/%s/%s/">%s</a>' %
(escape(force_unicode(capfirst(related.opts.verbose_name))),
related.opts.app_label,
related.opts.object_name.lower(),
sub_obj._get_pk_val(), sub_obj)), []])
_get_deleted_objects(deleted_objects, perms_needed, user, sub_obj, related.opts, current_depth+2)
else:
has_related_objs = False
for sub_obj in getattr(obj, rel_opts_name).all():
has_related_objs = True
if related.field.rel.edit_inline or not related.opts.admin:
# Don't display link to edit, because it either has no
# admin or is edited inline.
nh(deleted_objects, current_depth, [u'%s: %s' % (force_unicode(capfirst(related.opts.verbose_name)), escape(sub_obj)), []])
else:
# Display a link to the admin page.
nh(deleted_objects, current_depth, [mark_safe(u'%s: <a href="../../../../%s/%s/%s/">%s</a>' % \
(escape(force_unicode(capfirst(related.opts.verbose_name))), related.opts.app_label, related.opts.object_name.lower(), sub_obj._get_pk_val(), escape(sub_obj))), []])
_get_deleted_objects(deleted_objects, perms_needed, user, sub_obj, related.opts, current_depth+2)
# If there were related objects, and the user doesn't have
# permission to delete them, add the missing perm to perms_needed.
if related.opts.admin and has_related_objs:
p = '%s.%s' % (related.opts.app_label, related.opts.get_delete_permission())
if not user.has_perm(p):
perms_needed.add(related.opts.verbose_name)
for related in opts.get_all_related_many_to_many_objects():
if related.opts in opts_seen:
continue
opts_seen.append(related.opts)
rel_opts_name = related.get_accessor_name()
has_related_objs = False
# related.get_accessor_name() could return None for symmetrical relationships
if rel_opts_name:
rel_objs = getattr(obj, rel_opts_name, None)
if rel_objs:
has_related_objs = True
if has_related_objs:
for sub_obj in rel_objs.all():
if related.field.rel.edit_inline or not related.opts.admin:
# Don't display link to edit, because it either has no
# admin or is edited inline.
nh(deleted_objects, current_depth, [_('One or more %(fieldname)s in %(name)s: %(obj)s') % \
{'fieldname': force_unicode(related.field.verbose_name), 'name': force_unicode(related.opts.verbose_name), 'obj': escape(sub_obj)}, []])
else:
# Display a link to the admin page.
nh(deleted_objects, current_depth, [
mark_safe((_('One or more %(fieldname)s in %(name)s:') % {'fieldname': escape(force_unicode(related.field.verbose_name)), 'name': escape(force_unicode(related.opts.verbose_name))}) + \
(u' <a href="../../../../%s/%s/%s/">%s</a>' % \
(related.opts.app_label, related.opts.module_name, sub_obj._get_pk_val(), escape(sub_obj)))), []])
# If there were related objects, and the user doesn't have
# permission to change them, add the missing perm to perms_needed.
if related.opts.admin and has_related_objs:
p = u'%s.%s' % (related.opts.app_label, related.opts.get_change_permission())
if not user.has_perm(p):
perms_needed.add(related.opts.verbose_name)
def delete_stage(request, app_label, model_name, object_id):
model = models.get_model(app_label, model_name)
object_id = unquote(object_id)
if model is None:
raise Http404("App %r, model %r, not found" % (app_label, model_name))
opts = model._meta
if not request.user.has_perm(app_label + '.' + opts.get_delete_permission()):
raise PermissionDenied
obj = get_object_or_404(model, pk=object_id)
# Populate deleted_objects, a data structure of all related objects that
# will also be deleted.
deleted_objects = [mark_safe(u'%s: <a href="../../%s/">%s</a>' % (escape(force_unicode(capfirst(opts.verbose_name))), force_unicode(object_id), escape(obj))), []]
perms_needed = set()
_get_deleted_objects(deleted_objects, perms_needed, request.user, obj, opts, 1)
if request.POST: # The user has already confirmed the deletion.
if perms_needed:
raise PermissionDenied
obj_display = force_unicode(obj)
obj.delete()
LogEntry.objects.log_action(request.user.id, ContentType.objects.get_for_model(model).id, object_id, obj_display, DELETION)
request.user.message_set.create(message=_('The %(name)s "%(obj)s" was deleted successfully.') % {'name': force_unicode(opts.verbose_name), 'obj': obj_display})
return HttpResponseRedirect("../../")
extra_context = {
"title": _("Are you sure?"),
"object_name": force_unicode(opts.verbose_name),
"object": obj,
"deleted_objects": deleted_objects,
"perms_lacking": perms_needed,
"opts": model._meta,
}
return render_to_response(["admin/%s/%s/delete_confirmation.html" % (app_label, opts.object_name.lower() ),
"admin/%s/delete_confirmation.html" % app_label ,
"admin/delete_confirmation.html"], extra_context, context_instance=template.RequestContext(request))
delete_stage = staff_member_required(never_cache(delete_stage))
def history(request, app_label, model_name, object_id):
model = models.get_model(app_label, model_name)
object_id = unquote(object_id)
if model is None:
raise Http404("App %r, model %r, not found" % (app_label, model_name))
action_list = LogEntry.objects.filter(object_id=object_id,
content_type__id__exact=ContentType.objects.get_for_model(model).id).select_related().order_by('action_time')
# If no history was found, see whether this object even exists.
obj = get_object_or_404(model, pk=object_id)
extra_context = {
'title': _('Change history: %s') % obj,
'action_list': action_list,
'module_name': force_unicode(capfirst(model._meta.verbose_name_plural)),
'object': obj,
}
return render_to_response(["admin/%s/%s/object_history.html" % (app_label, model._meta.object_name.lower()),
"admin/%s/object_history.html" % app_label ,
"admin/object_history.html"], extra_context, context_instance=template.RequestContext(request))
history = staff_member_required(never_cache(history))
class ChangeList(object):
def __init__(self, request, model):
self.model = model
self.opts = model._meta
self.lookup_opts = self.opts
self.manager = self.opts.admin.manager
# Get search parameters from the query string.
try:
self.page_num = int(request.GET.get(PAGE_VAR, 0))
except ValueError:
self.page_num = 0
self.show_all = ALL_VAR in request.GET
self.is_popup = IS_POPUP_VAR in request.GET
self.params = dict(request.GET.items())
if PAGE_VAR in self.params:
del self.params[PAGE_VAR]
if ERROR_FLAG in self.params:
del self.params[ERROR_FLAG]
self.order_field, self.order_type = self.get_ordering()
self.query = request.GET.get(SEARCH_VAR, '')
self.query_set = self.get_query_set()
self.get_results(request)
self.title = (self.is_popup and _('Select %s') % force_unicode(self.opts.verbose_name) or _('Select %s to change') % force_unicode(self.opts.verbose_name))
self.filter_specs, self.has_filters = self.get_filters(request)
self.pk_attname = self.lookup_opts.pk.attname
def get_filters(self, request):
filter_specs = []
if self.lookup_opts.admin.list_filter and not self.opts.one_to_one_field:
filter_fields = [self.lookup_opts.get_field(field_name) \
for field_name in self.lookup_opts.admin.list_filter]
for f in filter_fields:
spec = FilterSpec.create(f, request, self.params, self.model)
if spec and spec.has_output():
filter_specs.append(spec)
return filter_specs, bool(filter_specs)
def get_query_string(self, new_params=None, remove=None):
if new_params is None: new_params = {}
if remove is None: remove = []
p = self.params.copy()
for r in remove:
for k in p.keys():
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if k in p and v is None:
del p[k]
elif v is not None:
p[k] = v
return mark_safe('?' + '&'.join([u'%s=%s' % (k, v) for k, v in p.items()]).replace(' ', '%20'))
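    # Illustrative behaviour (hypothetical params): with self.params == {'p': '2', 'q': 'foo'},
    #   self.get_query_string({'o': '1'}, remove=['q']) -> '?p=2&o=1'
    # (parameter order in the output follows dict order and is therefore unspecified)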
def get_results(self, request):
paginator = QuerySetPaginator(self.query_set, self.lookup_opts.admin.list_per_page)
# Get the number of objects, with admin filters applied.
try:
result_count = paginator.count
# Naked except! Because we don't have any other way of validating
# "params". They might be invalid if the keyword arguments are
# incorrect, or if the values are not in the correct type (which would
# result in a database error).
except:
raise IncorrectLookupParameters
# Get the total number of objects, with no admin filters applied.
# Perform a slight optimization: Check to see whether any filters were
# given. If not, use paginator.hits to calculate the number of objects,
# because we've already done paginator.hits and the value is cached.
if not self.query_set.query.where:
full_result_count = result_count
else:
full_result_count = self.manager.count()
can_show_all = result_count <= MAX_SHOW_ALL_ALLOWED
multi_page = result_count > self.lookup_opts.admin.list_per_page
# Get the list of objects to display on this page.
if (self.show_all and can_show_all) or not multi_page:
result_list = list(self.query_set)
else:
try:
result_list = paginator.page(self.page_num+1).object_list
except InvalidPage:
result_list = ()
self.result_count = result_count
self.full_result_count = full_result_count
self.result_list = result_list
self.can_show_all = can_show_all
self.multi_page = multi_page
self.paginator = paginator
def get_ordering(self):
lookup_opts, params = self.lookup_opts, self.params
# For ordering, first check the "ordering" parameter in the admin
# options, then check the object's default ordering. If neither of
# those exist, order descending by ID by default. Finally, look for
# manually-specified ordering from the query string.
ordering = lookup_opts.admin.ordering or lookup_opts.ordering or ['-' + lookup_opts.pk.name]
if ordering[0].startswith('-'):
order_field, order_type = ordering[0][1:], 'desc'
else:
order_field, order_type = ordering[0], 'asc'
if ORDER_VAR in params:
try:
field_name = lookup_opts.admin.list_display[int(params[ORDER_VAR])]
try:
f = lookup_opts.get_field(field_name)
except models.FieldDoesNotExist:
# see if field_name is a name of a non-field
# that allows sorting
try:
attr = getattr(lookup_opts.admin.manager.model, field_name)
order_field = attr.admin_order_field
except AttributeError:
pass
else:
if not isinstance(f.rel, models.ManyToOneRel) or not f.null:
order_field = f.name
except (IndexError, ValueError):
pass # Invalid ordering specified. Just use the default.
if ORDER_TYPE_VAR in params and params[ORDER_TYPE_VAR] in ('asc', 'desc'):
order_type = params[ORDER_TYPE_VAR]
return order_field, order_type
def get_query_set(self):
qs = self.manager.get_query_set()
lookup_params = self.params.copy() # a dictionary of the query string
for i in (ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR):
if i in lookup_params:
del lookup_params[i]
for key, value in lookup_params.items():
if not isinstance(key, str):
# 'key' will be used as a keyword argument later, so Python
# requires it to be a string.
del lookup_params[key]
lookup_params[smart_str(key)] = value
# Apply lookup parameters from the query string.
qs = qs.filter(**lookup_params)
# Use select_related() if one of the list_display options is a field
# with a relationship.
if self.lookup_opts.admin.list_select_related:
qs = qs.select_related()
else:
for field_name in self.lookup_opts.admin.list_display:
try:
f = self.lookup_opts.get_field(field_name)
except models.FieldDoesNotExist:
pass
else:
if isinstance(f.rel, models.ManyToOneRel):
qs = qs.select_related()
break
# Set ordering.
if self.order_field:
qs = qs.order_by('%s%s' % ((self.order_type == 'desc' and '-' or ''), self.order_field))
# Apply keyword searches.
def construct_search(field_name):
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__search" % field_name[1:]
else:
return "%s__icontains" % field_name
if self.lookup_opts.admin.search_fields and self.query:
for bit in self.query.split():
or_queries = [models.Q(**{construct_search(field_name): bit}) for field_name in self.lookup_opts.admin.search_fields]
other_qs = QuerySet(self.model)
other_qs.dup_select_related(qs)
other_qs = other_qs.filter(reduce(operator.or_, or_queries))
qs = qs & other_qs
if self.opts.one_to_one_field:
qs = qs.complex_filter(self.opts.one_to_one_field.rel.limit_choices_to)
return qs
def url_for_result(self, result):
return "%s/" % quote(getattr(result, self.pk_attname))
def change_list(request, app_label, model_name):
model = models.get_model(app_label, model_name)
if model is None:
raise Http404("App %r, model %r, not found" % (app_label, model_name))
if not request.user.has_perm(app_label + '.' + model._meta.get_change_permission()):
raise PermissionDenied
try:
cl = ChangeList(request, model)
except IncorrectLookupParameters:
# Wacky lookup parameters were given, so redirect to the main
# changelist page, without parameters, and pass an 'invalid=1'
# parameter via the query string. If wacky parameters were given and
# the 'invalid=1' parameter was already in the query string, something
# is screwed up with the database, so display an error page.
if ERROR_FLAG in request.GET.keys():
return render_to_response('admin/invalid_setup.html', {'title': _('Database error')})
return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')
c = template.RequestContext(request, {
'title': cl.title,
'is_popup': cl.is_popup,
'cl': cl,
})
    c.update({'has_add_permission': c['perms'][app_label][cl.opts.get_add_permission()]})
return render_to_response(['admin/%s/%s/change_list.html' % (app_label, cl.opts.object_name.lower()),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'], context_instance=c)
change_list = staff_member_required(never_cache(change_list))
|
|
import os
import re
import json
import base64
import logging
import datetime
import time
import copy
import decimal
import cgi
import itertools
import StringIO
import subprocess
import gevent
import gevent.pool
import gevent.ssl
import numpy
import pymongo
from geventhttpclient import HTTPClient
from geventhttpclient.url import URL
import lxml.html
from PIL import Image
import dateutil.parser
import calendar
import pygeoip
import hashlib
from jsonschema import FormatChecker, Draft4Validator, FormatError
# not needed here, but imported to ensure they are installed
import strict_rfc3339, rfc3987, aniso8601
from lib import config, util_czarcoin
JSONRPC_API_REQUEST_TIMEOUT = 10 #in seconds
D = decimal.Decimal
def sanitize_eliteness(text):
#strip out html data to avoid XSS-vectors
return cgi.escape(lxml.html.document_fromstring(text).text_content())
#^ wrap in cgi.escape - see https://github.com/mitotic/graphterm/issues/5
def http_basic_auth_str(username, password):
"""Returns a Basic Auth string."""
authstr = 'Basic ' + str(base64.b64encode(('%s:%s' % (username, password)).encode('latin1')).strip())
return authstr
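# Minimal sketch of the expected output (standard HTTP Basic scheme):
#   http_basic_auth_str('user', 'pass') -> 'Basic dXNlcjpwYXNz'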
def is_valid_url(url, suffix='', allow_localhost=False, allow_no_protocol=False):
if url is None:
return False
    # NOTE: keep the protocol alternative separate -- inlining the conditional
    # expression would bind the rest of the pattern to the else branch only
    protocol_part = r'^https?://' if not allow_no_protocol else r'^(https?://)?'  # http:// or https://
    regex = re.compile(
        protocol_part +
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|'  # domain...
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)%s$' % (re.escape(suffix) if suffix else ''), re.IGNORECASE)
if not allow_localhost:
if re.search(r'^https?://localhost', url, re.IGNORECASE) or re.search(r'^https?://127', url, re.IGNORECASE):
return None
return regex.search(url)
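# Illustrative behaviour (hypothetical URLs):
#   is_valid_url('https://example.com/feed.json', suffix='.json') -> match object (truthy)
#   is_valid_url('http://localhost:8080/')                        -> None (localhost rejected by default)
#   is_valid_url('example.com', allow_no_protocol=True)           -> match object (truthy)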
def assets_to_asset_pair(asset1, asset2):
base, quote = None, None
for quote_asset in config.QUOTE_ASSETS:
if asset1 == quote_asset or asset2 == quote_asset:
base, quote = (asset2, asset1) if asset1 == quote_asset else (asset1, asset2)
break
else:
base, quote = (asset1, asset2) if asset1 < asset2 else (asset2, asset1)
return (base, quote)
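# Hedged sketch: assuming config.QUOTE_ASSETS contained 'XCZR', then:
#   assets_to_asset_pair('FOO', 'XCZR') -> ('FOO', 'XCZR')  # the quote asset takes the quote slot
#   assets_to_asset_pair('FOO', 'BAR')  -> ('BAR', 'FOO')   # otherwise lexicographic: lower name is base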
def call_jsonrpc_api(method, params=None, endpoint=None, auth=None, abort_on_error=False):
if not endpoint: endpoint = config.CZARPARTYD_RPC
if not auth: auth = config.CZARPARTYD_AUTH
if not params: params = {}
payload = {
"id": 0,
"jsonrpc": "2.0",
"method": method,
"params": params,
}
headers = {
'Content-Type': 'application/json; charset=UTF-8',
'Accept': 'application/json, text/javascript',
'Connection':'close', #no keepalive
}
if auth:
#auth should be a (username, password) tuple, if specified
headers['Authorization'] = http_basic_auth_str(auth[0], auth[1])
    client = None
    try:
        u = URL(endpoint)
client = HTTPClient.from_url(u, connection_timeout=JSONRPC_API_REQUEST_TIMEOUT,
network_timeout=JSONRPC_API_REQUEST_TIMEOUT)
r = client.post(u.request_uri, body=json.dumps(payload), headers=headers)
except Exception, e:
raise Exception("Got call_jsonrpc_api request error: %s" % e)
else:
if r.status_code != 200 and abort_on_error:
raise Exception("Bad status code returned from czarpartyd: '%s'. result body: '%s'." % (r.status_code, r.read()))
result = json.loads(r.read())
    finally:
        if client:
            client.close()
if abort_on_error and 'error' in result:
raise Exception("Got back error from server: %s" % result['error'])
return result
def get_url(url, abort_on_error=False, is_json=True, fetch_timeout=5):
headers = { 'Connection':'close', } #no keepalive
    client = None
    try:
u = URL(url)
client_kwargs = {'connection_timeout': fetch_timeout, 'network_timeout': fetch_timeout, 'insecure': True}
if u.scheme == "https": client_kwargs['ssl_options'] = {'cert_reqs': gevent.ssl.CERT_NONE}
client = HTTPClient.from_url(u, **client_kwargs)
r = client.get(u.request_uri, headers=headers)
except Exception, e:
raise Exception("Got get_url request error: %s" % e)
else:
if r.status_code != 200 and abort_on_error:
raise Exception("Bad status code returned: '%s'. result body: '%s'." % (r.status_code, r.read()))
result = json.loads(r.read()) if is_json else r.read()
    finally:
        if client:
            client.close()
return result
def get_address_cols_for_entity(entity):
if entity in ['debits', 'credits']:
return ['address',]
elif entity in ['issuances',]:
return ['issuer',]
elif entity in ['sends', 'dividends', 'bets', 'cancels', 'callbacks', 'orders', 'burns', 'broadcasts', 'czrpays']:
return ['source',]
#elif entity in ['order_matches', 'bet_matches']:
elif entity in ['order_matches', 'order_expirations', 'order_match_expirations',
'bet_matches', 'bet_expirations', 'bet_match_expirations']:
return ['tx0_address', 'tx1_address']
else:
raise Exception("Unknown entity type: %s" % entity)
def grouper(n, iterable, fillmissing=False, fillvalue=None):
#Modified from http://stackoverflow.com/a/1625013
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
data = itertools.izip_longest(*args, fillvalue=fillvalue)
if not fillmissing:
data = [[e for e in g if e != fillvalue] for g in data]
return data
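# Illustrative behaviour:
#   grouper(3, 'ABCDEFG')                                  -> [['A','B','C'], ['D','E','F'], ['G']]
#   grouper(3, 'ABCDEFG', fillmissing=True, fillvalue='x') -> iterator of ('A','B','C'), ('D','E','F'), ('G','x','x')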
def multikeysort(items, columns):
"""http://stackoverflow.com/a/1144405"""
from operator import itemgetter
comparers = [ ((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1)) for col in columns]
def comparer(left, right):
        for fn, mult in comparers:
            result = cmp(fn(left), fn(right))
            if result:
                return mult * result
        return 0
return sorted(items, cmp=comparer)
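# Usage sketch (hypothetical records): sort by age descending, then name ascending:
#   rows = [{'name': 'b', 'age': 30}, {'name': 'a', 'age': 30}, {'name': 'c', 'age': 40}]
#   multikeysort(rows, ['-age', 'name'])
#   -> [{'name': 'c', 'age': 40}, {'name': 'a', 'age': 30}, {'name': 'b', 'age': 30}]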
def moving_average(samples, n=3) :
ret = numpy.cumsum(samples, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
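# Worked example: moving_average([1, 2, 3, 4, 5], n=3)
#   cumsum -> [1, 3, 6, 10, 15]; windowed sums -> [6, 9, 12]; result -> array([2., 3., 4.])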
def weighted_average(value_weight_list):
"""Takes a list of tuples (value, weight) and returns weighted average as
calculated by Sum of all values * weights / Sum of all weights
http://bcdcspatial.blogspot.com/2010/08/simple-weighted-average-with-python.html
"""
numerator = sum([v * w for v,w in value_weight_list])
denominator = sum([w for v,w in value_weight_list])
if(denominator != 0):
return(float(numerator) / float(denominator))
else:
return None
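# Worked example: weighted_average([(1, 2), (3, 4)]) -> (1*2 + 3*4) / (2 + 4) = 14/6 = 2.333...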
def json_dthandler(obj):
if hasattr(obj, 'timetuple'): #datetime object
#give datetime objects to javascript as epoch ts in ms (i.e. * 1000)
return int(time.mktime(obj.timetuple())) * 1000
else:
raise TypeError, 'Object of type %s with value of %s is not JSON serializable' % (type(obj), repr(obj))
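# Usage sketch: pass as the `default` hook so datetimes serialize as epoch milliseconds:
#   json.dumps({'when': datetime.datetime(2014, 1, 1)}, default=json_dthandler)
#   -> '{"when": 1388534400000}' on a UTC host (time.mktime() interprets the
#      timetuple as local time, so the exact value is timezone-dependent)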
def get_block_indexes_for_dates(start_dt=None, end_dt=None):
"""Returns a 2 tuple (start_block, end_block) result for the block range that encompasses the given start_date
and end_date unix timestamps"""
mongo_db = config.mongo_db
if start_dt is None:
start_block_index = config.BLOCK_FIRST
else:
start_block = mongo_db.processed_blocks.find_one({"block_time": {"$lte": start_dt} }, sort=[("block_time", pymongo.DESCENDING)])
start_block_index = config.BLOCK_FIRST if not start_block else start_block['block_index']
if end_dt is None:
end_block_index = config.CURRENT_BLOCK_INDEX
else:
end_block = mongo_db.processed_blocks.find_one({"block_time": {"$gte": end_dt} }, sort=[("block_time", pymongo.ASCENDING)])
if not end_block:
end_block_index = mongo_db.processed_blocks.find_one(sort=[("block_index", pymongo.DESCENDING)])['block_index']
else:
end_block_index = end_block['block_index']
return (start_block_index, end_block_index)
def get_block_time(block_index):
"""TODO: implement result caching to avoid having to go out to the database"""
block = config.mongo_db.processed_blocks.find_one({"block_index": block_index })
if not block: return None
return block['block_time']
def decorate_message(message, for_txn_history=False):
    #insert custom fields for certain event categories...
    #even invalid actions need these extra fields for proper reporting to the client (as the reporting message
    # is produced via PendingActionViewModel.calcText) -- however, this must be able to deal with the
    # queried data not existing in that case
assert '_category' in message
mongo_db = config.mongo_db
if for_txn_history:
message['_command'] = 'insert' #history data doesn't include this
block_index = message['block_index'] if 'block_index' in message else message['tx1_block_index']
message['_block_time'] = get_block_time(block_index)
message['_tx_index'] = message['tx_index'] if 'tx_index' in message else message.get('tx1_index', None)
if message['_category'] in ['bet_expirations', 'order_expirations', 'bet_match_expirations', 'order_match_expirations']:
message['_tx_index'] = 0 #add tx_index to all entries (so we can sort on it secondarily in history view), since these lack it
if message['_category'] in ['credits', 'debits']:
#find the last balance change on record
bal_change = mongo_db.balance_changes.find_one({ 'address': message['address'], 'asset': message['asset'] },
sort=[("block_time", pymongo.DESCENDING)])
message['_quantity_normalized'] = abs(bal_change['quantity_normalized']) if bal_change else None
message['_balance'] = bal_change['new_balance'] if bal_change else None
message['_balance_normalized'] = bal_change['new_balance_normalized'] if bal_change else None
if message['_category'] in ['orders',] and message['_command'] == 'insert':
get_asset_info = mongo_db.tracked_assets.find_one({'asset': message['get_asset']})
give_asset_info = mongo_db.tracked_assets.find_one({'asset': message['give_asset']})
message['_get_asset_divisible'] = get_asset_info['divisible'] if get_asset_info else None
message['_give_asset_divisible'] = give_asset_info['divisible'] if give_asset_info else None
if message['_category'] in ['order_matches',] and message['_command'] == 'insert':
forward_asset_info = mongo_db.tracked_assets.find_one({'asset': message['forward_asset']})
backward_asset_info = mongo_db.tracked_assets.find_one({'asset': message['backward_asset']})
message['_forward_asset_divisible'] = forward_asset_info['divisible'] if forward_asset_info else None
message['_backward_asset_divisible'] = backward_asset_info['divisible'] if backward_asset_info else None
if message['_category'] in ['orders', 'order_matches',]:
message['_czr_below_dust_limit'] = (
('forward_asset' in message and message['forward_asset'] == config.CZR and message['forward_quantity'] <= config.ORDER_CZR_DUST_LIMIT_CUTOFF)
or ('backward_asset' in message and message['backward_asset'] == config.CZR and message['backward_quantity'] <= config.ORDER_CZR_DUST_LIMIT_CUTOFF)
)
if message['_category'] in ['dividends', 'sends', 'callbacks']:
asset_info = mongo_db.tracked_assets.find_one({'asset': message['asset']})
message['_divisible'] = asset_info['divisible'] if asset_info else None
if message['_category'] in ['issuances',]:
message['_quantity_normalized'] = util_czarcoin.normalize_quantity(message['quantity'], message['divisible'])
return message
def decorate_message_for_feed(msg, msg_data=None):
"""This function takes a message from czarpartyd's message feed and mutates it a bit to be suitable to be
sent through the czarblockd message feed to an end-client"""
if not msg_data:
msg_data = json.loads(msg['bindings'])
message = copy.deepcopy(msg_data)
message['_message_index'] = msg['message_index']
message['_command'] = msg['command']
message['_block_index'] = msg['block_index']
message['_block_time'] = get_block_time(msg['block_index'])
message['_category'] = msg['category']
message['_status'] = msg_data.get('status', 'valid')
message = decorate_message(message)
return message
def is_caught_up_well_enough_for_government_work():
"""We don't want to give users 525 errors or login errors if czarblockd/czarpartyd is in the process of
getting caught up, but we DO if czarblockd is either clearly out of date with the blockchain, or reinitializing its database"""
return config.CAUGHT_UP or (config.BLOCKCHAIN_SERVICE_LAST_BLOCK and config.CURRENT_BLOCK_INDEX >= config.BLOCKCHAIN_SERVICE_LAST_BLOCK - 1)
def stream_fetch(urls, completed_callback, urls_group_size=50, urls_group_time_spacing=0, max_fetch_size=4*1024,
fetch_timeout=1, is_json=True, per_request_complete_callback=None):
completed_urls = {}
def make_stream_request(url):
        client = None
        try:
u = URL(url)
client_kwargs = {'connection_timeout': fetch_timeout, 'network_timeout': fetch_timeout, 'insecure': True}
if u.scheme == "https": client_kwargs['ssl_options'] = {'cert_reqs': gevent.ssl.CERT_NONE}
client = HTTPClient.from_url(u, **client_kwargs)
r = client.get(u.request_uri, headers={'Connection':'close'})
except Exception, e:
data = (False, "Got exception: %s" % e)
else:
if r.status_code != 200:
data = (False, "Got non-successful response code of: %s" % r.status_code)
else:
try:
#read up to max_fetch_size
raw_data = r.read(max_fetch_size)
if is_json: #try to convert to JSON
try:
data = json.loads(raw_data)
except Exception, e:
data = (False, "Invalid JSON data: %s" % e)
else:
data = (True, data)
else: #keep raw
data = (True, raw_data)
except Exception, e:
data = (False, "Request error: %s" % e)
        finally:
            if client:
                client.close()
if per_request_complete_callback:
per_request_complete_callback(url, data)
completed_urls[url] = data
if len(completed_urls) == len(urls): #all done, trigger callback
return completed_callback(completed_urls)
def process_group(group):
group_results = []
pool = gevent.pool.Pool(urls_group_size)
for url in group:
if not is_valid_url(url, allow_no_protocol=True):
completed_urls[url] = (False, "Invalid URL")
if len(completed_urls) == len(urls): #all done, trigger callback
return completed_callback(completed_urls)
else:
continue
assert url.startswith('http://') or url.startswith('https://')
pool.spawn(make_stream_request, url)
pool.join()
if not isinstance(urls, (list, tuple)):
urls = [urls,]
urls = list(set(urls)) #remove duplicates (so we only fetch any given URL, once)
groups = grouper(urls_group_size, urls)
for i in xrange(len(groups)):
#logging.debug("Stream fetching group %i of %i..." % (i, len(groups)))
group = groups[i]
if urls_group_time_spacing and i != 0:
gevent.spawn_later(urls_group_time_spacing * i, process_group, group)
            #^ can lead to overlapping if not careful
else:
process_group(group) #should 'block' until each group processing is complete
def fetch_image(url, folder, filename, max_size=20*1024, formats=['png'], dimensions=(48, 48), fetch_timeout=1):
def make_data_dir(subfolder):
path = os.path.join(config.DATA_DIR, subfolder)
if not os.path.exists(path):
os.makedirs(path)
return path
try:
        #fetch the image data
        client = None
        try:
u = URL(url)
client_kwargs = {'connection_timeout': fetch_timeout, 'network_timeout': fetch_timeout, 'insecure': True}
if u.scheme == "https": client_kwargs['ssl_options'] = {'cert_reqs': gevent.ssl.CERT_NONE}
client = HTTPClient.from_url(u, **client_kwargs)
r = client.get(u.request_uri, headers={'Connection':'close'})
raw_image_data = r.read(max_size) #read up to max_size
except Exception, e:
raise Exception("Got fetch_image request error: %s" % e)
else:
if r.status_code != 200:
raise Exception("Bad status code returned from fetch_image: '%s'" % (r.status_code))
        finally:
            if client:
                client.close()
#decode image data
try:
image = Image.open(StringIO.StringIO(raw_image_data))
except Exception, e:
raise Exception("Unable to parse image data at: %s" % url)
        if image.format.lower() not in formats: raise Exception("Image format is not one of %s: %s (got %s)" % (formats, url, image.format))
        if image.size != dimensions: raise Exception("Image size is not %s: %s (got %s)" % (dimensions, url, image.size))
        if image.mode not in ['RGB', 'RGBA']: raise Exception("Image mode is not RGB/RGBA: %s (got %s)" % (url, image.mode))
imagePath = make_data_dir(folder)
imagePath = os.path.join(imagePath, filename + '.' + image.format.lower())
image.save(imagePath)
os.system("exiftool -q -overwrite_original -all= %s" % imagePath) #strip all metadata, just in case
return True
except Exception, e:
logging.warn(e)
return False
def date_param(strDate):
try:
return calendar.timegm(dateutil.parser.parse(strDate).utctimetuple())
except Exception, e:
return False
def parse_iso8601_interval(value):
try:
return aniso8601.parse_interval(value)
except Exception:
try:
return aniso8601.parse_repeating_interval(value)
except Exception:
raise FormatError('{} is not an iso8601 interval'.format(value))
def is_valid_json(data, schema):
    checker = FormatChecker()
# add the "interval" format
checker.checks("interval")(parse_iso8601_interval)
validator = Draft4Validator(schema, format_checker=checker)
errors = []
for error in validator.iter_errors(data):
errors.append(error.message)
return errors
def next_interval_date(interval):
try:
generator = parse_iso8601_interval(interval)
except Exception, e:
return None
def ts(dt):
return time.mktime(dt.timetuple())
previous = None
next = generator.next()
now = datetime.datetime.now()
while ts(next) < ts(now) and next != previous:
try:
previous = next
next = generator.next()
except Exception, e:
break
if ts(next) < ts(now):
return None
else:
return next.isoformat()
def subprocess_cmd(command):
    process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
proc_stdout = process.communicate()[0].strip()
print proc_stdout
def download_geoip_data():
logging.info("Checking/updating GeoIP.dat ...")
    download = False
data_path = os.path.join(config.DATA_DIR, 'GeoIP.dat')
if not os.path.isfile(data_path):
download = True
else:
one_week_ago = time.time() - 60*60*24*7
file_stat = os.stat(data_path)
if file_stat.st_ctime < one_week_ago:
download = True
if download:
logging.info("Downloading GeoIP.dat")
cmd = "cd {}; wget -N -q http://geolite.maxmind.com/download/geoip/database/GeoLiteCountry/GeoIP.dat.gz; gzip -dfq GeoIP.dat.gz".format(config.DATA_DIR)
subprocess_cmd(cmd)
else:
logging.info("GeoIP.dat OK")
def init_geoip():
    download_geoip_data()
return pygeoip.GeoIP(os.path.join(config.DATA_DIR, 'GeoIP.dat'))
def block_cache(func):
def cached_function(*args, **kwargs):
function_signature = hashlib.sha256(func.__name__ + str(args) + str(kwargs)).hexdigest()
sql = "SELECT block_index FROM blocks ORDER BY block_index DESC LIMIT 1"
block_index = call_jsonrpc_api('sql', {'query': sql, 'bindings': []})['result'][0]['block_index']
cached_result = config.mongo_db.czarblockd_cache.find_one({'block_index': block_index, 'function': function_signature})
if not cached_result or config.TESTNET:
#logging.info("generate cache ({}, {}, {})".format(func.__name__, block_index, function_signature))
try:
result = func(*args, **kwargs)
config.mongo_db.czarblockd_cache.insert({
'block_index': block_index,
'function': function_signature,
'result': json.dumps(result)
})
return result
except Exception, e:
logging.exception(e)
else:
#logging.info("result from cache ({}, {}, {})".format(func.__name__, block_index, function_signature))
result = json.loads(cached_result['result'])
return result
return cached_function
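# Usage sketch (hypothetical function): cache an expensive aggregation per block:
#   @block_cache
#   def get_market_summary(asset1, asset2):
#       ...  # heavy mongo / czarpartyd queries
# The wrapped call is recomputed only once a new block arrives (and caching is
# effectively bypassed on testnet, per the config.TESTNET check above).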
def clean_block_cache(block_index):
#logging.info("clean block cache lower than {}".format(block_index))
config.mongo_db.czarblockd_cache.remove({'block_index': {'$lt': block_index}})
|
|
"""Support for the SpaceAPI."""
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_ICON,
ATTR_LOCATION,
ATTR_STATE,
ATTR_UNIT_OF_MEASUREMENT,
CONF_ADDRESS,
CONF_EMAIL,
CONF_ENTITY_ID,
CONF_SENSORS,
CONF_STATE,
CONF_URL,
)
import homeassistant.core as ha
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
ATTR_ADDRESS = "address"
ATTR_SPACEFED = "spacefed"
ATTR_CAM = "cam"
ATTR_STREAM = "stream"
ATTR_FEEDS = "feeds"
ATTR_CACHE = "cache"
ATTR_PROJECTS = "projects"
ATTR_RADIO_SHOW = "radio_show"
ATTR_LAT = "lat"
ATTR_LON = "lon"
ATTR_API = "api"
ATTR_CLOSE = "close"
ATTR_CONTACT = "contact"
ATTR_ISSUE_REPORT_CHANNELS = "issue_report_channels"
ATTR_LASTCHANGE = "lastchange"
ATTR_LOGO = "logo"
ATTR_NAME = "name"
ATTR_OPEN = "open"
ATTR_SENSORS = "sensors"
ATTR_SPACE = "space"
ATTR_UNIT = "unit"
ATTR_URL = "url"
ATTR_VALUE = "value"
ATTR_SENSOR_LOCATION = "location"
CONF_CONTACT = "contact"
CONF_HUMIDITY = "humidity"
CONF_ICON_CLOSED = "icon_closed"
CONF_ICON_OPEN = "icon_open"
CONF_ICONS = "icons"
CONF_IRC = "irc"
CONF_ISSUE_REPORT_CHANNELS = "issue_report_channels"
CONF_LOCATION = "location"
CONF_SPACEFED = "spacefed"
CONF_SPACENET = "spacenet"
CONF_SPACESAML = "spacesaml"
CONF_SPACEPHONE = "spacephone"
CONF_CAM = "cam"
CONF_STREAM = "stream"
CONF_M4 = "m4"
CONF_MJPEG = "mjpeg"
CONF_USTREAM = "ustream"
CONF_FEEDS = "feeds"
CONF_FEED_BLOG = "blog"
CONF_FEED_WIKI = "wiki"
CONF_FEED_CALENDAR = "calendar"
CONF_FEED_FLICKER = "flickr"  # the SpaceAPI v0.13 feeds key is "flickr"
CONF_FEED_TYPE = "type"
CONF_FEED_URL = "url"
CONF_CACHE = "cache"
CONF_CACHE_SCHEDULE = "schedule"
CONF_PROJECTS = "projects"
CONF_RADIO_SHOW = "radio_show"
CONF_RADIO_SHOW_NAME = "name"
CONF_RADIO_SHOW_URL = "url"
CONF_RADIO_SHOW_TYPE = "type"
CONF_RADIO_SHOW_START = "start"
CONF_RADIO_SHOW_END = "end"
CONF_LOGO = "logo"
CONF_PHONE = "phone"
CONF_SIP = "sip"
CONF_KEYMASTERS = "keymasters"
CONF_KEYMASTER_NAME = "name"
CONF_KEYMASTER_IRC_NICK = "irc_nick"
CONF_KEYMASTER_PHONE = "phone"
CONF_KEYMASTER_EMAIL = "email"
CONF_KEYMASTER_TWITTER = "twitter"
CONF_TWITTER = "twitter"
CONF_FACEBOOK = "facebook"
CONF_IDENTICA = "identica"
CONF_FOURSQUARE = "foursquare"
CONF_ML = "ml"
CONF_JABBER = "jabber"
CONF_ISSUE_MAIL = "issue_mail"
CONF_SPACE = "space"
CONF_TEMPERATURE = "temperature"
DATA_SPACEAPI = "data_spaceapi"
DOMAIN = "spaceapi"
ISSUE_REPORT_CHANNELS = [CONF_EMAIL, CONF_ISSUE_MAIL, CONF_ML, CONF_TWITTER]
SENSOR_TYPES = [CONF_HUMIDITY, CONF_TEMPERATURE]
SPACEAPI_VERSION = "0.13"
URL_API_SPACEAPI = "/api/spaceapi"
LOCATION_SCHEMA = vol.Schema({vol.Optional(CONF_ADDRESS): cv.string})
SPACEFED_SCHEMA = vol.Schema(
{
vol.Optional(CONF_SPACENET): cv.boolean,
vol.Optional(CONF_SPACESAML): cv.boolean,
vol.Optional(CONF_SPACEPHONE): cv.boolean,
}
)
STREAM_SCHEMA = vol.Schema(
{
vol.Optional(CONF_M4): cv.url,
vol.Optional(CONF_MJPEG): cv.url,
vol.Optional(CONF_USTREAM): cv.url,
}
)
FEED_SCHEMA = vol.Schema(
{vol.Optional(CONF_FEED_TYPE): cv.string, vol.Required(CONF_FEED_URL): cv.url}
)
FEEDS_SCHEMA = vol.Schema(
{
vol.Optional(CONF_FEED_BLOG): FEED_SCHEMA,
vol.Optional(CONF_FEED_WIKI): FEED_SCHEMA,
vol.Optional(CONF_FEED_CALENDAR): FEED_SCHEMA,
vol.Optional(CONF_FEED_FLICKER): FEED_SCHEMA,
}
)
CACHE_SCHEMA = vol.Schema(
{
vol.Required(CONF_CACHE_SCHEDULE): cv.matches_regex(
r"(m.02|m.05|m.10|m.15|m.30|h.01|h.02|h.04|h.08|h.12|d.01)"
)
}
)
RADIO_SHOW_SCHEMA = vol.Schema(
{
vol.Required(CONF_RADIO_SHOW_NAME): cv.string,
vol.Required(CONF_RADIO_SHOW_URL): cv.url,
vol.Required(CONF_RADIO_SHOW_TYPE): cv.matches_regex(r"(mp3|ogg)"),
vol.Required(CONF_RADIO_SHOW_START): cv.string,
vol.Required(CONF_RADIO_SHOW_END): cv.string,
}
)
KEYMASTER_SCHEMA = vol.Schema(
{
vol.Optional(CONF_KEYMASTER_NAME): cv.string,
vol.Optional(CONF_KEYMASTER_IRC_NICK): cv.string,
vol.Optional(CONF_KEYMASTER_PHONE): cv.string,
vol.Optional(CONF_KEYMASTER_EMAIL): cv.string,
vol.Optional(CONF_KEYMASTER_TWITTER): cv.string,
}
)
CONTACT_SCHEMA = vol.Schema(
{
vol.Optional(CONF_EMAIL): cv.string,
vol.Optional(CONF_IRC): cv.string,
vol.Optional(CONF_ML): cv.string,
vol.Optional(CONF_PHONE): cv.string,
vol.Optional(CONF_TWITTER): cv.string,
vol.Optional(CONF_SIP): cv.string,
vol.Optional(CONF_FACEBOOK): cv.string,
vol.Optional(CONF_IDENTICA): cv.string,
vol.Optional(CONF_FOURSQUARE): cv.string,
vol.Optional(CONF_JABBER): cv.string,
vol.Optional(CONF_ISSUE_MAIL): cv.string,
vol.Optional(CONF_KEYMASTERS): vol.All(
cv.ensure_list, [KEYMASTER_SCHEMA], vol.Length(min=1)
),
},
required=False,
)
STATE_SCHEMA = vol.Schema(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Inclusive(CONF_ICON_CLOSED, CONF_ICONS): cv.url,
vol.Inclusive(CONF_ICON_OPEN, CONF_ICONS): cv.url,
},
required=False,
)
SENSOR_SCHEMA = vol.Schema(
{vol.In(SENSOR_TYPES): [cv.entity_id], cv.string: [cv.entity_id]}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CONTACT): CONTACT_SCHEMA,
vol.Required(CONF_ISSUE_REPORT_CHANNELS): vol.All(
cv.ensure_list, [vol.In(ISSUE_REPORT_CHANNELS)]
),
vol.Optional(CONF_LOCATION): LOCATION_SCHEMA,
vol.Required(CONF_LOGO): cv.url,
vol.Required(CONF_SPACE): cv.string,
vol.Required(CONF_STATE): STATE_SCHEMA,
vol.Required(CONF_URL): cv.string,
vol.Optional(CONF_SENSORS): SENSOR_SCHEMA,
vol.Optional(CONF_SPACEFED): SPACEFED_SCHEMA,
vol.Optional(CONF_CAM): vol.All(
cv.ensure_list, [cv.url], vol.Length(min=1)
),
vol.Optional(CONF_STREAM): STREAM_SCHEMA,
vol.Optional(CONF_FEEDS): FEEDS_SCHEMA,
vol.Optional(CONF_CACHE): CACHE_SCHEMA,
vol.Optional(CONF_PROJECTS): vol.All(cv.ensure_list, [cv.url]),
vol.Optional(CONF_RADIO_SHOW): vol.All(
cv.ensure_list, [RADIO_SHOW_SCHEMA]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
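# Example configuration.yaml entry (hypothetical values) accepted by
# CONFIG_SCHEMA above:
#
#   spaceapi:
#     space: "My Hackerspace"
#     logo: "https://example.org/logo.png"
#     url: "https://example.org"
#     state:
#       entity_id: "switch.space_open"
#     contact:
#       email: "info@example.org"
#     issue_report_channels:
#       - email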
def setup(hass, config):
"""Register the SpaceAPI with the HTTP interface."""
hass.data[DATA_SPACEAPI] = config[DOMAIN]
hass.http.register_view(APISpaceApiView)
return True
class APISpaceApiView(HomeAssistantView):
"""View to provide details according to the SpaceAPI."""
url = URL_API_SPACEAPI
name = "api:spaceapi"
@staticmethod
def get_sensor_data(hass, spaceapi, sensor):
"""Get data from a sensor."""
sensor_state = hass.states.get(sensor)
if not sensor_state:
return None
sensor_data = {ATTR_NAME: sensor_state.name, ATTR_VALUE: sensor_state.state}
if ATTR_SENSOR_LOCATION in sensor_state.attributes:
sensor_data[ATTR_LOCATION] = sensor_state.attributes[ATTR_SENSOR_LOCATION]
else:
sensor_data[ATTR_LOCATION] = spaceapi[CONF_SPACE]
# Some sensors don't have a unit of measurement
if ATTR_UNIT_OF_MEASUREMENT in sensor_state.attributes:
sensor_data[ATTR_UNIT] = sensor_state.attributes[ATTR_UNIT_OF_MEASUREMENT]
return sensor_data
@ha.callback
def get(self, request):
"""Get SpaceAPI data."""
hass = request.app["hass"]
spaceapi = dict(hass.data[DATA_SPACEAPI])
        sensor_conf = spaceapi.get(CONF_SENSORS)
location = {ATTR_LAT: hass.config.latitude, ATTR_LON: hass.config.longitude}
        try:
            location[ATTR_ADDRESS] = spaceapi[ATTR_LOCATION][CONF_ADDRESS]
        except (KeyError, TypeError):
            pass
        state_entity = spaceapi[CONF_STATE][ATTR_ENTITY_ID]
space_state = hass.states.get(state_entity)
if space_state is not None:
state = {
ATTR_OPEN: space_state.state != "off",
ATTR_LASTCHANGE: dt_util.as_timestamp(space_state.last_updated),
}
else:
state = {ATTR_OPEN: "null", ATTR_LASTCHANGE: 0}
try:
            state[ATTR_ICON] = {
                ATTR_OPEN: spaceapi[CONF_STATE][CONF_ICON_OPEN],
                ATTR_CLOSE: spaceapi[CONF_STATE][CONF_ICON_CLOSED],
            }
except KeyError:
pass
data = {
ATTR_API: SPACEAPI_VERSION,
ATTR_CONTACT: spaceapi[CONF_CONTACT],
ATTR_ISSUE_REPORT_CHANNELS: spaceapi[CONF_ISSUE_REPORT_CHANNELS],
ATTR_LOCATION: location,
ATTR_LOGO: spaceapi[CONF_LOGO],
ATTR_SPACE: spaceapi[CONF_SPACE],
ATTR_STATE: state,
ATTR_URL: spaceapi[CONF_URL],
}
        # Optional top-level sections are copied through only when configured.
        for attr, conf in (
            (ATTR_CAM, CONF_CAM),
            (ATTR_SPACEFED, CONF_SPACEFED),
            (ATTR_STREAM, CONF_STREAM),
            (ATTR_FEEDS, CONF_FEEDS),
            (ATTR_CACHE, CONF_CACHE),
            (ATTR_PROJECTS, CONF_PROJECTS),
            (ATTR_RADIO_SHOW, CONF_RADIO_SHOW),
        ):
            if conf in spaceapi:
                data[attr] = spaceapi[conf]
        if sensor_conf is not None:
            sensors = {}
            for sensor_type in sensor_conf:
                sensors[sensor_type] = []
                for sensor in sensor_conf[sensor_type]:
                    sensor_data = self.get_sensor_data(hass, spaceapi, sensor)
                    sensors[sensor_type].append(sensor_data)
            data[ATTR_SENSORS] = sensors
return self.json(data)
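# The view answers plain GET requests, e.g. (hypothetical host):
#   curl http://localhost:8123/api/spaceapi
# and returns a SpaceAPI v0.13 JSON document assembled from the data above.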
|
|
__author__ = 'tom.bailey'
'''TB Animation Tools is a toolset for animators
*******************************************************************************
License and Copyright
Copyright 2015-Tom Bailey
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
send issues/ requests to brimblashman@gmail.com
visit tb-animator.blogspot.com for "stuff"
*******************************************************************************
'''
import os
import maya.cmds as cmds
import tb_UI as tb_UI
import pymel.core as pm
from tb_timeDragger import timeDragger
from tb_manipulators import manips
from tb_playback import playback
reload(tb_UI)
if not pm.optionVar(exists='playblast_folder'):
pm.optionVar(stringValue=('playblast_folder', "c:"))
script_dir = os.path.dirname(__file__) # <-- absolute dir the script is in
def buttonPressed(name, *args):
    # radio buttons store 1 or 2 in the option var depending on selection
    if args[0]:
        _val = 1
    else:
        _val = 2
    pm.optionVar(intValue=(str(name), _val))
def checkBox_pressed(name, *args):
pm.optionVar(intValue=(str(name), args[0]))
def set_option_dir(_name, _field, *args):
_filter = '*.dir'
_start_dir = pm.optionVar[_name]
_result = pm.fileDialog2(startingDirectory=_start_dir,
fileMode=3,
fileFilter=_filter,
dialogStyle=1,
okCaption='pick')[0]
pm.optionVar(stringValue=(_name, _result + "/"))
pm.textField(_field, edit=True, text=_result)
return _name
def intEntered(name, *args):
pm.optionVar(intValue=(str(name), args[0]))
def showUI():
"""A function to instantiate the pose manager window"""
return anim_optionWindow.showUI()
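# Usage sketch from the Maya script editor (module name is hypothetical):
#   import tb_options
#   tb_options.showUI()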
class anim_optionWindow(object):
@classmethod
def showUI(cls):
"""A function to instantiate the pose manager window"""
win = cls()
win.create()
return win
def __init__(self):
self.icon_path = ""
self.titleImage = os.path.join(script_dir, "../Icons/option_top.png")
self.titleImage_hvr = os.path.join(script_dir, "../Icons/option_top.png")
self.reminder_img = os.path.join(script_dir, "../Icons/arrow.png")
self.curve_btn_img = os.path.join(script_dir, "../Icons/curves.png")
self.transform_btn_img = os.path.join(script_dir, "../Icons/transform.png")
self.camera_btn_img = os.path.join(script_dir, "../Icons/tb_camera.png")
self.circle_btn_img = os.path.join(script_dir, "../Icons/circle.png")
self.folder_btn_img = os.path.join(script_dir, "../Icons/folder.png")
# dictionary for category layouts
self.categories = {}
self.window = 'anim_option_win'
self.bgc = [72.0 / 255.0, 79.0 / 255.0, 89.0 / 255.0]
self._width = 600
self._height = 450
self._sm_margin = 4
self._lrg_margin = 28
self.optionLayouts = []
# option variables
self.translate_optionVars = manips().translate_modes
def create(self):
if pm.window(self.window, exists=True):
pm.deleteUI(self.window, window=True)
self.window = pm.window(self.window, title='Animation options',
width=self._width,
height=self._height,
sizeable=True)
if pm.uiTemplate('animUI_template', exists=True):
pm.deleteUI('animUI_template', uiTemplate=True)
pm.uiTemplate('animUI_template')
cmds.button(defineTemplate='animUI_template', width=100, height=40, align='left')
cmds.frameLayout(defineTemplate='animUI_template',
borderVisible=True,
labelVisible=True,
)
cmds.textScrollList(defineTemplate='animUI_template',
backgroundColor=self.bgc
)
self._form_layout = pm.formLayout()
self.titleImage = pm.image(image=self.titleImage,
width=self._width,
height=75)
# apply ui template
pm.setUITemplate('animUI_template', pushTemplate=True)
# build main category form
self._menu_category(self._form_layout)
# key options layout
self.optionLayouts.append(self._keys_dialog_menu(self._form_layout))
# manipulator options
self.optionLayouts.append(self._manipulator_menu(self._form_layout))
# add the file options menu
self.optionLayouts.append(self._file_dialog_menu(self._form_layout))
# revert ui template
pm.setUITemplate(popTemplate=True)
# attach frames to main form
ac = []
af = []
# attach header image
af.append([self.titleImage, 'top', 0])
af.append([self.titleImage, 'left', self._sm_margin])
af.append([self.titleImage, 'right', self._sm_margin])
# attach left hand category menu
ac.append([self._cat_layout, 'top', 0, self.titleImage])
af.append([self._cat_layout, 'left', self._sm_margin])
af.append([self._cat_layout, 'bottom', self._sm_margin])
cmds.formLayout(
self._form_layout, e=True,
attachControl=ac, attachForm=af
)
# loop through the layouts
self.attach_main_forms(main_layout=self._form_layout,
layouts=self.optionLayouts,
top=self.titleImage,
left=self._cat_layout)
self.category_selected("manips_op")
self.window.show()
    def attach_main_forms(self, main_layout, layouts=None, top="", left="", right="", bottom="",
                          margin=4):
        # avoid a shared mutable default argument
        layouts = layouts or []
        for _form in layouts:
ac = []
af = []
ac.append([str(_form), 'top', 0, top])
ac.append([str(_form), 'left', 0, left])
af.append([str(_form), 'right', margin])
af.append([str(_form), 'bottom', margin])
pm.formLayout(
main_layout,
edit=True,
attachControl=ac,
attachForm=af
)
    def category_selected(self, _name, *args):
        # show only the frame layout belonging to the selected category
        for key in self.categories:
            pm.frameLayout(self.categories[key], edit=True, manage=(key == _name))
def _rmb_menu(self, _parent):
self._rmb_layout = pm.frameLayout(label="Right click Menu options",
width=self._width - 16,
collapsable=True,
collapse=True,
parent=_parent)
    def _optionCheckBox(self, _name, _label, _annotation):
        _checkBox = pm.checkBox(_name, label=_label,
                                value=pm.optionVar(query=_name),
                                annotation=_annotation,
                                # forward the checkbox state to the handler
                                changeCommand=lambda *args: checkBox_pressed(_name, *args)
                                )
        return _checkBox
    def _categoryButton(self, name="", width=64, height=64, icon=None, parent=""):
        # a default argument cannot reference self, so resolve the icon here
        if icon is None:
            icon = self.circle_btn_img
        _button = pm.symbolButton(annotation=name,
                                  image=icon,
                                  parent=parent,
                                  width=width,
                                  height=height,
                                  command=lambda *args: self.category_selected(name, *args)
                                  )
        return _button
def _optionRadioButton(self, _name, _label, _labelArray, _annotation):
_button = pm.radioButtonGrp(_name, numberOfRadioButtons=2,
label=_label,
select=pm.optionVar[_name],
annotation=_annotation,
labelArray2=_labelArray,
adjustableColumn=1,
columnAttach=[1, 'left', 0],
changeCommand1=lambda *args: buttonPressed(_name, args[0]))
return _button
def _int_option(self, _name, _label):
_field = pm.intField(_name,
value=pm.optionVar(query=_name),
enterCommand=lambda *args: intEntered(_name, args[0]))
return _field
    # helper: attach a control to all four edges of its parent form
@staticmethod
def attach_form(attach_form, form):
af = []
af.append([attach_form, 'top', 0])
af.append([attach_form, 'left', 0])
af.append([attach_form, 'right', 0])
af.append([attach_form, 'bottom', 0])
pm.formLayout(
form,
edit=True,
attachForm=af
)
def _menu_category(self, _parent):
self._cat_layout = pm.columnLayout()
self._cat_form = pm.formLayout()
file_option_btn = self._categoryButton(name="file_op", icon=self.folder_btn_img, parent=self._cat_layout)
keys_option_btn = self._categoryButton(name="keys_op", icon=self.curve_btn_img, parent=self._cat_layout)
manips_option_btn = self._categoryButton(name="manips_op", icon=self.transform_btn_img, parent=self._cat_layout)
# viewport_option_btn = self._categoryButton(name="view_op", icon=self.camera_btn_img, parent=self._cat_layout)
pm.setParent(_parent)
def _file_dialog_menu(self, _parent):
self._file_layout = pm.frameLayout(label="File settings", bv=False)
self._file_form = pm.formLayout()
playblast_folder_picker = tb_UI.folder_picker().create(parent=self._file_form,
label="playblast",
option_variable='tb_playblast_folder',
top_form=self._file_form
)
selections_folder_picker = tb_UI.folder_picker().create(parent=self._file_form,
label="quick select save directory",
option_variable='tb_qs_folder',
top_control=playblast_folder_picker,
top_form=self._file_form
)
pm.setParent(_parent)
self.categories["file_op"] = self._file_layout
return self._file_layout
def _manipulator_menu(self, _parent):
_manip_layout = pm.frameLayout(label="manipulator settings", bv=False)
_manip_form = pm.formLayout()
# _manip_columns = pm.columnLayout(columnAlign='center', bgc=(0.5,0.2,0.6))
translate_options = tb_UI.checkBox_group().create(label="cycle translate tool options",
parent=_manip_form,
variable=manips().translate_optionVar,
columns=4,
optionList=manips().translate_modes,
positionMenu=manips().translate_messageVar,
positionLabel=manips().translate_messageLabel,
messageMenu=True,
top_form=_manip_form
)
rotate_options = tb_UI.checkBox_group().create(label="cycle rotate tool options",
parent=_manip_form,
variable=manips().rotate_optionVar,
columns=4,
optionList=manips().rotate_modes,
positionMenu=manips().rotate_messageVar,
positionLabel=manips().rotate_messageLabel,
messageMenu=True,
top_control=translate_options,
top_form=_manip_form
)
time_drag_options = tb_UI.checkBox_group().create(label="smooth drag tool options",
parent=_manip_form,
variable=timeDragger().optionVar,
columns=4,
optionList=timeDragger().modes,
positionMenu=timeDragger().messagePos,
positionLabel="message position",
messageMenu=True,
top_control=rotate_options,
top_form=_manip_form
)
step_drag_options = tb_UI.checkBox_group().create(label="step drag tool options",
parent=_manip_form,
variable=timeDragger().step_optionVar,
columns=3,
optionList=timeDragger().step_modes,
intFieldLabel=timeDragger().step_label,
intField=timeDragger().step_var,
# positionMenu=timeDragger().messagePos,
# positionLabel="message position",
# messageMenu=True,
top_control=time_drag_options,
top_form=_manip_form
)
tumble_options = tb_UI.checkBox_group().create(label="camera pivot tool",
parent=_manip_form,
variable='tumbler_enabled',
columns=2,
optionList=['enabled'],
top_control=step_drag_options,
top_form=_manip_form
)
player = playback()
playback_options = tb_UI.option_group().create(label="evaluation manager override",
parent=_manip_form,
variable=[player.playbackModeOption, player.manipulationModeOption],
columns=2,
optionList=player.optionList,
top_control=tumble_options,
top_form=_manip_form
)
tb_UI.FormAttach().attach(_manip_layout, self._form_layout)
pm.setParent(_parent)
self.categories["manips_op"] = _manip_layout
return _manip_layout
def _keys_dialog_menu(self, _parent):
_keys_layout = pm.frameLayout(label="keyframe settings", bv=False)
_keys_form = pm.formLayout()
keyframe_options = tb_UI.checkBox_group().create(label="cycle keyframe type options",
parent=_keys_form,
variable=manips().key_optionVar,
columns=4,
optionList=manips().key_modes,
positionMenu=manips().key_messageVar,
positionLabel=manips().key_messageLabel,
messageMenu=True,
top_form=_keys_form
)
tb_UI.FormAttach().attach(_keys_layout, self._form_layout)
pm.setParent(_parent)
self.categories["keys_op"] = _keys_layout
return _keys_layout
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
import random
from collections import OrderedDict
import gzip
import json
import numpy as np
import pandas as pd
from ruamel.yaml import YAML
from pymatgen import Molecule, Element, Lattice, Structure
from pymatgen.io.lammps.data import LammpsBox, LammpsData, Topology, \
ForceField, lattice_2_lmpbox, structure_2_lmpdata, CombinedData
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
"test_files", "lammps")
class LammpsBoxTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.peptide = LammpsBox(bounds=[[36.840194, 64.211560],
[41.013691, 68.385058],
[29.768095, 57.139462]])
cls.quartz = LammpsBox(bounds=[[0, 4.913400],
[0, 4.255129],
[0, 5.405200]],
tilt=[-2.456700, 0.0, 0.0])
def test_volume(self):
obounds = np.array(self.peptide.bounds)
ov = np.prod(obounds[:, 1] - obounds[:, 0])
self.assertEqual(self.peptide.volume, ov)
self.assertAlmostEqual(self.quartz.volume, 113.00733165874873)
def test_get_string(self):
peptide = self.peptide.get_string(5)
peptide_5 = """36.84019 64.21156 xlo xhi
41.01369 68.38506 ylo yhi
29.76809 57.13946 zlo zhi"""
self.assertEqual(peptide, peptide_5)
quartz = self.quartz.get_string(4)
quartz_4 = """0.0000 4.9134 xlo xhi
0.0000 4.2551 ylo yhi
0.0000 5.4052 zlo zhi
-2.4567 0.0000 0.0000 xy xz yz"""
self.assertEqual(quartz, quartz_4)
def test_get_box_shift(self):
peptide = self.peptide
self.assertEqual(peptide.get_box_shift([1, 0, 0])[0],
64.211560 - 36.840194)
self.assertEqual(peptide.get_box_shift([0, 0, -1])[-1],
29.768095 - 57.139462)
quartz = self.quartz
np.testing.assert_array_almost_equal(quartz.get_box_shift([0, 0, 1]),
[0, 0, 5.4052], 4)
np.testing.assert_array_almost_equal(quartz.get_box_shift([0, 1, -1]),
[-2.4567, 4.2551, -5.4052], 4)
np.testing.assert_array_almost_equal(quartz.get_box_shift([1, -1, 0]),
[4.9134 + 2.4567, -4.2551, 0], 4)
def test_to_lattice(self):
peptide = self.peptide.to_lattice()
np.testing.assert_array_almost_equal(peptide.abc, [27.371367] * 3)
self.assertTrue(peptide.is_orthogonal)
quartz = self.quartz.to_lattice()
np.testing.assert_array_almost_equal(quartz.matrix,
[[4.913400, 0, 0],
[-2.456700, 4.255129, 0],
[0, 0, 5.405200]])
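# Note: a LAMMPS triclinic box corresponds to a lattice with matrix
# [[xhi - xlo, 0, 0], [xy, yhi - ylo, 0], [xz, yz, zhi - zlo]], which is the
# convention the orthogonal (peptide) and tilted (quartz) cases above exercise.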
class LammpsDataTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.peptide = LammpsData. \
from_file(filename=os.path.join(test_dir, "data.peptide"))
cls.ethane = LammpsData. \
from_file(filename=os.path.join(test_dir, "ethane.data"))
cls.quartz = LammpsData. \
from_file(filename=os.path.join(test_dir, "data.quartz"),
atom_style="atomic")
cls.virus = LammpsData. \
from_file(filename=os.path.join(test_dir, "virus.data"),
atom_style="angle")
cls.tatb = LammpsData. \
from_file(filename=os.path.join(test_dir, "tatb.data"),
atom_style="charge", sort_id=True)
def test_structure(self):
quartz = self.quartz.structure
np.testing.assert_array_equal(quartz.lattice.matrix,
[[4.913400, 0, 0],
[-2.456700, 4.255129, 0],
[0, 0, 5.405200]])
self.assertEqual(quartz.formula, "Si3 O6")
self.assertNotIn("molecule-ID", self.quartz.atoms.columns)
ethane = self.ethane.structure
np.testing.assert_array_equal(ethane.lattice.matrix,
np.diag([10.0] * 3))
lbounds = np.array(self.ethane.box.bounds)[:, 0]
coords = self.ethane.atoms[["x", "y", "z"]].values - lbounds
np.testing.assert_array_equal(ethane.cart_coords, coords)
np.testing.assert_array_equal(ethane.site_properties["charge"],
self.ethane.atoms["q"])
tatb = self.tatb.structure
frac_coords = tatb.frac_coords[381]
real_frac_coords = frac_coords - np.floor(frac_coords)
np.testing.assert_array_almost_equal(real_frac_coords,
[0.01553397,
0.71487872,
0.14134139])
co = Structure.from_spacegroup(194,
Lattice.hexagonal(2.50078, 4.03333),
["Co"], [[1 / 3, 2 / 3, 1 / 4]])
ld_co = LammpsData.from_structure(co)
self.assertEqual(ld_co.structure.composition.reduced_formula, "Co")
ni = Structure.from_spacegroup(225, Lattice.cubic(3.50804),
["Ni"], [[0, 0, 0]])
ld_ni = LammpsData.from_structure(ni)
self.assertEqual(ld_ni.structure.composition.reduced_formula, "Ni")
def test_get_string(self):
pep = self.peptide.get_string(distance=7, velocity=5, charge=4)
pep_lines = pep.split("\n")
pep_kws = ["Masses", "Pair Coeffs", "Bond Coeffs", "Angle Coeffs",
"Dihedral Coeffs", "Improper Coeffs", "Atoms",
"Velocities", "Bonds", "Angles", "Dihedrals", "Impropers"]
kw_inds = {l: i for i, l in enumerate(pep_lines) if l in pep_kws}
# section sequence
self.assertListEqual([k for k in sorted(kw_inds, key=kw_inds.get)],
pep_kws)
# header
pep_header = "\n".join(pep_lines[:kw_inds["Masses"]])
pep_header_7 = """Generated by pymatgen.io.lammps.data.LammpsData
2004 atoms
1365 bonds
786 angles
207 dihedrals
12 impropers
14 atom types
18 bond types
31 angle types
21 dihedral types
2 improper types
36.8401940 64.2115600 xlo xhi
41.0136910 68.3850580 ylo yhi
29.7680950 57.1394620 zlo zhi
"""
self.assertEqual(pep_header, pep_header_7)
# int vs float for coeffs
pep_dihedral_coeff = pep_lines[kw_inds["Dihedral Coeffs"] + 2]
self.assertEqual(pep_dihedral_coeff, "1 0.200 1 180 1.0")
# distance and charge
pep_atom = pep_lines[kw_inds["Atoms"] + 2]
self.assertEqual(pep_atom, "1 1 1 0.5100 43.9999300 "
"58.5267800 36.7855000 0 0 0")
# velocity
pep_velo = pep_lines[kw_inds["Velocities"] + 2]
self.assertEqual(pep_velo, "1 -0.00067 -0.00282 0.00383")
# no floats in topology sections
pep_topos = "\n".join(pep_lines[kw_inds["Bonds"]:])
self.assertNotIn(".", pep_topos)
c2h6 = self.ethane.get_string(distance=5, charge=3)
c2h6_lines = c2h6.split("\n")
c2h6_kws = ["Masses", "Pair Coeffs", "Bond Coeffs", "Angle Coeffs",
"Dihedral Coeffs", "Improper Coeffs", "BondBond Coeffs",
"BondAngle Coeffs", "MiddleBondTorsion Coeffs",
"EndBondTorsion Coeffs", "AngleTorsion Coeffs",
"AngleAngleTorsion Coeffs", "BondBond13 Coeffs",
"AngleAngle Coeffs", "Atoms", "Bonds", "Angles",
"Dihedrals", "Impropers"]
kw_inds = {l: i for i, l in enumerate(c2h6_lines) if l in c2h6_kws}
# section sequence
self.assertListEqual([k for k in sorted(kw_inds, key=kw_inds.get)],
c2h6_kws)
# header
c2h6_header = "\n".join(c2h6_lines[:kw_inds["Masses"]])
c2h6_header_5 = """Generated by pymatgen.io.lammps.data.LammpsData
8 atoms
7 bonds
12 angles
9 dihedrals
8 impropers
2 atom types
2 bond types
2 angle types
1 dihedral types
2 improper types
0.21455 10.21454 xlo xhi
0.11418 10.11418 ylo yhi
-10.00014 -0.00015 zlo zhi
"""
self.assertEqual(c2h6_header, c2h6_header_5)
# distance and charge
c2h6_atom = c2h6_lines[kw_inds["Atoms"] + 2]
self.assertEqual(c2h6_atom, "1 1 1 -0.080 4.46291 5.14833 -5.00041"
" 0 0 0")
# no floats in topology sections
c2h6_topos = "\n".join(c2h6_lines[kw_inds["Bonds"]:])
self.assertNotIn(".", c2h6_topos)
quartz = self.quartz.get_string(distance=4)
quartz_lines = quartz.split("\n")
quartz_kws = ["Masses", "Atoms"]
kw_inds = {l: i for i, l in enumerate(quartz_lines) if l in quartz_kws}
# header
quartz_header = "\n".join(quartz_lines[:kw_inds["Masses"]])
quartz_header_4 = """Generated by pymatgen.io.lammps.data.LammpsData
9 atoms
2 atom types
0.0000 4.9134 xlo xhi
0.0000 4.2551 ylo yhi
0.0000 5.4052 zlo zhi
-2.4567 0.0000 0.0000 xy xz yz
"""
self.assertEqual(quartz_header, quartz_header_4)
# distance
quartz_atom = quartz_lines[kw_inds["Atoms"] + 2]
self.assertEqual(quartz_atom, "1 1 2.3088 0.0000 3.6035")
virus = self.virus.get_string()
virus_lines = virus.split("\n")
pairij_coeff = virus_lines[virus_lines.index("PairIJ Coeffs") + 5]
self.assertEqual(pairij_coeff.strip(), "1 4 1 1.000 1.12250")
def test_write_file(self):
filename1 = "test1.data"
self.ethane.write_file(filename=filename1)
c2h6 = LammpsData.from_file(filename1)
pd.testing.assert_frame_equal(c2h6.masses, self.ethane.masses)
pd.testing.assert_frame_equal(c2h6.atoms, self.ethane.atoms)
        ff_kw = random.sample(list(self.ethane.force_field.keys()), 1)[0]
pd.testing.assert_frame_equal(c2h6.force_field[ff_kw],
self.ethane.force_field[ff_kw], ff_kw)
        topo_kw = random.sample(list(self.ethane.topology.keys()), 1)[0]
pd.testing.assert_frame_equal(c2h6.topology[topo_kw],
self.ethane.topology[topo_kw], topo_kw)
filename2 = "test2.data"
self.virus.write_file(filename=filename2)
v = LammpsData.from_file(filename2, atom_style="angle")
pd.testing.assert_frame_equal(v.force_field["PairIJ Coeffs"],
self.virus.force_field["PairIJ Coeffs"])
def test_disassemble(self):
# general tests
c = LammpsData.from_file(os.path.join(test_dir, "crambin.data"))
_, c_ff, topos = c.disassemble()
mass_info = [('N1', 14.0067), ('H1', 1.00797), ('C1', 12.01115),
('H2', 1.00797), ('C2', 12.01115), ('O1', 15.9994),
('C3', 12.01115), ('O2', 15.9994), ('H3', 1.00797),
('C4', 12.01115), ('N2', 14.0067), ('C5', 12.01115),
('S1', 32.064), ('C6', 12.01115), ('N3', 14.0067),
('C7', 12.01115), ('C8', 12.01115), ('C9', 12.01115),
('O3', 15.9994)]
self.assertListEqual(c_ff.mass_info, mass_info)
np.testing.assert_array_equal(c_ff.nonbond_coeffs,
c.force_field["Pair Coeffs"].values)
base_kws = ["Bond", "Angle", "Dihedral", "Improper"]
for kw in base_kws:
ff_kw = kw + " Coeffs"
i = random.randint(0, len(c_ff.topo_coeffs[ff_kw]) - 1)
sample_coeff = c_ff.topo_coeffs[ff_kw][i]
np.testing. \
assert_array_equal(sample_coeff["coeffs"],
c.force_field[ff_kw].iloc[i].values,
ff_kw)
topo = topos[-1]
atoms = c.atoms[c.atoms["molecule-ID"] == 46]
np.testing.assert_array_equal(topo.sites.cart_coords,
atoms[["x", "y", "z"]])
np.testing.assert_array_equal(topo.charges, atoms["q"])
atom_labels = [m[0] for m in mass_info]
self.assertListEqual(topo.sites.site_properties["ff_map"],
[atom_labels[i - 1] for i in atoms["type"]])
shift = min(atoms.index)
for kw in base_kws:
ff_kw = kw + " Coeffs"
ff_coeffs = c_ff.topo_coeffs[ff_kw]
topo_kw = kw + "s"
topos_df = c.topology[topo_kw]
topo_df = topos_df[topos_df["atom1"] >= shift]
topo_arr = topo_df.drop("type", axis=1).values
np.testing.assert_array_equal(topo.topologies[topo_kw],
topo_arr - shift, topo_kw)
sample_topo = random.sample(list(topo_df.itertuples(False, None)),
1)[0]
topo_type_idx = sample_topo[0] - 1
topo_type = tuple([atom_labels[i - 1] for i in
atoms.loc[sample_topo[1:], "type"]])
self.assertIn(topo_type, ff_coeffs[topo_type_idx]["types"], ff_kw)
# test no guessing element and pairij as nonbond coeffs
v = self.virus
_, v_ff, _ = v.disassemble(guess_element=False)
self.assertDictEqual(v_ff.maps["Atoms"],
dict(Qa1=1, Qb1=2, Qc1=3, Qa2=4))
pairij_coeffs = v.force_field["PairIJ Coeffs"].drop(["id1", "id2"],
axis=1)
np.testing.assert_array_equal(v_ff.nonbond_coeffs,
pairij_coeffs.values)
# test class2 ff
_, e_ff, _ = self.ethane.disassemble()
e_topo_coeffs = e_ff.topo_coeffs
for k in ["BondBond Coeffs", "BondAngle Coeffs"]:
self.assertIn(k, e_topo_coeffs["Angle Coeffs"][0], k)
for k in ["MiddleBondTorsion Coeffs", "EndBondTorsion Coeffs",
"AngleTorsion Coeffs", "AngleAngleTorsion Coeffs",
"BondBond13 Coeffs"]:
self.assertIn(k, e_topo_coeffs["Dihedral Coeffs"][0], k)
self.assertIn("AngleAngle Coeffs",
e_topo_coeffs["Improper Coeffs"][0])
def test_from_file(self):
# general tests
pep = self.peptide
# header stats and Nos. of columns
self.assertEqual(pep.masses.shape, (14, 1))
self.assertEqual(pep.atoms.shape, (2004, 9))
self.assertListEqual(list(pep.atoms.columns),
["molecule-ID", "type", "q", "x", "y", "z",
"nx", "ny", "nz"])
topo = pep.topology
self.assertEqual(topo["Bonds"].shape, (1365, 3))
self.assertEqual(topo["Angles"].shape, (786, 4))
self.assertEqual(topo["Dihedrals"].shape, (207, 5))
self.assertEqual(topo["Impropers"].shape, (12, 5))
ff = pep.force_field
self.assertEqual(ff["Pair Coeffs"].shape, (14, 4))
self.assertEqual(ff["Bond Coeffs"].shape, (18, 2))
self.assertEqual(ff["Angle Coeffs"].shape, (31, 4))
self.assertEqual(ff["Dihedral Coeffs"].shape, (21, 4))
self.assertEqual(ff["Improper Coeffs"].shape, (2, 2))
# header box
np.testing.assert_array_equal(pep.box.bounds,
[[36.840194, 64.211560],
[41.013691, 68.385058],
[29.768095, 57.139462]])
# body
self.assertEqual(pep.masses.at[7, "mass"], 12.0110)
self.assertEqual(ff["Pair Coeffs"].at[9, "coeff3"], 0.152100)
self.assertEqual(ff["Bond Coeffs"].at[5, "coeff2"], 1.430000)
self.assertEqual(ff["Angle Coeffs"].at[21, "coeff2"], 120.000000)
self.assertEqual(ff["Dihedral Coeffs"].at[10, "coeff1"], 0.040000)
self.assertEqual(ff["Improper Coeffs"].at[2, "coeff1"], 20.000000)
self.assertEqual(pep.atoms.at[29, "molecule-ID"], 1)
self.assertEqual(pep.atoms.at[29, "type"], 7)
self.assertEqual(pep.atoms.at[29, "q"], -0.020)
self.assertAlmostEqual(pep.atoms.at[29, "x"], 42.96709)
self.assertEqual(pep.atoms.at[1808, "molecule-ID"], 576)
self.assertEqual(pep.atoms.at[1808, "type"], 14)
self.assertAlmostEqual(pep.atoms.at[1808, "y"], 58.64352)
self.assertEqual(pep.atoms.at[1808, "nx"], -1)
self.assertAlmostEqual(pep.velocities.at[527, "vz"], -0.010889)
self.assertEqual(topo["Bonds"].at[47, "type"], 8)
self.assertEqual(topo["Bonds"].at[47, "atom2"], 54)
self.assertEqual(topo["Bonds"].at[953, "atom1"], 1384)
self.assertEqual(topo["Angles"].at[105, "type"], 19)
self.assertEqual(topo["Angles"].at[105, "atom3"], 51)
self.assertEqual(topo["Angles"].at[376, "atom2"], 772)
self.assertEqual(topo["Dihedrals"].at[151, "type"], 14)
self.assertEqual(topo["Dihedrals"].at[151, "atom4"], 51)
self.assertEqual(topo["Impropers"].at[4, "atom4"], 32)
# class 2 and comments
ethane = self.ethane
self.assertEqual(ethane.masses.shape, (2, 1))
self.assertEqual(ethane.atoms.shape, (8, 9))
class2 = ethane.force_field
self.assertEqual(class2["Pair Coeffs"].shape, (2, 2))
self.assertEqual(class2["Bond Coeffs"].shape, (2, 4))
self.assertEqual(class2["Angle Coeffs"].shape, (2, 4))
self.assertEqual(class2["Dihedral Coeffs"].shape, (1, 6))
self.assertEqual(class2["Improper Coeffs"].shape, (2, 2))
self.assertEqual(class2["BondBond Coeffs"].at[2, "coeff3"], 1.1010)
self.assertEqual(class2["BondAngle Coeffs"].at[2, "coeff4"], 1.1010)
self.assertEqual(class2["AngleAngle Coeffs"].at[2, "coeff6"],
107.6600)
self.assertEqual(class2["AngleAngle Coeffs"].at[2, "coeff6"],
107.6600)
self.assertEqual(class2["AngleAngleTorsion Coeffs"].at[1, "coeff3"],
110.7700)
self.assertEqual(class2["EndBondTorsion Coeffs"].at[1, "coeff8"],
1.1010)
self.assertEqual(class2["MiddleBondTorsion Coeffs"].at[1, "coeff4"],
1.5300)
self.assertEqual(class2["BondBond13 Coeffs"].at[1, "coeff3"], 1.1010)
self.assertEqual(class2["AngleTorsion Coeffs"].at[1, "coeff8"],
110.7700)
# tilt box and another atom_style
quartz = self.quartz
np.testing.assert_array_equal(quartz.box.tilt, [-2.456700, 0.0, 0.0])
self.assertListEqual(list(quartz.atoms.columns),
["type", "x", "y", "z"])
self.assertAlmostEqual(quartz.atoms.at[7, "x"], 0.299963)
# PairIJ Coeffs section
virus = self.virus
pairij = virus.force_field["PairIJ Coeffs"]
self.assertEqual(pairij.at[7, "id1"], 3)
self.assertEqual(pairij.at[7, "id2"], 3)
self.assertEqual(pairij.at[7, "coeff2"], 2.1)
# sort_id
atom_id = random.randint(1, 384)
self.assertEqual(self.tatb.atoms.loc[atom_id].name, atom_id)
def test_from_ff_and_topologies(self):
mass = OrderedDict()
mass["H"] = 1.0079401
mass["O"] = 15.999400
nonbond_coeffs = [[0.00774378, 0.98], [0.1502629, 3.1169]]
topo_coeffs = {"Bond Coeffs": [{"coeffs": [176.864, 0.9611],
"types": [("H", "O")]}],
"Angle Coeffs": [{"coeffs": [42.1845, 109.4712],
"types": [("H", "O", "H")]}]}
ff = ForceField(mass.items(), nonbond_coeffs, topo_coeffs)
with gzip.open(os.path.join(test_dir, "topologies_ice.json.gz")) as f:
topo_dicts = json.load(f)
topologies = [Topology.from_dict(d) for d in topo_dicts]
box = LammpsBox([[-0.75694412, 44.165558],
[0.38127473, 47.066074],
[0.17900842, 44.193867]])
ice = LammpsData.from_ff_and_topologies(box=box, ff=ff,
topologies=topologies)
atoms = ice.atoms
bonds = ice.topology["Bonds"]
angles = ice.topology["Angles"]
np.testing.assert_array_equal(atoms.index.values,
np.arange(1, len(atoms) + 1))
np.testing.assert_array_equal(bonds.index.values,
np.arange(1, len(bonds) + 1))
np.testing.assert_array_equal(angles.index.values,
np.arange(1, len(angles) + 1))
i = random.randint(0, len(topologies) - 1)
sample = topologies[i]
in_atoms = ice.atoms[ice.atoms["molecule-ID"] == i + 1]
np.testing.assert_array_equal(in_atoms.index.values,
np.arange(3 * i + 1, 3 * i + 4))
np.testing.assert_array_equal(in_atoms["type"].values, [2, 1, 1])
np.testing.assert_array_equal(in_atoms["q"].values, sample.charges)
np.testing.assert_array_equal(in_atoms[["x", "y", "z"]].values,
sample.sites.cart_coords)
broken_topo_coeffs = {"Bond Coeffs": [{"coeffs": [176.864, 0.9611],
"types": [("H", "O")]}],
"Angle Coeffs": [{"coeffs": [42.1845, 109.4712],
"types": [("H", "H", "H")]}]}
broken_ff = ForceField(mass.items(), nonbond_coeffs,
broken_topo_coeffs)
ld_woangles = LammpsData.from_ff_and_topologies(box=box, ff=broken_ff,
topologies=[sample])
self.assertNotIn("Angles", ld_woangles.topology)
def test_from_structure(self):
latt = Lattice.monoclinic(9.78746, 4.75058, 8.95892, 115.9693)
structure = Structure.from_spacegroup(15, latt, ["Os", "O", "O"],
[[0, 0.25583, 0.75],
[0.11146, 0.46611, 0.91631],
[0.11445, 0.04564, 0.69518]])
velocities = np.random.randn(20, 3) * 0.1
structure.add_site_property("velocities", velocities)
ld = LammpsData.from_structure(structure=structure,
ff_elements=["O", "Os", "Na"])
i = random.randint(0, 19)
a = latt.matrix[0]
va = velocities[i].dot(a) / np.linalg.norm(a)
self.assertAlmostEqual(va, ld.velocities.loc[i + 1, "vx"])
self.assertAlmostEqual(velocities[i, 1],
ld.velocities.loc[i + 1, "vy"])
np.testing.assert_array_almost_equal(ld.masses["mass"],
[22.989769, 190.23, 15.9994])
np.testing.assert_array_equal(ld.atoms["type"], [2] * 4 + [3] * 16)
def test_json_dict(self):
encoded = json.dumps(self.ethane.as_dict())
decoded = json.loads(encoded)
c2h6 = LammpsData.from_dict(decoded)
pd.testing.assert_frame_equal(c2h6.masses, self.ethane.masses)
pd.testing.assert_frame_equal(c2h6.atoms, self.ethane.atoms)
ff = self.ethane.force_field
        key, target_df = random.sample(list(ff.items()), 1)[0]
self.assertIsNone(
pd.testing.assert_frame_equal(c2h6.force_field[key], target_df,
check_dtype=False),
key
)
topo = self.ethane.topology
        key, target_df = random.sample(list(topo.items()), 1)[0]
self.assertIsNone(
pd.testing.assert_frame_equal(c2h6.topology[key], target_df),
key
)
@classmethod
def tearDownClass(cls):
tmpfiles = ["test1.data", "test2.data"]
for t in tmpfiles:
if os.path.exists(t):
os.remove(t)
class TopologyTest(unittest.TestCase):
def test_init(self):
inner_charge = np.random.rand(10) - 0.5
outer_charge = np.random.rand(10) - 0.5
inner_velo = np.random.rand(10, 3) - 0.5
outer_velo = np.random.rand(10, 3) - 0.5
m = Molecule(["H"] * 10, np.random.rand(10, 3) * 100,
site_properties={"ff_map": ["D"] * 10,
"charge": inner_charge,
"velocities": inner_velo})
# q and v from site properties, while type from species_string
topo = Topology(sites=m)
self.assertListEqual(topo.type_by_sites, ["H"] * 10)
np.testing.assert_array_equal(topo.charges, inner_charge)
np.testing.assert_array_equal(topo.velocities, inner_velo)
# q and v from overriding, while type from site property
topo_override = Topology(sites=m, ff_label="ff_map",
charges=outer_charge,
velocities=outer_velo)
self.assertListEqual(topo_override.type_by_sites, ["D"] * 10)
np.testing.assert_array_equal(topo_override.charges, outer_charge)
np.testing.assert_array_equal(topo_override.velocities, outer_velo)
# test using a list of sites instead of SiteCollection
topo_from_list = Topology(sites=m.sites)
self.assertListEqual(topo_from_list.type_by_sites, topo.type_by_sites)
np.testing.assert_array_equal(topo_from_list.charges, topo.charges)
np.testing.assert_array_equal(topo_from_list.velocities,
topo.velocities)
def test_from_bonding(self):
# He: no bonding topologies
helium = Molecule(["He"], [[0, 0, 0]])
topo_he = Topology.from_bonding(molecule=helium)
self.assertIsNone(topo_he.topologies)
# H2: 1 bond only
hydrogen = Molecule(["H"] * 2, [[0, 0, 0], [0, 0, 0.7414]])
topo_h = Topology.from_bonding(molecule=hydrogen)
tp_h = topo_h.topologies
self.assertListEqual(tp_h["Bonds"], [[0, 1]])
self.assertNotIn("Angles", tp_h)
self.assertNotIn("Dihedrals", tp_h)
# water: 2 bonds and 1 angle only
water = Molecule(["O", "H", "H"], [[0.0000, 0.0000, 0.1173],
[0.0000, 0.7572, -0.4692],
[0.0000, -0.7572, -0.4692]])
topo_water = Topology.from_bonding(molecule=water)
tp_water = topo_water.topologies
self.assertListEqual(tp_water["Bonds"], [[0, 1], [0, 2]])
self.assertListEqual(tp_water["Angles"], [[1, 0, 2]])
self.assertNotIn("Dihedrals", tp_water)
# EtOH
etoh = Molecule(["C", "C", "O", "H", "H", "H", "H", "H", "H"],
[[1.1879, -0.3829, 0.0000],
[0.0000, 0.5526, 0.0000],
[-1.1867, -0.2472, 0.0000],
[-1.9237, 0.3850, 0.0000],
[2.0985, 0.2306, 0.0000],
[1.1184, -1.0093, 0.8869],
[1.1184, -1.0093, -0.8869],
[-0.0227, 1.1812, 0.8852],
[-0.0227, 1.1812, -0.8852]])
topo_etoh = Topology.from_bonding(molecule=etoh)
tp_etoh = topo_etoh.topologies
self.assertEqual(len(tp_etoh["Bonds"]), 8)
etoh_bonds = [[0, 1], [0, 4], [0, 5], [0, 6],
[1, 2], [1, 7], [1, 8], [2, 3]]
np.testing.assert_array_equal(tp_etoh["Bonds"], etoh_bonds)
self.assertEqual(len(tp_etoh["Angles"]), 13)
etoh_angles = [[1, 0, 4], [1, 0, 5], [1, 0, 6], [4, 0, 5], [4, 0, 6],
[5, 0, 6], [0, 1, 2], [0, 1, 7], [0, 1, 8], [2, 1, 7],
[2, 1, 8], [7, 1, 8], [1, 2, 3]]
np.testing.assert_array_equal(tp_etoh["Angles"], etoh_angles)
self.assertEqual(len(tp_etoh["Dihedrals"]), 12)
etoh_dihedrals = [[4, 0, 1, 2], [4, 0, 1, 7], [4, 0, 1, 8],
[5, 0, 1, 2], [5, 0, 1, 7], [5, 0, 1, 8],
[6, 0, 1, 2], [6, 0, 1, 7], [6, 0, 1, 8],
[0, 1, 2, 3], [7, 1, 2, 3], [8, 1, 2, 3]]
np.testing.assert_array_equal(tp_etoh["Dihedrals"], etoh_dihedrals)
self.assertIsNotNone(json.dumps(topo_etoh.as_dict()))
# bond flag to off
topo_etoh0 = Topology.from_bonding(molecule=etoh, bond=False,
angle=True, dihedral=True)
self.assertIsNone(topo_etoh0.topologies)
# angle or dihedral flag to off
topo_etoh1 = Topology.from_bonding(molecule=etoh, angle=False)
self.assertNotIn("Angles", topo_etoh1.topologies)
topo_etoh2 = Topology.from_bonding(molecule=etoh, dihedral=False)
self.assertNotIn("Dihedrals", topo_etoh2.topologies)
class ForceFieldTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
mass_info = [("A", "H"), ("B", Element("C")),
("C", Element("O")), ("D", 1.00794)]
nonbond_coeffs = [[1, 1, 1.1225], [1, 1.175, 1.31894],
[1, 1.55, 1.73988], [1, 1, 1.1225],
[1, 1.35, 4], [1, 1.725, 1.93631],
[1, 1.175, 1.31894], [1, 2.1, 4],
[1, 1.55, 1.73988], [1, 1, 1.1225]]
topo_coeffs = {"Bond Coeffs": [{"coeffs": [50, 0.659469],
"types": [("A", "B"), ("C", "D")]},
{"coeffs": [50, 0.855906],
"types": [("B", "C")]}]}
cls.virus = ForceField(mass_info=mass_info,
nonbond_coeffs=nonbond_coeffs,
topo_coeffs=topo_coeffs)
cls.ethane = ForceField.from_file(os.path.join(test_dir,
"ff_ethane.yaml"))
def test_init(self):
v = self.virus
self.assertListEqual(v.mass_info, [("A", 1.00794), ("B", 12.0107),
("C", 15.9994), ("D", 1.00794)])
self.assertEqual(v.masses.at[3, "mass"], 15.9994)
v_ff = v.force_field
self.assertNotIn("Pair Coeffs", v_ff)
self.assertEqual(v_ff["PairIJ Coeffs"].iat[5, 4], 1.93631)
self.assertEqual(v_ff["Bond Coeffs"].at[2, "coeff2"], 0.855906)
v_maps = v.maps
self.assertDictEqual(v_maps["Atoms"],
{"A": 1, "B": 2, "C": 3, "D": 4})
self.assertDictEqual(v_maps["Bonds"],
{("A", "B"): 1, ("C", "D"): 1,
("B", "A"): 1, ("D", "C"): 1,
("B", "C"): 2, ("C", "B"): 2})
e = self.ethane
self.assertEqual(e.masses.at[1, "mass"], 12.01115)
e_ff = e.force_field
self.assertNotIn("PairIJ Coeffs", e_ff)
self.assertEqual(e_ff["Pair Coeffs"].at[1, "coeff2"], 3.854)
self.assertEqual(e_ff["Bond Coeffs"].at[2, "coeff4"], 844.6)
self.assertEqual(e_ff["Angle Coeffs"].at[2, "coeff4"], -2.4318)
self.assertEqual(e_ff["Dihedral Coeffs"].at[1, "coeff1"], -0.1432)
self.assertEqual(e_ff["Improper Coeffs"].at[2, "coeff2"], 0.0)
self.assertEqual(e_ff["BondBond Coeffs"].at[2, "coeff1"], 5.3316)
self.assertEqual(e_ff["BondAngle Coeffs"].at[1, "coeff3"], 1.53)
self.assertEqual(e_ff["MiddleBondTorsion Coeffs"].at[1, "coeff1"],
-14.261)
self.assertEqual(e_ff["EndBondTorsion Coeffs"].at[1, "coeff1"], 0.213)
self.assertEqual(e_ff["AngleTorsion Coeffs"].at[1, "coeff3"], -0.2466)
self.assertEqual(e_ff["AngleAngleTorsion Coeffs"].at[1, "coeff1"],
-12.564)
self.assertEqual(e_ff["BondBond13 Coeffs"].at[1, "coeff1"], 0.0)
self.assertEqual(e_ff["AngleAngle Coeffs"].at[1, "coeff2"], -0.4825)
e_maps = e.maps
self.assertDictEqual(e_maps["Atoms"], {"c4": 1, "h1": 2})
self.assertDictEqual(e_maps["Bonds"], {("c4", "c4"): 1,
("c4", "h1"): 2,
("h1", "c4"): 2})
self.assertDictEqual(e_maps["Angles"], {("c4", "c4", "h1"): 1,
("h1", "c4", "c4"): 1,
("h1", "c4", "h1"): 2})
self.assertEqual(e_maps["Impropers"], {("c4", "c4", "h1", "h1"): 1,
("c4", "h1", "c4", "h1"): 1,
("h1", "h1", "c4", "c4"): 1,
("h1", "c4", "h1", "c4"): 1,
("h1", "c4", "h1", "h1"): 2,
("h1", "h1", "c4", "h1"): 2})
def test_to_file(self):
filename = "ff_test.yaml"
v = self.virus
v.to_file(filename=filename)
yaml = YAML(typ="safe")
with open(filename, "r") as f:
d = yaml.load(f)
self.assertListEqual(d["mass_info"], [list(m) for m in v.mass_info])
self.assertListEqual(d["nonbond_coeffs"], v.nonbond_coeffs)
def test_from_file(self):
e = self.ethane
self.assertListEqual(e.mass_info, [("c4", 12.01115), ("h1", 1.00797)])
np.testing.assert_array_equal(e.nonbond_coeffs, [[0.062, 3.854],
[0.023, 2.878]])
e_tc = e.topo_coeffs
self.assertIn("Bond Coeffs", e_tc)
self.assertIn("BondAngle Coeffs", e_tc["Angle Coeffs"][0])
self.assertIn("BondBond Coeffs", e_tc["Angle Coeffs"][0])
self.assertIn("AngleAngleTorsion Coeffs", e_tc["Dihedral Coeffs"][0])
self.assertIn("AngleTorsion Coeffs", e_tc["Dihedral Coeffs"][0])
self.assertIn("BondBond13 Coeffs", e_tc["Dihedral Coeffs"][0])
self.assertIn("EndBondTorsion Coeffs", e_tc["Dihedral Coeffs"][0])
self.assertIn("MiddleBondTorsion Coeffs", e_tc["Dihedral Coeffs"][0])
self.assertIn("AngleAngle Coeffs", e_tc["Improper Coeffs"][0])
def test_from_dict(self):
d = self.ethane.as_dict()
json_str = json.dumps(d)
decoded = ForceField.from_dict(json.loads(json_str))
self.assertListEqual(decoded.mass_info, self.ethane.mass_info)
self.assertListEqual(decoded.nonbond_coeffs,
self.ethane.nonbond_coeffs)
self.assertDictEqual(decoded.topo_coeffs, self.ethane.topo_coeffs)
@classmethod
def tearDownClass(cls):
if os.path.exists("ff_test.yaml"):
os.remove("ff_test.yaml")
class FuncTest(unittest.TestCase):
def test_lattice_2_lmpbox(self):
matrix = np.diag(np.random.randint(5, 14, size=(3,))) \
+ np.random.rand(3, 3) * 0.2 - 0.1
init_latt = Lattice(matrix)
frac_coords = np.random.rand(10, 3)
init_structure = Structure(init_latt, ["H"] * 10, frac_coords)
origin = np.random.rand(3) * 10 - 5
box, symmop = lattice_2_lmpbox(lattice=init_latt, origin=origin)
boxed_latt = box.to_lattice()
np.testing.assert_array_almost_equal(init_latt.abc, boxed_latt.abc)
np.testing.assert_array_almost_equal(init_latt.angles,
boxed_latt.angles)
cart_coords = symmop.operate_multi(init_structure.cart_coords) - origin
boxed_structure = Structure(boxed_latt, ["H"] * 10, cart_coords,
coords_are_cartesian=True)
np.testing.assert_array_almost_equal(boxed_structure.frac_coords,
frac_coords)
tetra_latt = Lattice.tetragonal(5, 5)
tetra_box, _ = lattice_2_lmpbox(tetra_latt)
self.assertIsNone(tetra_box.tilt)
ortho_latt = Lattice.orthorhombic(5, 5, 5)
ortho_box, _ = lattice_2_lmpbox(ortho_latt)
self.assertIsNone(ortho_box.tilt)
rot_tetra_latt = Lattice([[5, 0, 0], [0, 2, 2], [0, -2, 2]])
_, rotop = lattice_2_lmpbox(rot_tetra_latt)
np.testing. \
assert_array_almost_equal(rotop.rotation_matrix,
[[1, 0, 0],
[0, 2 ** 0.5 / 2, 2 ** 0.5 / 2],
[0, -2 ** 0.5 / 2, 2 ** 0.5 / 2]])
@unittest.skip("The function is deprecated")
def test_structure_2_lmpdata(self):
matrix = np.diag(np.random.randint(5, 14, size=(3,))) + np.random.rand(3, 3) * 0.2 - 0.1
latt = Lattice(matrix)
frac_coords = np.random.rand(10, 3)
structure = Structure(latt, ["H"] * 10, frac_coords)
ld = structure_2_lmpdata(structure=structure)
box_tilt = [0.0, 0.0, 0.0] if not ld.box_tilt else ld.box_tilt
box_bounds = np.array(ld.box_bounds)
np.testing.assert_array_equal(box_bounds[:, 0], np.zeros(3))
new_matrix = np.diag(box_bounds[:, 1])
new_matrix[1, 0] = box_tilt[0]
new_matrix[2, 0] = box_tilt[1]
new_matrix[2, 1] = box_tilt[2]
new_latt = Lattice(new_matrix)
np.testing.assert_array_almost_equal(new_latt.abc, latt.abc)
np.testing.assert_array_almost_equal(new_latt.angles, latt.angles)
coords = ld.atoms[["x", "y", "z"]].values
new_structure = Structure(new_latt, ['H'] * 10, coords,
coords_are_cartesian=True)
np.testing.assert_array_almost_equal(new_structure.frac_coords,
frac_coords)
self.assertEqual(len(ld.masses), 1)
# test additional elements
ld_elements = structure_2_lmpdata(structure=structure,
ff_elements=["C", "H"])
self.assertEqual(len(ld_elements.masses), 2)
np.testing.assert_array_almost_equal(ld_elements.masses["mass"],
[1.00794, 12.01070])
class CombinedDataTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.ec = LammpsData. \
from_file(filename=os.path.join(test_dir, "ec.data"))
cls.fec = LammpsData. \
from_file(filename=os.path.join(test_dir, "fec.data"))
cls.coord = CombinedData. \
parse_xyz(filename=os.path.join(test_dir, "ec_fec.xyz"))
cls.ec_fec1 = CombinedData. \
from_files(os.path.join(test_dir, "ec_fec.xyz"), [1200, 300],
os.path.join(test_dir, "ec.data"),
os.path.join(test_dir, "fec.data"))
cls.ec_fec2 = CombinedData. \
from_lammpsdata([cls.ec, cls.fec], ['EC', 'FEC'], [1200, 300], cls.coord)
def test_from_files(self):
# general tests
ec_fec = self.ec_fec1
# header stats and Nos. of columns
self.assertEqual(ec_fec.names, ['cluster1', 'cluster2'])
self.assertEqual(ec_fec.nums, [1200, 300])
self.assertEqual(ec_fec.masses.shape, (12, 1))
self.assertEqual(ec_fec.atoms.shape, (15000, 6))
self.assertListEqual(list(ec_fec.atoms.columns),
["molecule-ID", "type", "q", "x", "y", "z"])
topo = ec_fec.topology
self.assertEqual(topo["Bonds"].shape, (15000, 3))
self.assertEqual(topo["Angles"].shape, (25500, 4))
self.assertEqual(topo["Dihedrals"].shape, (42000, 5))
self.assertEqual(topo["Impropers"].shape, (1500, 5))
ff = ec_fec.force_field
self.assertEqual(ff["Pair Coeffs"].shape, (12, 2))
self.assertEqual(ff["Bond Coeffs"].shape, (15, 2))
self.assertEqual(ff["Angle Coeffs"].shape, (24, 2))
self.assertEqual(ff["Dihedral Coeffs"].shape, (39, 6))
self.assertEqual(ff["Improper Coeffs"].shape, (2, 3))
# header box
np.testing.assert_array_equal(ec_fec.box.bounds,
[[-1.000000, 54.000000],
[-1.000000, 54.000000],
[-1.000000, 54.000000]])
# body
self.assertEqual(ec_fec.masses.at[7, "mass"], 1.008)
self.assertEqual(ff["Pair Coeffs"].at[9, "coeff2"], 3.750)
self.assertEqual(ff["Bond Coeffs"].at[5, "coeff2"], 1.0900)
self.assertEqual(ff["Angle Coeffs"].at[24, "coeff2"], 108.46005)
self.assertTrue(np.isnan(ff["Dihedral Coeffs"].at[30, "coeff6"]))
self.assertEqual(ff["Improper Coeffs"].at[2, "coeff1"], 10.5)
self.assertEqual(ec_fec.atoms.at[29, "molecule-ID"], 3)
self.assertEqual(ec_fec.atoms.at[29, "type"], 5)
self.assertEqual(ec_fec.atoms.at[29, "q"], 0.0755)
self.assertAlmostEqual(ec_fec.atoms.at[29, "x"], 14.442260)
self.assertEqual(ec_fec.atoms.at[14958, "molecule-ID"], 1496)
self.assertEqual(ec_fec.atoms.at[14958, "type"], 11)
self.assertAlmostEqual(ec_fec.atoms.at[14958, "y"], 41.010962)
self.assertEqual(topo["Bonds"].at[47, "type"], 5)
self.assertEqual(topo["Bonds"].at[47, "atom2"], 47)
self.assertEqual(topo["Bonds"].at[953, "atom1"], 951)
self.assertEqual(topo["Angles"].at[105, "type"], 2)
self.assertEqual(topo["Angles"].at[105, "atom3"], 63)
self.assertEqual(topo["Angles"].at[14993, "atom2"], 8815)
self.assertEqual(topo["Dihedrals"].at[151, "type"], 4)
self.assertEqual(topo["Dihedrals"].at[151, "atom4"], 55)
self.assertEqual(topo["Dihedrals"].at[41991, "type"], 30)
self.assertEqual(topo["Dihedrals"].at[41991, "atom2"], 14994)
self.assertEqual(topo["Impropers"].at[4, "atom4"], 34)
def test_from_lammpsdata(self):
# general tests
ec_fec = self.ec_fec2
# header stats and Nos. of columns
self.assertEqual(ec_fec.names, ['EC', 'FEC'])
self.assertEqual(ec_fec.nums, [1200, 300])
self.assertEqual(ec_fec.masses.shape, (12, 1))
self.assertEqual(ec_fec.atoms.shape, (15000, 6))
self.assertListEqual(list(ec_fec.atoms.columns),
["molecule-ID", "type", "q", "x", "y", "z"])
topo = ec_fec.topology
self.assertEqual(topo["Bonds"].shape, (15000, 3))
self.assertEqual(topo["Angles"].shape, (25500, 4))
self.assertEqual(topo["Dihedrals"].shape, (42000, 5))
self.assertEqual(topo["Impropers"].shape, (1500, 5))
ff = ec_fec.force_field
self.assertEqual(ff["Pair Coeffs"].shape, (12, 2))
self.assertEqual(ff["Bond Coeffs"].shape, (15, 2))
self.assertEqual(ff["Angle Coeffs"].shape, (24, 2))
self.assertEqual(ff["Dihedral Coeffs"].shape, (39, 6))
self.assertEqual(ff["Improper Coeffs"].shape, (2, 3))
# header box
np.testing.assert_array_equal(ec_fec.box.bounds,
[[-1.000000, 54.000000],
[-1.000000, 54.000000],
[-1.000000, 54.000000]])
# body
self.assertEqual(ec_fec.masses.at[7, "mass"], 1.008)
self.assertEqual(ff["Pair Coeffs"].at[9, "coeff2"], 3.750)
self.assertEqual(ff["Bond Coeffs"].at[5, "coeff2"], 1.0900)
self.assertEqual(ff["Angle Coeffs"].at[24, "coeff2"], 108.46005)
self.assertTrue(np.isnan(ff["Dihedral Coeffs"].at[30, "coeff6"]))
self.assertEqual(ff["Improper Coeffs"].at[2, "coeff1"], 10.5)
self.assertEqual(ec_fec.atoms.at[29, "molecule-ID"], 3)
self.assertEqual(ec_fec.atoms.at[29, "type"], 5)
self.assertEqual(ec_fec.atoms.at[29, "q"], 0.0755)
self.assertAlmostEqual(ec_fec.atoms.at[29, "x"], 14.442260)
self.assertEqual(ec_fec.atoms.at[14958, "molecule-ID"], 1496)
self.assertEqual(ec_fec.atoms.at[14958, "type"], 11)
self.assertAlmostEqual(ec_fec.atoms.at[14958, "y"], 41.010962)
self.assertEqual(topo["Bonds"].at[47, "type"], 5)
self.assertEqual(topo["Bonds"].at[47, "atom2"], 47)
self.assertEqual(topo["Bonds"].at[953, "atom1"], 951)
self.assertEqual(topo["Angles"].at[105, "type"], 2)
self.assertEqual(topo["Angles"].at[105, "atom3"], 63)
self.assertEqual(topo["Angles"].at[14993, "atom2"], 8815)
self.assertEqual(topo["Dihedrals"].at[151, "type"], 4)
self.assertEqual(topo["Dihedrals"].at[151, "atom4"], 55)
self.assertEqual(topo["Dihedrals"].at[41991, "type"], 30)
self.assertEqual(topo["Dihedrals"].at[41991, "atom2"], 14994)
self.assertEqual(topo["Impropers"].at[4, "atom4"], 34)
def test_get_string(self):
# general tests
ec_fec_lines = self.ec_fec1.get_string().splitlines()
# header information
self.assertEqual(ec_fec_lines[1], "# 1200 cluster1 + 300 cluster2")
# data type consistency tests
self.assertEqual(ec_fec_lines[98], "1 harmonic 3.200000000 -1 2")
self.assertEqual(ec_fec_lines[109], "12 charmm 2.700000000 2 180 0.0")
self.assertEqual(ec_fec_lines[113],
"16 multi/harmonic 0.382999522 -1.148998570 0.000000000 1.531998090 0.000000000")
self.assertEqual(ec_fec_lines[141], "1 10.5 -1 2")
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python
# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import os
import re
import unittest2 as unittest
from webkitpy.common.host import Host
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
from webkitpy.w3c.test_converter import _W3CTestConverter
DUMMY_FILENAME = 'dummy.html'
DUMMY_PATH = 'dummy/testharness/path'
class W3CTestConverterTest(unittest.TestCase):
# FIXME: When we move to using a MockHost, this method should be removed, since
# then we can just pass in a dummy dir path
def fake_dir_path(self, dirname):
filesystem = Host().filesystem
webkit_root = WebKitFinder(filesystem).webkit_base()
return filesystem.abspath(filesystem.join(webkit_root, "LayoutTests", "css", dirname))
def test_read_prefixed_property_list(self):
""" Tests that the current list of properties requiring the -webkit- prefix load correctly """
# FIXME: We should be passing in a MockHost here ...
converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME)
prop_list = converter.prefixed_properties
self.assertTrue(prop_list, 'No prefixed properties found')
def test_convert_for_webkit_nothing_to_convert(self):
""" Tests convert_for_webkit() using a basic test that has nothing to convert """
test_html = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>CSS Test: DESCRIPTION OF TEST</title>
<link rel="author" title="NAME_OF_AUTHOR"
href="mailto:EMAIL OR http://CONTACT_PAGE"/>
<link rel="help" href="RELEVANT_SPEC_SECTION"/>
<meta name="assert" content="TEST ASSERTION"/>
<style type="text/css"><![CDATA[
CSS FOR TEST
]]></style>
</head>
<body>
CONTENT OF TEST
</body>
</html>
"""
converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_html)
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.verify_no_conversion_happened(converted, test_html)
def test_convert_for_webkit_harness_only(self):
""" Tests convert_for_webkit() using a basic JS test that uses testharness.js only and has no prefixed properties """
test_html = """<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
</head>
"""
fake_dir_path = self.fake_dir_path("harnessonly")
converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME)
converter.feed(test_html)
converter.close()
converted = converter.output()
self.verify_conversion_happened(converted)
self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
self.verify_prefixed_properties(converted, [])
def test_convert_for_webkit_properties_only(self):
""" Tests convert_for_webkit() using a test that has 2 prefixed properties: 1 in a style block + 1 inline style """
test_html = """<html>
<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
<style type="text/css">
#block1 { @test0@: propvalue; }
</style>
</head>
<body>
<div id="elem1" style="@test1@: propvalue;"></div>
</body>
</html>
"""
fake_dir_path = self.fake_dir_path('harnessandprops')
converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME)
test_content = self.generate_test_content(converter.prefixed_properties, 1, test_html)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_content[1])
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.verify_conversion_happened(converted)
self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
self.verify_prefixed_properties(converted, test_content[0])
def test_convert_for_webkit_harness_and_properties(self):
""" Tests convert_for_webkit() using a basic JS test that uses testharness.js and testharness.css and has 4 prefixed properties: 3 in a style block + 1 inline style """
test_html = """<html>
<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
<style type="text/css">
#block1 { @test0@: propvalue; }
#block2 { @test1@: propvalue; }
#block3 { @test2@: propvalue; }
</style>
</head>
<body>
<div id="elem1" style="@test3@: propvalue;"></div>
</body>
</html>
"""
fake_dir_path = self.fake_dir_path('harnessandprops')
converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME)
oc = OutputCapture()
oc.capture_output()
try:
test_content = self.generate_test_content(converter.prefixed_properties, 2, test_html)
converter.feed(test_content[1])
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.verify_conversion_happened(converted)
self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
self.verify_prefixed_properties(converted, test_content[0])
def test_convert_test_harness_paths(self):
""" Tests convert_testharness_paths() with a test that uses all three testharness files """
test_html = """<head>
<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
</head>
"""
fake_dir_path = self.fake_dir_path('testharnesspaths')
converter = _W3CTestConverter(fake_dir_path, DUMMY_FILENAME)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_html)
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.verify_conversion_happened(converted)
self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 2, 1)
def test_convert_prefixed_properties(self):
""" Tests convert_prefixed_properties() file that has 20 properties requiring the -webkit- prefix:
10 in one style block + 5 in another style
block + 5 inline styles, including one with multiple prefixed properties.
The properties in the test content are in all sorts of wack formatting.
"""
test_html = """<html>
<style type="text/css"><![CDATA[
.block1 {
width: 300px;
height: 300px
}
.block2 {
@test0@: propvalue;
}
.block3{@test1@: propvalue;}
.block4 { @test2@:propvalue; }
.block5{ @test3@ :propvalue; }
#block6 { @test4@ : propvalue; }
#block7
{
@test5@: propvalue;
}
#block8 { @test6@: propvalue; }
#block9:pseudo
{
@test7@: propvalue;
@test8@: propvalue propvalue propvalue;;
}
]]></style>
</head>
<body>
<div id="elem1" style="@test9@: propvalue;"></div>
<div id="elem2" style="propname: propvalue; @test10@ : propvalue; propname:propvalue;"></div>
<div id="elem2" style="@test11@: propvalue; @test12@ : propvalue; @test13@ :propvalue;"></div>
<div id="elem3" style="@test14@:propvalue"></div>
</body>
<style type="text/css"><![CDATA[
.block10{ @test15@: propvalue; }
.block11{ @test16@: propvalue; }
.block12{ @test17@: propvalue; }
#block13:pseudo
{
@test18@: propvalue;
@test19@: propvalue;
}
]]></style>
</html>
"""
converter = _W3CTestConverter(DUMMY_PATH, DUMMY_FILENAME)
test_content = self.generate_test_content(converter.prefixed_properties, 20, test_html)
oc = OutputCapture()
oc.capture_output()
try:
converter.feed(test_content[1])
converter.close()
converted = converter.output()
finally:
oc.restore_output()
self.verify_conversion_happened(converted)
self.verify_prefixed_properties(converted, test_content[0])
def verify_conversion_happened(self, converted):
self.assertTrue(converted, "conversion didn't happen")
def verify_no_conversion_happened(self, converted, original):
self.assertEqual(converted[1], original, 'test should not have been converted')
def verify_test_harness_paths(self, converter, converted, test_path, num_src_paths, num_href_paths):
if isinstance(converted, basestring):
converted = BeautifulSoup(converted)
resources_dir = converter.path_from_webkit_root("LayoutTests", "resources")
# Verify the original paths are gone, and the new paths are present.
orig_path_pattern = re.compile('\"/resources/testharness')
self.assertEquals(len(converted.findAll(src=orig_path_pattern)), 0, 'testharness src path was not converted')
self.assertEquals(len(converted.findAll(href=orig_path_pattern)), 0, 'testharness href path was not converted')
new_relpath = os.path.relpath(resources_dir, test_path)
relpath_pattern = re.compile(new_relpath)
self.assertEquals(len(converted.findAll(src=relpath_pattern)), num_src_paths, 'testharness src relative path not correct')
self.assertEquals(len(converted.findAll(href=relpath_pattern)), num_href_paths, 'testharness href relative path not correct')
def verify_prefixed_properties(self, converted, test_properties):
self.assertEqual(len(set(converted[0])), len(set(test_properties)), 'Incorrect number of properties converted')
for test_prop in test_properties:
self.assertTrue((test_prop in converted[1]), 'Property ' + test_prop + ' not found in converted doc')
def generate_test_content(self, full_property_list, num_test_properties, html):
"""Inserts properties requiring a -webkit- prefix into the content, replacing \'@testXX@\' with a property."""
test_properties = []
count = 0
while count < num_test_properties:
test_properties.append(full_property_list[count])
count += 1
# Replace the tokens in the testhtml with the test properties. Walk backward
# through the list to replace the double-digit tokens first
index = len(test_properties) - 1
while index >= 0:
# Use the unprefixed version
test_prop = test_properties[index].replace('-webkit-', '')
# Replace the token
html = html.replace('@test' + str(index) + '@', test_prop)
index -= 1
return (test_properties, html)
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import argparse
import time
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
from paddle.fluid import core
import unittest
from multiprocessing import Process
import os
import sys
import signal
from test_dist_base import TestDistRunnerBase, runtime_main
paddle.enable_static()
# Fix seed for test
fluid.default_startup_program().random_seed = 1
fluid.default_main_program().random_seed = 1
train_parameters = {
"input_size": [3, 224, 224],
"input_mean": [0.485, 0.456, 0.406],
"input_std": [0.229, 0.224, 0.225],
"learning_strategy": {
"name": "piecewise_decay",
"epochs": [30, 60, 90],
"steps": [0.1, 0.01, 0.001, 0.0001]
}
}
class SE_ResNeXt():
def __init__(self, layers=50):
self.params = train_parameters
self.layers = layers
def net(self, input, class_dim=1000):
layers = self.layers
supported_layers = [50, 101, 152]
assert layers in supported_layers, \
"supported layers are {} but input layer is {}".format(supported_layers, layers)
if layers == 50:
cardinality = 32
reduction_ratio = 16
depth = [3, 4, 6, 3]
num_filters = [128, 256, 512, 1024]
conv = self.conv_bn_layer(
input=input,
num_filters=64,
filter_size=7,
stride=2,
act='relu')
conv = fluid.layers.pool2d(
input=conv,
pool_size=3,
pool_stride=2,
pool_padding=1,
pool_type='max')
elif layers == 101:
cardinality = 32
reduction_ratio = 16
depth = [3, 4, 23, 3]
num_filters = [128, 256, 512, 1024]
conv = self.conv_bn_layer(
input=input,
num_filters=64,
filter_size=7,
stride=2,
act='relu')
conv = fluid.layers.pool2d(
input=conv,
pool_size=3,
pool_stride=2,
pool_padding=1,
pool_type='max')
elif layers == 152:
cardinality = 64
reduction_ratio = 16
depth = [3, 8, 36, 3]
num_filters = [128, 256, 512, 1024]
conv = self.conv_bn_layer(
input=input,
num_filters=64,
filter_size=3,
stride=2,
act='relu')
conv = self.conv_bn_layer(
input=conv, num_filters=64, filter_size=3, stride=1, act='relu')
conv = self.conv_bn_layer(
input=conv,
num_filters=128,
filter_size=3,
stride=1,
act='relu')
            conv = fluid.layers.pool2d(
                input=conv, pool_size=3, pool_stride=2, pool_padding=1,
                pool_type='max')
for block in range(len(depth)):
for i in range(depth[block]):
conv = self.bottleneck_block(
input=conv,
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
cardinality=cardinality,
reduction_ratio=reduction_ratio)
pool = fluid.layers.pool2d(
input=conv, pool_size=7, pool_type='avg', global_pooling=True)
drop = fluid.layers.dropout(x=pool, dropout_prob=0.2)
stdv = 1.0 / math.sqrt(drop.shape[1] * 1.0)
out = fluid.layers.fc(
input=drop,
size=class_dim,
act='softmax',
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.05)))
return out
def shortcut(self, input, ch_out, stride):
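        # Use a 1x1 projection when the channel count or stride changes;
        # otherwise the identity shortcut suffices.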
ch_in = input.shape[1]
if ch_in != ch_out or stride != 1:
filter_size = 1
return self.conv_bn_layer(input, ch_out, filter_size, stride)
else:
return input
def bottleneck_block(self, input, num_filters, stride, cardinality,
reduction_ratio):
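        # ResNeXt bottleneck: 1x1 reduce, 3x3 grouped conv (groups =
        # cardinality), 1x1 expand, then SE rescaling and a residual add.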
conv0 = self.conv_bn_layer(
input=input, num_filters=num_filters, filter_size=1, act='relu')
conv1 = self.conv_bn_layer(
input=conv0,
num_filters=num_filters,
filter_size=3,
stride=stride,
groups=cardinality,
act='relu')
conv2 = self.conv_bn_layer(
input=conv1, num_filters=num_filters * 2, filter_size=1, act=None)
scale = self.squeeze_excitation(
input=conv2,
num_channels=num_filters * 2,
reduction_ratio=reduction_ratio)
short = self.shortcut(input, num_filters * 2, stride)
return fluid.layers.elementwise_add(x=short, y=scale, act='relu')
def conv_bn_layer(self,
input,
num_filters,
filter_size,
stride=1,
groups=1,
act=None):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=groups,
act=None,
# avoid pserver CPU init differs from GPU
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.05)),
bias_attr=False)
return fluid.layers.batch_norm(input=conv, act=act)
def squeeze_excitation(self, input, num_channels, reduction_ratio):
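        # Squeeze: global average pooling reduces each channel to one value.
        # Excitation: an FC bottleneck (num_channels / reduction_ratio)
        # followed by an FC back to num_channels with a sigmoid yields
        # per-channel weights, which rescale the input feature map.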
pool = fluid.layers.pool2d(
input=input, pool_size=0, pool_type='avg', global_pooling=True)
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
squeeze = fluid.layers.fc(
input=pool,
size=num_channels // reduction_ratio,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.05)),
act='relu')
stdv = 1.0 / math.sqrt(squeeze.shape[1] * 1.0)
excitation = fluid.layers.fc(
input=squeeze,
size=num_channels,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.05)),
act='sigmoid')
scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)
return scale
class DistSeResneXt2x2(TestDistRunnerBase):
def get_model(self, batch_size=2, use_dgc=False):
# Input data
image = fluid.layers.data(
name="data", shape=[3, 224, 224], dtype='float32')
label = fluid.layers.data(name="int64", shape=[1], dtype='int64')
# Train program
model = SE_ResNeXt(layers=50)
out = model.net(input=image, class_dim=102)
cost = fluid.layers.cross_entropy(input=out, label=label)
avg_cost = fluid.layers.mean(x=cost)
acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
# Evaluator
test_program = fluid.default_main_program().clone(for_test=True)
# Optimization
total_images = 6149 # flowers
epochs = [30, 60, 90]
step = int(total_images / batch_size + 1)
bd = [step * e for e in epochs]
base_lr = 0.1
lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
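        # e.g. with the default batch_size=2: step = 3075, so the boundaries
        # are [92250, 184500, 276750] steps and the learning rate decays 10x
        # at each one (values approximately [0.1, 0.01, 0.001, 0.0001]).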
if not use_dgc:
optimizer = fluid.optimizer.Momentum(
learning_rate=fluid.layers.piecewise_decay(
boundaries=bd, values=lr),
momentum=0.9,
regularization=fluid.regularizer.L2Decay(1e-4))
else:
optimizer = fluid.optimizer.DGCMomentumOptimizer(
learning_rate=fluid.layers.piecewise_decay(
boundaries=bd, values=lr),
momentum=0.9,
rampup_begin_step=0,
regularization=fluid.regularizer.L2Decay(1e-4))
optimizer.minimize(avg_cost)
# Reader
train_reader = paddle.batch(
paddle.dataset.flowers.test(use_xmap=False), batch_size=batch_size)
test_reader = paddle.batch(
paddle.dataset.flowers.test(use_xmap=False), batch_size=batch_size)
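        # Note: both readers draw from the small flowers *test* split,
        # presumably to keep this distributed test fast and deterministic.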
return test_program, avg_cost, train_reader, test_reader, acc_top1, out
if __name__ == "__main__":
runtime_main(DistSeResneXt2x2)
|
|
#Scrape a facebook group and add to a specified user's playlist
import sys, os
sys.path.insert(0, os.path.abspath('..'))
import argparse
import re
import csv
from datetime import datetime
from common import posts
from common.posts import facebook_post
from common.posts.facebook_post import FacebookPost
from common import scrapers
from common.scrapers import facebook_scraper
from common.scrapers.facebook_scraper import FacebookScraper
from common import players
from common.players import spotify
from common.players.spotify import SpotifyPlayer
#if not silent and len(authors)==0 and len(date_range)==0 and min_likes==0 and min_loves==0 and limit==0:
def scrape_fb_group_to_spotify_playlist(**kwargs):
track_ids = []
no_dump = kwargs['no_dump']
out_file = kwargs['out_file']
    fb_criteria = unpack_fb_criteria_from_args(**kwargs)
fb_scraper = FacebookScraper(fb_criteria)
spotify_player = SpotifyPlayer(kwargs['spfy_user_id'])
if 'in_file' not in kwargs or kwargs['in_file'] is None:
track_ids = scrape_track_ids_and_dump(fb_criteria, fb_scraper, spotify_player, no_dump, out_file)
else:
track_ids = get_track_ids_from_file(kwargs['in_file'])
#playlist_name = generate_playlist_name(fb_scraper)
playlist_name = 'Trip Hop Downtempo Chill Out Electronica 2017.11.10'
print('adding {num_tracks} tracks to {playlist}'.format(num_tracks=len(track_ids), playlist=playlist_name))
playlist_id = spotify_player.create_playlist(playlist_name)
spotify_player.add_track_ids_to_playlist(kwargs['spfy_user_id'], playlist_id, track_ids)
def unpack_fb_criteria_from_args(**kwargs):
fb_criteria = {
"app_id" : kwargs["fb_app_id"],
"app_secret" : kwargs["fb_app_secret"],
"group_id" : kwargs["fb_group_id"],
"date_range" : (kwargs["begin_date"], kwargs["end_date"]),
"min_likes" : kwargs["min_likes"],
"min_loves" : kwargs["min_loves"],
"limit" : kwargs["limit"]
}
return fb_criteria
def scrape_track_ids_and_dump(fb_criteria, scraper, spfy, no_dump, out_file):
#Create critera object for scraping, refactor this
track_ids = []
print('Logging with criteria:')
print(fb_criteria)
try:
scraper.scrape()
except:
print('Scrape terminated early with error:')
        print(sys.exc_info()[0])
dump_info = None
    playlist_name = generate_playlist_name(scraper)
if not no_dump:
dump_info = []
if out_file is None:
out_file = playlist_name.translate({ord(c): None for c in '!@#$\\/'})
get_spotify_track_ids(spfy, scraper.scrape_data, track_ids, dump_info)
if dump_info is not None:
dump_scraped_posts(dump_info, scraper.get_group_friendly_name(), out_file)
return track_ids
def generate_playlist_name(scraper):
fb_group_friendly_name = scraper.get_group_friendly_name()
playlist_name = fb_group_friendly_name + ' {}'.format(datetime.now().strftime('%Y.%m.%d'))
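    # e.g. returns 'Some Group Name 2017.11.10' for a run on Nov 10, 2017.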
return playlist_name
def get_spotify_track_ids(spfy, scraped_data, track_ids, dump_info):
for post in scraped_data:
track_info = None
track_id = 0
try:
track_info = parse_track_and_artist(post.link_name)
except:
continue
try:
track_id = spfy.get_track_id_from_track_info(track_info)
except:
continue
if track_id != 0:
track_ids.append(track_id)
if dump_info is not None:
dump_info.append((post.link_name, track_info['artist'], track_info['track'], track_info['blob'], track_id))
def dump_scraped_posts(scrape_info, groupname, filename):
"""
Dumps posts from previous scraping to a csv file
    :param scrape_info: tuple of (link name, artist, track, blob, spotify track id)
:param groupname: name of the facebook group we're scraping
:param filename: File to dump our scrapejob
"""
dirname = "scrapes"
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(os.path.join(dirname, filename), 'w') as file:
w = csv.writer(file)
w.writerow([groupname])
w.writerow(["link name", "artist", "track", "blob", "spotify track id"])
for scraped_post in scrape_info:
w.writerow(scraped_post)
def get_criteria_from_user(**kwargs):
pass
def parse_track_and_artist(name):
"""
Attempt to parse a particular FB post for a track name and artist
:param name: link name from a post
"""
#Common track formats
#ARTIST - TRACK (YEAR)
#ARTIST - TRACK (Remix)(Official Video) HQ
#ARTIST - TRACK [Label information]
#ARTIST "TRACK"
#ARTIST : TRACK
#ARTIST :: TRACK
#TRACK by ARTIST
#TODO: Keep track name with remix information, remove official video or label info
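    # A sketch of the behaviour below (the title is made up):
    #   parse_track_and_artist('Massive Attack - Teardrop (Official Video) HQ')
    #   -> {'artist': 'Massive Attack ', 'track': ' Teardrop  ', 'blob': None}
    # Note the surrounding whitespace is kept, since split('-') does not strip.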
track_info = None
    # remove any parenthesized chunks, e.g. "(Remix)" or "(Official Video)"
    name = re.sub(r'\([^()]*\)', '', name)
    # remove label information, e.g. "[Label info]"
    name = re.sub(r'\[[^\]]*\]', '', name)
    # remove bare 1990s years from the title (the only range handled so far)
    name = re.sub(r'199\d', '', name)
    # remove "HQ" ("High Quality") from the title
    name = re.sub(r'HQ', '', name)
name_split = name.split('-')
if len(name_split) == 1:
print('Unable to parse post as ARTIST - TRACK')
print(name)
        if len(name) == 0:
raise Exception('Unable to parse post {}'.format(name))
track_info = {'blob': name, 'artist': None, 'track': None}
else:
track_info = {'artist': name_split[0], 'track': name_split[1], 'blob': None}
return track_info
def get_track_ids_from_file(in_file):
"""
TODO: path gore
:param in_file: path for the csv which contains previously scraped track ids
"""
#link name, artist, track, blob, track_id
tid_index = 4
tid_len = 22
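    # Spotify track ids are 22 base62 characters; anything else is junk.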
if not os.path.exists(in_file):
raise Exception('{} does not exist'.format(in_file))
track_ids = []
with open(in_file, 'r') as file:
reader = csv.reader(file)
        for row in reader:
if len(row) >= tid_index + 1:
tid = row[tid_index]
if len(tid)==tid_len:
track_ids.append(tid)
return track_ids
def validate_arguments(kwargs):
#check first for environment variables for id and secret
if 'fb_app_id' not in kwargs or kwargs['fb_app_id'] is None:
try:
print('trying to get fb app id from environment')
kwargs['fb_app_id'] = os.environ['FB_APP_ID']
except:
print('fb app id not passed nor set in environment')
exit(1)
if 'fb_app_secret' not in kwargs or kwargs['fb_app_secret'] is None:
try:
print('trying to get fb app secret from environment')
kwargs['fb_app_secret'] = os.environ['FB_APP_SECRET']
except:
print('fb app secret not passed in or set in environment')
exit(1)
if 'spfy_app_id' not in kwargs or kwargs['spfy_app_id'] is None:
try:
print('trying to get spotify app id from environment')
kwargs['spfy_app_id'] = os.environ['SPOTIPY_CLIENT_ID']
except:
print('spotify app id not passed nor set in environment')
exit(1)
if 'spfy_app_secret' not in kwargs or kwargs['spfy_app_secret'] is None:
try:
print('trying to get spotify app secret from environment')
kwargs['spfy_app_secret'] = os.environ['SPOTIPY_CLIENT_SECRET']
except:
print('spotify app secret not passed in nor set in environment')
exit(1)
if 'fb_group_id' not in kwargs or kwargs['fb_group_id'] is None:
print('group id not specified')
exit(1)
if 'spfy_user_id' not in kwargs or kwargs['spfy_user_id'] is None:
print('spotify user id not specified, tracks will not be added to playlist')
#Read from user if not silent and unspecified
#if ('silent' not in kwargs or not kwargs['silent']) and len(kwargs['authors'])==0 and 'begin_date' not in kwargs and 'end_date' not in kwargs and kwargs['min_likes']==0 and kwargs['min_loves']==0 and kwargs['limit']==0:
#get_criteria_from_user(kwargs)
def parse_arguments():
parser = argparse.ArgumentParser(description='scrape facebook posts into a spotify playlist')
parser.add_argument('fb_group_id', metavar='group_id', type=int, help='group id number for the facebook group to scrape')
parser.add_argument('--spfy_user_id', metavar='username', type=str, help='username for spotify account')
parser.add_argument('--fb_app_id', type=str, help='facebook app id registered for use with graph api. This overrides any value stored in the FB_APP_ID environment variable')
parser.add_argument('--fb_app_secret', type=str, help='facebook app secret. This overrides any value stored in the FB_APP_SECRET environment variable')
parser.add_argument('--spfy_app_id', type=str, help='spotify app id. This overrides any value stored in the SPOTIPY_CLIENT_ID environment variable')
parser.add_argument('--spfy_app_secret', type=str, help='spotify app secret. This overrides any value stored in the SPOTIPY_CLIENT_SECRET environment variable')
#TODO: specify format
parser.add_argument('--begin_date', type=str, help='begin date range for scraping the supplied facebook group in format yyyy-mm-dd',default='')
parser.add_argument('--end_date', type=str, help='end date range for scraping the supplied facebook group yyyy-mm-dd', default='')
parser.add_argument('--authors', type=str, nargs='+', help='specify list of authors whose posts we want', default=[])
parser.add_argument('--min_likes', type=int, help='minimum number of likes required to scrape a post into our playlist', default=0)
parser.add_argument('--min_loves', type=int, help='minimum number of loves required to scrape a post into our playlist', default=0)
parser.add_argument('--limit', type=int, help='limit the maximum number of tracks to add to our scraped playlist', default=0)
parser.add_argument('--silent', help='run script without prompting for criteria if none is provided', action='store_const', const=True, default=False)
parser.add_argument('--scrape_only', help='choose only to scrape data without adding to spotify playlist', action='store_const', const=True, default=False)
parser.add_argument('--in_file', type=str, help='specify path of a previous dump from which to create the playlist')
parser.add_argument('--out_file', type=str, help='specify out file for dumping scrape information')
parser.add_argument('--no_dump', help='set this flag if no output csv is desired', action='store_const', const=True, default=False)
return parser.parse_args()
if(__name__=="__main__"):
arguments = vars(parse_arguments())
print('parsed arguments')
print(arguments)
validate_arguments(arguments)
scrape_fb_group_to_spotify_playlist(**arguments)
|
|
# -*- coding: utf-8 -*-
"""This module contains some Neural Networks functions.
"""
__author__ = 'Wenzhi Mao'
__all__ = ["NeuralNet"]
class NeuralNet(object):
    '''A neural network class that sets up a network and performs training.
    The model is a standard Artificial Neural Network (ANN);
    training is performed by the BP (back-propagation) algorithm.'''
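    # A minimal usage sketch (data and shapes are illustrative):
    #   net = NeuralNet(webshape=[2, 3, 1])
    #   net.setInput([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
    #   net.setOutput([[0.], [1.], [1.], [0.]])
    #   net.fit(times=1000)
    #   print(net.predict([[1., 0.]]))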
def __init__(self, webshape=None, label=None, data=None, **kwargs):
        '''The init function of the `NeuralNet` class. `webshape`, `label`
        and `data` can be assigned through the options.'''
self.inputdata = None
self.outputdata = None
self.label = None
self.trans = None
self.webshape = None
self._clockdict = {}
if webshape is not None:
self.setWebshape(webshape)
if label is not None:
self.setLabel(label)
if data is not None and len(data) == 2:
self.setInput(data[0])
self.setOutput(data[1])
def _tic(self, key, **kwargs):
'''Add a start time for some key event.'''
from time import time
self._clockdict[key] = time()
def _toc(self, key, **kwargs):
'''Remove the key and return the time used for the key event.'''
from time import time
tic = self._clockdict.pop(key, None)
if tic:
return time() - tic
else:
return None
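    # e.g. self._tic('fit'); ...; elapsed = self._toc('fit') gives the elapsed
    # seconds, or None if 'fit' was never started.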
def __str__(self):
return "Neural Networks from mbio"
def _clear_data(self, keep=[], **kwargs):
        '''Delete all data in this class.
        Pass `keep` to list the attributes that should be preserved.'''
if not ('webshape' in keep or 'shape' in keep):
self.webshape = None
if not ('input' in keep or 'inputdata' in keep):
self.inputdata = None
if not ('output' in keep or 'outputdata' in keep):
self.outputdata = None
if not ('label' in keep):
self.label = None
if not ('trans' in keep):
self.trans = None
self._clockdict = {}
def setWebshape(self, webshape=[], **kwargs):
'''Set the web shape (nodes for each layer) for the network.'''
from ..IO.output import printInfo, printError
from numpy import array, int32, zeros, float64, matrix, random, ndarray
if len(webshape) == 0:
printError('No web shape data provided. Ignored.')
return None
webshape = array(webshape, dtype=int32)
if isinstance(self.webshape, ndarray) and (self.webshape == webshape).all():
            printInfo('webshape unchanged; trans left unchanged.', 1)
else:
if self.inputdata is not None and isinstance(self.inputdata, ndarray) and self.inputdata.shape[0] != webshape[0]:
printError("The web shape and the inputdata doesn't fit.")
self.inputdata = None
printError("The Input data has been deleted.")
if self.outputdata is not None and isinstance(self.outputdata, ndarray) and self.outputdata.shape[0] != webshape[-1]:
printError("The web shape and the outputdata doesn't fit.")
self.outputdata = None
printError("The Output data has been deleted.")
self.webshape = webshape
self.trans = zeros((webshape.shape[0] - 1), dtype=ndarray)
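            # trans[i] is a (webshape[i] + 1) x webshape[i + 1] weight matrix;
            # the extra row carries the bias weights, and entries start
            # uniformly in [-0.05, 0.05).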
for i in xrange(len(webshape) - 1):
self.trans[i] = matrix(
random.random((webshape[i] + 1, webshape[i + 1])) * 0.1 - 0.05, dtype=float64)
self.label = ["x{0}".format(i + 1) for i in xrange(webshape[0])]
return None
def setLabel(self, label=[], **kwargs):
'''Set the labels for the inputs.'''
from ..IO.output import printInfo, printError
if len(label) == 0:
printError('No label provided. Ignored.')
return None
if not isinstance(label, list):
label = list(label)
label = [str(i) for i in label]
if self.webshape is not None:
if self.webshape[0] != len(label):
printError(
"The length of the label list doesn't fit the webshape[0].")
else:
self.label = label
else:
self.label = label
def setInput(self, inputdata=None, **kwargs):
'''Set the input data for the network.'''
from ..IO.output import printInfo, printError
from numpy import ndarray, matrix, float64, zeros, int32
if inputdata is None:
            printError('No input data provided. Ignored.')
return None
if isinstance(inputdata, (ndarray, list)):
try:
inputdata = matrix(inputdata, dtype=float64)
except:
printError("Input data format mistake.")
return None
if isinstance(inputdata, matrix):
if not inputdata.dtype == float64:
inputdata = matrix(inputdata, dtype=float64)
else:
printError(
"Please provide a list, numpy.ndarray or a numpy.matrix.")
return None
if inputdata.ndim != 2:
printError("The input must be 2-dimension data.")
return None
if self.webshape is not None:
if inputdata.shape[1] != self.webshape[0]:
printError(
"The input data shape[1] doesn't fit the webshape[0].")
return None
if self.outputdata is not None:
            if inputdata.shape[0] != self.outputdata.shape[0]:
printError("The sizes of Input and Output are different.")
return None
self.inputdata = inputdata
return None
def setOutput(self, outputdata=None, **kwargs):
'''Set the output data for the network.'''
from ..IO.output import printInfo, printError
from numpy import ndarray, matrix, float64, zeros, int32
if outputdata is None:
printError('No output data provided. Ignored.')
return None
if isinstance(outputdata, (ndarray, list)):
try:
outputdata = matrix(outputdata, dtype=float64)
except:
printError("Output data format mistake.")
return None
if isinstance(outputdata, matrix):
if not outputdata.dtype == float64:
outputdata = matrix(outputdata, dtype=float64)
else:
printError(
"Please provide a list, numpy.ndarray or a numpy.matrix.")
return None
if outputdata.ndim != 2:
printError("The output must be 2-dimension data.")
return None
if self.webshape is not None:
if outputdata.shape[1] != self.webshape[-1]:
printError(
"The output data shape[1] doesn't fit the webshape[-1].")
return None
if self.inputdata is not None:
if self.inputdata.shape[0] != outputdata.shape[0]:
printError("The sizes of Input and Output are different.")
return None
self.outputdata = outputdata
return None
def fit(self, times, step='auto', **kwargs):
        '''Train the network with the BP algorithm for `times` iterations.'''
from .CNN_p import fit_ANN_BP
from ..IO.output import printInfo, printError
if self.inputdata.shape[0] != self.outputdata.shape[0]:
printError("The sizes of Input and Output are different.")
return None
        if self.inputdata.shape[1] != self.webshape[0]:
            printError("The size of Input does not fit the webshape.")
            return None
        if self.outputdata.shape[1] != self.webshape[-1]:
            printError("The size of Output does not fit the webshape.")
            return None
for i in xrange(len(self.webshape) - 1):
if self.trans[i].shape != (self.webshape[i] + 1, self.webshape[i + 1]):
printError("`trans`[{0}] shape wrong".format(i))
result = 0
return None
result = fit_ANN_BP(shape=self.webshape, input=self.inputdata,
output=self.outputdata, trans=self.trans, times=times)
if isinstance(result, list):
if result[0] is None:
printError(result[1])
return None
return result
# def __simulate_point(self, inputd, outputd, step=0.1, **kwargs):
# '''Train one data point.'''
# outputd1 = np.array(outputd)
# layer = len(self.webshape) - 1
# temp = inputd
# save = [np.array(inputd)]
# for i in xrange(layer):
# temp = np.concatenate((temp, np.ones((1, 1))), axis=1)
# temp = temp.dot(self.trans[i])
# temp = 1. / (1. + np.exp(-temp))
# save = save + [np.array(temp)]
# wucha = [save[-1] * (1 - save[-1]) * (outputd1 - save[-1])]
# for i in reversed(xrange(layer)):
# wucha = [
# save[i] * (1 - save[i]) * np.array(wucha[0].dot(self.trans[i][:-1].T))] + wucha
# for i in xrange(layer):
# self.trans[i] = self.trans[
# i] + (np.concatenate((save[i], np.ones((1, 1))), axis=1).T.dot(wucha[i + 1])) * step
# return ((outputd1 - save[-1])*(outputd1 - save[-1])).sum()
    def predict(self, inputtest, **kwargs):
        '''Feed `inputtest` (one sample per row) forward through the network
        and return the output-layer activations. The propagation mirrors the
        commented BP code above: append a bias column, multiply by `trans`,
        and apply the logistic sigmoid at each layer.'''
        from ..IO.output import printInfo, printError
        from numpy import ndarray, array, float64, concatenate, ones, exp
        if not isinstance(inputtest, ndarray):
            inputtest = array(inputtest, dtype=float64)
        if inputtest.ndim == 1:
            inputtest = inputtest.reshape(1, -1)
        temp = inputtest
        for j in xrange(len(self.webshape) - 1):
            # Append the bias column, apply the weights, then the sigmoid.
            temp = concatenate((temp, ones((temp.shape[0], 1))), axis=1)
            temp = temp.dot(self.trans[j])
            temp = 1. / (1. + exp(-temp))
        return temp
def _check(self, **kwargs):
'''Check all data compatible to each other.
Check only the data for calculation.'''
from ..IO.output import printInfo, printError
from numpy import ndarray, int32, float64
result = 1
# Type Check
# webshape
if not isinstance(self.webshape, ndarray):
printInfo("`webshape` type")
result = 0
else:
if self.webshape.dtype != int32:
printInfo("`webshape` dtype")
result = 0
if self.webshape.ndim != 1:
printInfo("`webshape` shape")
result = 0
if len(self.webshape) < 2:
printInfo("`webshape` must has more than 2 layers")
result = 0
# inputdata
if not isinstance(self.inputdata, ndarray):
printInfo("`inputdata` type")
result = 0
else:
if self.inputdata.dtype != float64:
printInfo("`inputdata` dtype")
result = 0
if self.inputdata.ndim != 2:
printInfo("`inputdata` shape")
result = 0
# outputdata
if not isinstance(self.outputdata, ndarray):
printInfo("`outputdata` type")
result = 0
else:
if self.outputdata.dtype != float64:
printInfo("`outputdata` dtype")
result = 0
if self.outputdata.ndim != 2:
printInfo("`outputdata` shape")
result = 0
# trans
if not isinstance(self.trans, ndarray):
printInfo("`trans` type")
result = 0
else:
if self.trans.dtype != object:
printInfo("`trans` dtype")
result = 0
if self.trans.ndim != 1:
printInfo("`trans` shape")
result = 0
for i in xrange(len(self.trans)):
if not isinstance(self.trans[i], ndarray):
printInfo("`trans[{0}]` dtype".format(i))
result = 0
# webshape
if isinstance(self.inputdata, ndarray) and isinstance(self.webshape, ndarray):
if self.inputdata.shape[1] != self.webshape[0]:
printInfo("`inputdata` doesn't fit webshape[0]")
result = 0
if isinstance(self.outputdata, ndarray) and isinstance(self.webshape, ndarray):
if self.outputdata.shape[1] != self.webshape[-1]:
printInfo("`outputdata` doesn't fit webshape[-1]")
result = 0
if isinstance(self.inputdata, ndarray) and isinstance(self.outputdata, ndarray):
if self.inputdata.shape[0] != self.outputdata.shape[0]:
printInfo("`inputdata` doesn't fit `outputdata`")
result = 0
if isinstance(self.webshape, ndarray) and isinstance(self.trans, ndarray):
for i in xrange(len(self.webshape) - 1):
if self.trans[i].shape != (self.webshape[i] + 1, self.webshape[i + 1]):
printInfo("`trans`[{0}] shape wrong".format(i))
result = 0
return bool(result)
# def keepsize(n, **kwargs):
# '''return the sample size for a data set n.'''
# return int(n - round((1 - 1. / n)**n * n))
# def split_train_test(a, keep=None, **kwargs):
# '''Split the data to train set and test set by random sample.'''
# n = a.shape[0]
# if keep == None:
# keep = keepsize(n)
# keeplist = xrange(n)
# removelist = []
# while len(keeplist) > keep:
# removelist.append(
# keeplist.pop(np.random.random_integers(0, len(keeplist) - 1)))
# np.random.shuffle(removelist)
# np.random.shuffle(keeplist)
# return a[keeplist], a[removelist]
# def toclass(a, classier=None, **kwargs):
# '''convert a matrix to a matrix split every class.'''
# if classier != None:
# setitem = sorted(list(set([i.item() for i in a])))
# else:
# setitem = classier
# result = np.matrix(np.zeros((a.shape[0], len(setitem))))
# for i in xrange(len(setitem)):
# result[:, i] = (a == setitem[i])
# return result
|
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Blink IDL Intermediate Representation (IR) classes.
Classes are primarily constructors, which build an IdlDefinitions object
(and various contained objects) from an AST (produced by blink_idl_parser).
IR stores typedefs and they are resolved by the code generator.
Typedef resolution uses some auxiliary classes and OOP techniques to make this
a generic call. See TypedefResolver class in code_generator_v8.py.
Class hierarchy (mostly containment, '<' for inheritance):
IdlDefinitions
IdlCallbackFunction < TypedObject
IdlEnum :: FIXME: remove, just use a dict for enums
IdlInterface
IdlAttribute < TypedObject
IdlConstant < TypedObject
IdlLiteral
IdlOperation < TypedObject
IdlArgument < TypedObject
IdlSerializer
IdlStringifier
IdlIterable < IdlIterableOrMaplikeOrSetlike
IdlMaplike < IdlIterableOrMaplikeOrSetlike
IdlSetlike < IdlIterableOrMaplikeOrSetlike
IdlException < IdlInterface
(same contents as IdlInterface)
TypedObject :: Object with one or more attributes that is a type.
IdlArgument is 'picklable', as it is stored in interfaces_info.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import abc
from idl_types import IdlType, IdlUnionType, IdlArrayType, IdlSequenceType, IdlFrozenArrayType, IdlNullableType
SPECIAL_KEYWORD_LIST = ['GETTER', 'SETTER', 'DELETER']
################################################################################
# TypedObject
################################################################################
class TypedObject(object):
"""Object with a type, such as an Attribute or Operation (return value).
The type can be an actual type, or can be a typedef, which must be resolved
by the TypedefResolver before passing data to the code generator.
"""
__metaclass__ = abc.ABCMeta
idl_type_attributes = ('idl_type',)
################################################################################
# Definitions (main container class)
################################################################################
class IdlDefinitions(object):
def __init__(self, idl_name, node):
"""Args: node: AST root node, class == 'File'"""
self.callback_functions = {}
self.dictionaries = {}
self.enumerations = {}
self.implements = []
self.interfaces = {}
self.idl_name = idl_name
self.typedefs = {}
node_class = node.GetClass()
if node_class != 'File':
raise ValueError('Unrecognized node class: %s' % node_class)
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Interface':
interface = IdlInterface(idl_name, child)
self.interfaces[interface.name] = interface
elif child_class == 'Exception':
exception = IdlException(idl_name, child)
# For simplicity, treat exceptions as interfaces
self.interfaces[exception.name] = exception
elif child_class == 'Typedef':
typedef = IdlTypedef(child)
self.typedefs[typedef.name] = typedef
elif child_class == 'Enum':
enumeration = IdlEnum(idl_name, child)
self.enumerations[enumeration.name] = enumeration
elif child_class == 'Callback':
callback_function = IdlCallbackFunction(idl_name, child)
self.callback_functions[callback_function.name] = callback_function
elif child_class == 'Implements':
self.implements.append(IdlImplement(child))
elif child_class == 'Dictionary':
dictionary = IdlDictionary(idl_name, child)
self.dictionaries[dictionary.name] = dictionary
else:
raise ValueError('Unrecognized node class: %s' % child_class)
def accept(self, visitor):
visitor.visit_definitions(self)
for interface in self.interfaces.itervalues():
interface.accept(visitor)
for callback_function in self.callback_functions.itervalues():
callback_function.accept(visitor)
for dictionary in self.dictionaries.itervalues():
dictionary.accept(visitor)
for typedef in self.typedefs.itervalues():
typedef.accept(visitor)
def update(self, other):
"""Update with additional IdlDefinitions."""
for interface_name, new_interface in other.interfaces.iteritems():
if not new_interface.is_partial:
# Add as new interface
self.interfaces[interface_name] = new_interface
continue
# Merge partial to existing interface
try:
self.interfaces[interface_name].merge(new_interface)
except KeyError:
raise Exception('Tried to merge partial interface for {0}, '
'but no existing interface by that name'
.format(interface_name))
# Merge callbacks and enumerations
self.enumerations.update(other.enumerations)
self.callback_functions.update(other.callback_functions)
################################################################################
# Callback Functions
################################################################################
class IdlCallbackFunction(TypedObject):
def __init__(self, idl_name, node):
children = node.GetChildren()
num_children = len(children)
if num_children != 2:
raise ValueError('Expected 2 children, got %s' % num_children)
type_node, arguments_node = children
arguments_node_class = arguments_node.GetClass()
if arguments_node_class != 'Arguments':
raise ValueError('Expected Arguments node, got %s' % arguments_node_class)
self.idl_name = idl_name
self.name = node.GetName()
self.idl_type = type_node_to_type(type_node)
self.arguments = arguments_node_to_arguments(idl_name, arguments_node)
def accept(self, visitor):
visitor.visit_callback_function(self)
for argument in self.arguments:
argument.accept(visitor)
################################################################################
# Dictionary
################################################################################
class IdlDictionary(object):
def __init__(self, idl_name, node):
self.extended_attributes = {}
self.is_partial = bool(node.GetProperty('Partial'))
self.idl_name = idl_name
self.name = node.GetName()
self.members = []
self.parent = None
for child in node.GetChildren():
child_class = child.GetClass()
if child_class == 'Inherit':
self.parent = child.GetName()
elif child_class == 'Key':
self.members.append(IdlDictionaryMember(idl_name, child))
elif child_class == 'ExtAttributes':
self.extended_attributes = (
ext_attributes_node_to_extended_attributes(idl_name, child))
else:
raise ValueError('Unrecognized node class: %s' % child_class)
def accept(self, visitor):
visitor.visit_dictionary(self)
for member in self.members:
member.accept(visitor)
class IdlDictionaryMember(TypedObject):
def __init__(self, idl_name, node):
self.default_value = None
self.extended_attributes = {}
self.idl_type = None
self.idl_name = idl_name
self.is_required = bool(node.GetProperty('REQUIRED'))
self.name = node.GetName()
for child in node.GetChildren():
child_class = child.GetClass()
if child_class == 'Type':
self.idl_type = type_node_to_type(child)
elif child_class == 'Default':
self.default_value = default_node_to_idl_literal(child)
elif child_class == 'ExtAttributes':
self.extended_attributes = (
ext_attributes_node_to_extended_attributes(idl_name, child))
else:
raise ValueError('Unrecognized node class: %s' % child_class)
def accept(self, visitor):
visitor.visit_dictionary_member(self)
################################################################################
# Enumerations
################################################################################
class IdlEnum(object):
# FIXME: remove, just treat enums as a dictionary
def __init__(self, idl_name, node):
self.idl_name = idl_name
self.name = node.GetName()
self.values = []
for child in node.GetChildren():
self.values.append(child.GetName())
################################################################################
# Typedefs
################################################################################
class IdlTypedef(object):
idl_type_attributes = ('idl_type',)
def __init__(self, node):
self.name = node.GetName()
self.idl_type = typedef_node_to_type(node)
def accept(self, visitor):
visitor.visit_typedef(self)
################################################################################
# Interfaces and Exceptions
################################################################################
class IdlInterface(object):
def __init__(self, idl_name, node=None):
self.attributes = []
self.constants = []
self.constructors = []
self.custom_constructors = []
self.extended_attributes = {}
self.operations = []
self.parent = None
self.serializer = None
self.stringifier = None
self.iterable = None
self.has_indexed_elements = False
self.maplike = None
self.setlike = None
self.original_interface = None
self.partial_interfaces = []
if not node: # Early exit for IdlException.__init__
return
self.is_callback = bool(node.GetProperty('CALLBACK'))
self.is_exception = False
# FIXME: uppercase 'Partial' => 'PARTIAL' in base IDL parser
self.is_partial = bool(node.GetProperty('Partial'))
self.idl_name = idl_name
self.name = node.GetName()
self.idl_type = IdlType(self.name)
has_indexed_property_getter = False
has_integer_typed_length = False
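        # An interface "has indexed elements" when it exposes both an indexed
        # property getter (a 'getter' special taking 'unsigned long') and an
        # integer-typed 'length' attribute; both flags are checked below.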
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Attribute':
attr = IdlAttribute(idl_name, child)
if attr.idl_type.is_integer_type and attr.name == 'length':
has_integer_typed_length = True
self.attributes.append(attr)
elif child_class == 'Const':
self.constants.append(IdlConstant(idl_name, child))
elif child_class == 'ExtAttributes':
extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
self.constructors, self.custom_constructors = (
extended_attributes_to_constructors(idl_name, extended_attributes))
clear_constructor_attributes(extended_attributes)
self.extended_attributes = extended_attributes
elif child_class == 'Operation':
op = IdlOperation(idl_name, child)
if 'getter' in op.specials and str(op.arguments[0].idl_type) == 'unsigned long':
has_indexed_property_getter = True
self.operations.append(op)
elif child_class == 'Inherit':
self.parent = child.GetName()
elif child_class == 'Serializer':
self.serializer = IdlSerializer(idl_name, child)
self.process_serializer()
elif child_class == 'Stringifier':
self.stringifier = IdlStringifier(idl_name, child)
self.process_stringifier()
elif child_class == 'Iterable':
self.iterable = IdlIterable(idl_name, child)
elif child_class == 'Maplike':
self.maplike = IdlMaplike(idl_name, child)
elif child_class == 'Setlike':
self.setlike = IdlSetlike(idl_name, child)
else:
raise ValueError('Unrecognized node class: %s' % child_class)
if len(filter(None, [self.iterable, self.maplike, self.setlike])) > 1:
raise ValueError('Interface can only have one of iterable<>, maplike<> and setlike<>.')
if has_integer_typed_length and has_indexed_property_getter:
self.has_indexed_elements = True
def accept(self, visitor):
visitor.visit_interface(self)
for attribute in self.attributes:
attribute.accept(visitor)
for constant in self.constants:
constant.accept(visitor)
for constructor in self.constructors:
constructor.accept(visitor)
for custom_constructor in self.custom_constructors:
custom_constructor.accept(visitor)
for operation in self.operations:
operation.accept(visitor)
if self.iterable:
self.iterable.accept(visitor)
elif self.maplike:
self.maplike.accept(visitor)
elif self.setlike:
self.setlike.accept(visitor)
def process_serializer(self):
"""Add the serializer's named operation child, if it has one, as a regular
operation of this interface."""
if self.serializer.operation:
self.operations.append(self.serializer.operation)
def process_stringifier(self):
"""Add the stringifier's attribute or named operation child, if it has
one, as a regular attribute/operation of this interface."""
if self.stringifier.attribute:
self.attributes.append(self.stringifier.attribute)
elif self.stringifier.operation:
self.operations.append(self.stringifier.operation)
def merge(self, other):
"""Merge in another interface's members (e.g., partial interface)"""
self.attributes.extend(other.attributes)
self.constants.extend(other.constants)
self.operations.extend(other.operations)
if self.serializer is None:
self.serializer = other.serializer
if self.stringifier is None:
self.stringifier = other.stringifier
class IdlException(IdlInterface):
    # Properly, exceptions and interfaces are distinct, and thus should inherit
    # a common base class (say, "IdlExceptionOrInterface").
# However, there is only one exception (DOMException), and new exceptions
# are not expected. Thus it is easier to implement exceptions as a
# restricted subclass of interfaces.
# http://www.w3.org/TR/WebIDL/#idl-exceptions
def __init__(self, idl_name, node):
# Exceptions are similar to Interfaces, but simpler
IdlInterface.__init__(self, idl_name)
self.is_callback = False
self.is_exception = True
self.is_partial = False
self.idl_name = idl_name
self.name = node.GetName()
self.idl_type = IdlType(self.name)
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Attribute':
attribute = IdlAttribute(idl_name, child)
self.attributes.append(attribute)
elif child_class == 'Const':
self.constants.append(IdlConstant(idl_name, child))
elif child_class == 'ExtAttributes':
extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
self.constructors, self.custom_constructors = (
extended_attributes_to_constructors(idl_name, extended_attributes))
clear_constructor_attributes(extended_attributes)
self.extended_attributes = extended_attributes
elif child_class == 'ExceptionOperation':
self.operations.append(IdlOperation.from_exception_operation_node(idl_name, child))
else:
raise ValueError('Unrecognized node class: %s' % child_class)
################################################################################
# Attributes
################################################################################
class IdlAttribute(TypedObject):
def __init__(self, idl_name, node):
self.is_read_only = bool(node.GetProperty('READONLY'))
self.is_static = bool(node.GetProperty('STATIC'))
self.idl_name = idl_name
self.name = node.GetName()
# Defaults, overridden below
self.idl_type = None
self.extended_attributes = {}
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Type':
self.idl_type = type_node_to_type(child)
elif child_class == 'ExtAttributes':
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
else:
raise ValueError('Unrecognized node class: %s' % child_class)
def accept(self, visitor):
visitor.visit_attribute(self)
################################################################################
# Constants
################################################################################
class IdlConstant(TypedObject):
def __init__(self, idl_name, node):
children = node.GetChildren()
num_children = len(children)
if num_children < 2 or num_children > 3:
raise ValueError('Expected 2 or 3 children, got %s' % num_children)
type_node = children[0]
value_node = children[1]
value_node_class = value_node.GetClass()
if value_node_class != 'Value':
raise ValueError('Expected Value node, got %s' % value_node_class)
self.idl_name = idl_name
self.name = node.GetName()
# ConstType is more limited than Type, so subtree is smaller and
# we don't use the full type_node_to_type function.
self.idl_type = type_node_inner_to_type(type_node)
# FIXME: This code is unnecessarily complicated due to the rather
# inconsistent way the upstream IDL parser outputs default values.
# http://crbug.com/374178
if value_node.GetProperty('TYPE') == 'float':
self.value = value_node.GetProperty('VALUE')
else:
self.value = value_node.GetName()
if num_children == 3:
ext_attributes_node = children[2]
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, ext_attributes_node)
else:
self.extended_attributes = {}
def accept(self, visitor):
visitor.visit_constant(self)
################################################################################
# Literals
################################################################################
class IdlLiteral(object):
def __init__(self, idl_type, value):
self.idl_type = idl_type
self.value = value
self.is_null = False
def __str__(self):
if self.idl_type == 'DOMString':
return 'String("%s")' % self.value
if self.idl_type == 'integer':
return '%d' % self.value
if self.idl_type == 'float':
return '%g' % self.value
if self.idl_type == 'boolean':
return 'true' if self.value else 'false'
raise ValueError('Unsupported literal type: %s' % self.idl_type)
class IdlLiteralNull(IdlLiteral):
def __init__(self):
self.idl_type = 'NULL'
self.value = None
self.is_null = True
def __str__(self):
return 'nullptr'
def default_node_to_idl_literal(node):
# FIXME: This code is unnecessarily complicated due to the rather
# inconsistent way the upstream IDL parser outputs default values.
# http://crbug.com/374178
idl_type = node.GetProperty('TYPE')
if idl_type == 'DOMString':
value = node.GetProperty('NAME')
if '"' in value or '\\' in value:
raise ValueError('Unsupported string value: %r' % value)
return IdlLiteral(idl_type, value)
if idl_type == 'integer':
return IdlLiteral(idl_type, int(node.GetProperty('NAME'), base=0))
if idl_type == 'float':
return IdlLiteral(idl_type, float(node.GetProperty('VALUE')))
if idl_type in ['boolean', 'sequence']:
return IdlLiteral(idl_type, node.GetProperty('VALUE'))
if idl_type == 'NULL':
return IdlLiteralNull()
raise ValueError('Unrecognized default value type: %s' % idl_type)
################################################################################
# Operations
################################################################################
class IdlOperation(TypedObject):
def __init__(self, idl_name, node=None):
self.arguments = []
self.extended_attributes = {}
self.specials = []
self.is_constructor = False
self.idl_name = idl_name
self.idl_type = None
self.is_static = False
if not node:
return
self.name = node.GetName() # FIXME: should just be: or ''
# FIXME: AST should use None internally
if self.name == '_unnamed_':
self.name = ''
self.is_static = bool(node.GetProperty('STATIC'))
property_dictionary = node.GetProperties()
for special_keyword in SPECIAL_KEYWORD_LIST:
if special_keyword in property_dictionary:
self.specials.append(special_keyword.lower())
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Arguments':
self.arguments = arguments_node_to_arguments(idl_name, child)
elif child_class == 'Type':
self.idl_type = type_node_to_type(child)
elif child_class == 'ExtAttributes':
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
else:
raise ValueError('Unrecognized node class: %s' % child_class)
@classmethod
def from_exception_operation_node(cls, idl_name, node):
# Needed to handle one case in DOMException.idl:
# // Override in a Mozilla compatible format
# [NotEnumerable] DOMString toString();
# FIXME: can we remove this? replace with a stringifier?
operation = cls(idl_name)
operation.name = node.GetName()
children = node.GetChildren()
if len(children) < 1 or len(children) > 2:
raise ValueError('ExceptionOperation node with %s children, expected 1 or 2' % len(children))
type_node = children[0]
operation.idl_type = type_node_to_type(type_node)
if len(children) > 1:
ext_attributes_node = children[1]
operation.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, ext_attributes_node)
return operation
@classmethod
def constructor_from_arguments_node(cls, name, idl_name, arguments_node):
constructor = cls(idl_name)
constructor.name = name
constructor.arguments = arguments_node_to_arguments(idl_name, arguments_node)
constructor.is_constructor = True
return constructor
def accept(self, visitor):
visitor.visit_operation(self)
for argument in self.arguments:
argument.accept(visitor)
################################################################################
# Arguments
################################################################################
class IdlArgument(TypedObject):
def __init__(self, idl_name, node=None):
self.extended_attributes = {}
self.idl_type = None
self.is_optional = False # syntax: (optional T)
self.is_variadic = False # syntax: (T...)
self.idl_name = idl_name
self.default_value = None
if not node:
return
self.is_optional = node.GetProperty('OPTIONAL')
self.name = node.GetName()
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Type':
self.idl_type = type_node_to_type(child)
elif child_class == 'ExtAttributes':
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
elif child_class == 'Argument':
child_name = child.GetName()
if child_name != '...':
raise ValueError('Unrecognized Argument node; expected "...", got "%s"' % child_name)
self.is_variadic = bool(child.GetProperty('ELLIPSIS'))
elif child_class == 'Default':
self.default_value = default_node_to_idl_literal(child)
else:
raise ValueError('Unrecognized node class: %s' % child_class)
def __getstate__(self):
# FIXME: Return a picklable object which has enough information to
# unpickle.
return {}
def __setstate__(self, state):
pass
def accept(self, visitor):
visitor.visit_argument(self)
def arguments_node_to_arguments(idl_name, node):
# [Constructor] and [CustomConstructor] without arguments (the bare form)
# have None instead of an arguments node, but have the same meaning as using
# an empty argument list, [Constructor()], so special-case this.
# http://www.w3.org/TR/WebIDL/#Constructor
if node is None:
return []
return [IdlArgument(idl_name, argument_node)
for argument_node in node.GetChildren()]
################################################################################
# Serializers
################################################################################
class IdlSerializer(object):
def __init__(self, idl_name, node):
self.attribute_name = node.GetProperty('ATTRIBUTE')
self.attribute_names = None
self.operation = None
self.extended_attributes = {}
self.is_attribute = False
self.is_getter = False
self.is_inherit = False
self.is_list = False
self.is_map = False
self.idl_name = idl_name
for child in node.GetChildren():
child_class = child.GetClass()
if child_class == 'Operation':
self.operation = IdlOperation(idl_name, child)
elif child_class == 'List':
self.is_list = True
self.is_getter = bool(child.GetProperty('GETTER'))
self.attributes = child.GetProperty('ATTRIBUTES')
elif child_class == 'Map':
self.is_map = True
self.is_attribute = bool(child.GetProperty('ATTRIBUTE'))
self.is_getter = bool(child.GetProperty('GETTER'))
self.is_inherit = bool(child.GetProperty('INHERIT'))
self.attributes = child.GetProperty('ATTRIBUTES')
elif child_class == 'ExtAttributes':
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
else:
raise ValueError('Unrecognized node class: %s' % child_class)
################################################################################
# Stringifiers
################################################################################
class IdlStringifier(object):
def __init__(self, idl_name, node):
self.attribute = None
self.operation = None
self.extended_attributes = {}
self.idl_name = idl_name
for child in node.GetChildren():
child_class = child.GetClass()
if child_class == 'Attribute':
self.attribute = IdlAttribute(idl_name, child)
elif child_class == 'Operation':
operation = IdlOperation(idl_name, child)
if operation.name:
self.operation = operation
elif child_class == 'ExtAttributes':
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
else:
raise ValueError('Unrecognized node class: %s' % child_class)
        # Copy the stringifier's extended attributes (such as [Unforgeable])
        # onto the underlying attribute or operation, if there is one.
if self.attribute or self.operation:
(self.attribute or self.operation).extended_attributes.update(
self.extended_attributes)
################################################################################
# Iterable, Maplike, Setlike
################################################################################
class IdlIterableOrMaplikeOrSetlike(TypedObject):
def __init__(self, idl_name, node):
self.extended_attributes = {}
self.type_children = []
for child in node.GetChildren():
child_class = child.GetClass()
if child_class == 'ExtAttributes':
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
elif child_class == 'Type':
self.type_children.append(child)
else:
raise ValueError('Unrecognized node class: %s' % child_class)
class IdlIterable(IdlIterableOrMaplikeOrSetlike):
idl_type_attributes = ('key_type', 'value_type')
def __init__(self, idl_name, node):
super(IdlIterable, self).__init__(idl_name, node)
if len(self.type_children) == 1:
self.key_type = None
self.value_type = type_node_to_type(self.type_children[0])
elif len(self.type_children) == 2:
self.key_type = type_node_to_type(self.type_children[0])
self.value_type = type_node_to_type(self.type_children[1])
else:
raise ValueError('Unexpected number of type children: %d' % len(self.type_children))
del self.type_children
def accept(self, visitor):
visitor.visit_iterable(self)
class IdlMaplike(IdlIterableOrMaplikeOrSetlike):
idl_type_attributes = ('key_type', 'value_type')
def __init__(self, idl_name, node):
super(IdlMaplike, self).__init__(idl_name, node)
self.is_read_only = bool(node.GetProperty('READONLY'))
if len(self.type_children) == 2:
self.key_type = type_node_to_type(self.type_children[0])
self.value_type = type_node_to_type(self.type_children[1])
else:
raise ValueError('Unexpected number of children: %d' % len(self.type_children))
del self.type_children
def accept(self, visitor):
visitor.visit_maplike(self)
class IdlSetlike(IdlIterableOrMaplikeOrSetlike):
idl_type_attributes = ('value_type',)
def __init__(self, idl_name, node):
super(IdlSetlike, self).__init__(idl_name, node)
self.is_read_only = bool(node.GetProperty('READONLY'))
if len(self.type_children) == 1:
self.value_type = type_node_to_type(self.type_children[0])
else:
raise ValueError('Unexpected number of children: %d' % len(self.type_children))
del self.type_children
def accept(self, visitor):
visitor.visit_setlike(self)
################################################################################
# Implement statements
################################################################################
class IdlImplement(object):
def __init__(self, node):
self.left_interface = node.GetName()
self.right_interface = node.GetProperty('REFERENCE')
################################################################################
# Extended attributes
################################################################################
class Exposure:
"""An Exposure holds one Exposed or RuntimeEnabled condition.
Each exposure has two properties: exposed and runtime_enabled.
Exposure(e, r) corresponds to [Exposed(e r)]. Exposure(e) corresponds to
[Exposed=e].
"""
def __init__(self, exposed, runtime_enabled=None):
self.exposed = exposed
self.runtime_enabled = runtime_enabled
def ext_attributes_node_to_extended_attributes(idl_name, node):
"""
Returns:
Dictionary of {ExtAttributeName: ExtAttributeValue}.
Value is usually a string, with these exceptions:
Constructors: value is a list of Arguments nodes, corresponding to
possible signatures of the constructor.
CustomConstructors: value is a list of Arguments nodes, corresponding to
possible signatures of the custom constructor.
NamedConstructor: value is a Call node, corresponding to the single
signature of the named constructor.
SetWrapperReferenceTo: value is an Arguments node.
"""
# Primarily just make a dictionary from the children.
# The only complexity is handling various types of constructors:
# Constructors and Custom Constructors can have duplicate entries due to
# overloading, and thus are stored in temporary lists.
# However, Named Constructors cannot be overloaded, and thus do not have
# a list.
# FIXME: move Constructor logic into separate function, instead of modifying
# extended attributes in-place.
constructors = []
custom_constructors = []
extended_attributes = {}
def child_node(extended_attribute_node):
children = extended_attribute_node.GetChildren()
if not children:
return None
if len(children) > 1:
raise ValueError('ExtAttributes node with %s children, expected at most 1' % len(children))
return children[0]
extended_attribute_node_list = node.GetChildren()
for extended_attribute_node in extended_attribute_node_list:
name = extended_attribute_node.GetName()
child = child_node(extended_attribute_node)
child_class = child and child.GetClass()
if name == 'Constructor':
if child_class and child_class != 'Arguments':
raise ValueError('Constructor only supports Arguments as child, but has child of class: %s' % child_class)
constructors.append(child)
elif name == 'CustomConstructor':
if child_class and child_class != 'Arguments':
raise ValueError('[CustomConstructor] only supports Arguments as child, but has child of class: %s' % child_class)
custom_constructors.append(child)
elif name == 'NamedConstructor':
if child_class and child_class != 'Call':
raise ValueError('[NamedConstructor] only supports Call as child, but has child of class: %s' % child_class)
extended_attributes[name] = child
elif name == 'SetWrapperReferenceTo':
if not child:
raise ValueError('[SetWrapperReferenceTo] requires a child, but has none.')
children = child.GetChildren()
if len(children) != 1:
raise ValueError('[SetWrapperReferenceTo] supports only one child.')
if child_class != 'Arguments':
raise ValueError('[SetWrapperReferenceTo] only supports Arguments as child, but has child of class: %s' % child_class)
extended_attributes[name] = IdlArgument(idl_name, children[0])
elif name == 'Exposed':
if child_class and child_class != 'Arguments':
raise ValueError('[Exposed] only supports Arguments as child, but has child of class: %s' % child_class)
exposures = []
if child_class == 'Arguments':
exposures = [Exposure(exposed=str(arg.idl_type),
runtime_enabled=arg.name)
for arg in arguments_node_to_arguments('*', child)]
else:
value = extended_attribute_node.GetProperty('VALUE')
if type(value) is str:
exposures = [Exposure(exposed=value)]
else:
exposures = [Exposure(exposed=v) for v in value]
extended_attributes[name] = exposures
elif child:
raise ValueError('ExtAttributes node with unexpected children: %s' % name)
else:
value = extended_attribute_node.GetProperty('VALUE')
extended_attributes[name] = value
# Store constructors and custom constructors in special list attributes,
# which are deleted later. Note plural in key.
if constructors:
extended_attributes['Constructors'] = constructors
if custom_constructors:
extended_attributes['CustomConstructors'] = custom_constructors
return extended_attributes
def extended_attributes_to_constructors(idl_name, extended_attributes):
"""Returns constructors and custom_constructors (lists of IdlOperations).
Auxiliary function for IdlInterface.__init__.
"""
constructor_list = extended_attributes.get('Constructors', [])
constructors = [
IdlOperation.constructor_from_arguments_node('Constructor', idl_name, arguments_node)
for arguments_node in constructor_list]
custom_constructor_list = extended_attributes.get('CustomConstructors', [])
custom_constructors = [
IdlOperation.constructor_from_arguments_node('CustomConstructor', idl_name, arguments_node)
for arguments_node in custom_constructor_list]
if 'NamedConstructor' in extended_attributes:
# FIXME: support overloaded named constructors, and make homogeneous
name = 'NamedConstructor'
call_node = extended_attributes['NamedConstructor']
extended_attributes['NamedConstructor'] = call_node.GetName()
children = call_node.GetChildren()
if len(children) != 1:
raise ValueError('NamedConstructor node expects 1 child, got %s.' % len(children))
arguments_node = children[0]
named_constructor = IdlOperation.constructor_from_arguments_node('NamedConstructor', idl_name, arguments_node)
# FIXME: should return named_constructor separately; appended for Perl
constructors.append(named_constructor)
return constructors, custom_constructors
def clear_constructor_attributes(extended_attributes):
# Deletes Constructor*s* (plural), sets Constructor (singular)
if 'Constructors' in extended_attributes:
del extended_attributes['Constructors']
extended_attributes['Constructor'] = None
if 'CustomConstructors' in extended_attributes:
del extended_attributes['CustomConstructors']
extended_attributes['CustomConstructor'] = None
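# A minimal sketch (illustration only): clear_constructor_attributes collapses
# the plural bookkeeping keys built above back into the singular extended
# attribute names, with None values.
def _demo_clear_constructor_attributes():
    ext_attrs = {'Constructors': ['args0'], 'CustomConstructors': ['args1']}
    clear_constructor_attributes(ext_attrs)
    assert ext_attrs == {'Constructor': None, 'CustomConstructor': None}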
################################################################################
# Types
################################################################################
def type_node_to_type(node):
children = node.GetChildren()
if len(children) < 1 or len(children) > 2:
raise ValueError('Type node expects 1 or 2 children (type + optional array []), got %s (multi-dimensional arrays are not supported).' % len(children))
base_type = type_node_inner_to_type(children[0])
if node.GetProperty('NULLABLE'):
base_type = IdlNullableType(base_type)
if len(children) == 2:
array_node = children[1]
array_node_class = array_node.GetClass()
if array_node_class != 'Array':
raise ValueError('Expected Array node as TypeSuffix, got %s node.' % array_node_class)
array_type = IdlArrayType(base_type)
if array_node.GetProperty('NULLABLE'):
return IdlNullableType(array_type)
return array_type
return base_type
def type_node_inner_to_type(node):
node_class = node.GetClass()
# Note Type*r*ef, not Typedef, meaning the type is an identifier, thus
# either a typedef shorthand (but not a Typedef declaration itself) or an
# interface type. We do not distinguish these, and just use the type name.
if node_class in ['PrimitiveType', 'Typeref']:
# unrestricted syntax: unrestricted double | unrestricted float
is_unrestricted = bool(node.GetProperty('UNRESTRICTED'))
return IdlType(node.GetName(), is_unrestricted=is_unrestricted)
elif node_class == 'Any':
return IdlType('any')
elif node_class in ['Sequence', 'FrozenArray']:
return sequence_node_to_type(node)
elif node_class == 'UnionType':
return union_type_node_to_idl_union_type(node)
elif node_class == 'Promise':
return IdlType('Promise')
raise ValueError('Unrecognized node class: %s' % node_class)
def sequence_node_to_type(node):
children = node.GetChildren()
class_name = node.GetClass()
if len(children) != 1:
raise ValueError('%s node expects exactly 1 child, got %s' % (class_name, len(children)))
sequence_child = children[0]
sequence_child_class = sequence_child.GetClass()
if sequence_child_class != 'Type':
raise ValueError('Unrecognized node class: %s' % sequence_child_class)
element_type = type_node_to_type(sequence_child)
if class_name == 'Sequence':
sequence_type = IdlSequenceType(element_type)
elif class_name == 'FrozenArray':
sequence_type = IdlFrozenArrayType(element_type)
else:
raise ValueError('Unexpected node: %s' % class_name)
if node.GetProperty('NULLABLE'):
return IdlNullableType(sequence_type)
return sequence_type
def typedef_node_to_type(node):
children = node.GetChildren()
if len(children) != 1:
raise ValueError('Typedef node with %s children, expected 1' % len(children))
child = children[0]
child_class = child.GetClass()
if child_class != 'Type':
raise ValueError('Unrecognized node class: %s' % child_class)
return type_node_to_type(child)
def union_type_node_to_idl_union_type(node):
member_types = [type_node_to_type(member_type_node)
for member_type_node in node.GetChildren()]
return IdlUnionType(member_types)
################################################################################
# Visitor
################################################################################
class Visitor(object):
"""Abstract visitor class for IDL definitions traverse."""
def visit_definitions(self, definitions):
pass
def visit_typed_object(self, typed_object):
pass
def visit_callback_function(self, callback_function):
self.visit_typed_object(callback_function)
def visit_dictionary(self, dictionary):
pass
def visit_dictionary_member(self, member):
self.visit_typed_object(member)
def visit_interface(self, interface):
pass
def visit_typedef(self, typedef):
self.visit_typed_object(typedef)
def visit_attribute(self, attribute):
self.visit_typed_object(attribute)
def visit_constant(self, constant):
self.visit_typed_object(constant)
def visit_operation(self, operation):
self.visit_typed_object(operation)
def visit_argument(self, argument):
self.visit_typed_object(argument)
def visit_iterable(self, iterable):
self.visit_typed_object(iterable)
def visit_maplike(self, maplike):
self.visit_typed_object(maplike)
def visit_setlike(self, setlike):
self.visit_typed_object(setlike)
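# A minimal sketch (illustration only): a concrete Visitor that counts every
# typed object it is driven over. Because visit_operation and visit_argument
# both forward to visit_typed_object, accept() on an IdlOperation yields
# 1 + len(operation.arguments).
class _CountingVisitor(Visitor):
    def __init__(self):
        self.count = 0
    def visit_typed_object(self, typed_object):
        self.count += 1
def _count_typed_objects(operation):
    visitor = _CountingVisitor()
    operation.accept(visitor)
    return visitor.count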
|
|
import itertools
import numpy as np
nax = np.newaxis
import gaussians
from misc import array_map, my_inv, full_shape, broadcast, dot, process_slice, match_shapes, _err_string, set_err_info, transp
import scipy.linalg
class BaseMatrix:
def __init__(self):
self.mutable = False
def set_mutable(self, m):
self.mutable = m
def __rmul__(self, other):
return self * other
def allclose(self, other):
self, other = self.full(), other.full()
es = _err_string(self._S, other._S)
set_err_info('psd_matrices', [('S', es)])
return np.allclose(self._S, other._S)
def __getitem__(self, slc):
return self.__slice__(slc)
def __setitem__(self, slc, other):
return self.__setslice__(slc, other)
class FullMatrix(BaseMatrix):
def __init__(self, S):
BaseMatrix.__init__(self)
self._S = S
self.shape = S.shape[:-2]
self.ndim = len(self.shape)
self.dim = S.shape[-1]
self.shape_str = 'S=%s' % str(S.shape)
def full(self):
return self
def copy(self):
return FullMatrix(self._S.copy())
def elt(self, i, j):
return self._S[..., i, j]
def col(self, j):
return self._S[..., :, j]
def __slice__(self, slc):
S_slc = process_slice(slc, self._S.shape, 2)
return FullMatrix(self._S[S_slc])
def __setslice__(self, slc, other):
if not self.mutable:
raise RuntimeError('Attempt to modify an immutable matrix')
S_slc = process_slice(slc, self._S.shape, 2)
        # Assign (not accumulate) into the slice, matching __setitem__ semantics
        # and the behavior of the other matrix classes below.
        self._S[S_slc] = other.full()._S
def dot(self, x):
#return array_map(np.dot, [self._S, x], self.ndim)
return dot(self._S, x)
def qform(self, x):
#temp = array_map(np.dot, [self._S, x], self.ndim)
temp = dot(self._S, x)
return (temp * x).sum(-1)
def pinv(self):
try:
return FullMatrix(array_map(my_inv, [self._S], self.ndim))
except np.linalg.LinAlgError:
return FullMatrix(array_map(np.linalg.pinv, [self._S], self.ndim))
def inv(self):
return FullMatrix(array_map(my_inv, [self._S], self.ndim))
def __add__(self, other):
other = other.full()
return FullMatrix(self._S + other._S)
def __sub__(self, other):
other = other.full()
return FullMatrix(self._S - other._S)
def __mul__(self, other):
other = np.array(other)
return FullMatrix(self._S * other[..., nax, nax])
def sum(self, axis):
assert axis >= 0
return FullMatrix(self._S.sum(axis))
def logdet(self):
_, ld = array_map(np.linalg.slogdet, [self._S], self.ndim)
return ld
def alat(self, A):
return FullMatrix(dot(A, dot(self._S, transp(A))))
def rescale(self, a):
slc = (nax,) * len(self.shape) + (slice(None), slice(None))
return self.alat(a * np.eye(self.dim)[slc])
def conv(self, other):
other = other.full()
P = array_map(my_inv, [self._S + other._S], self.ndim)
return FullMatrix(dot(self._S, dot(P, other._S)))
def sqrt_dot(self, x):
L = array_map(np.linalg.cholesky, [self._S + 1e-10 * np.eye(self.dim)], self.ndim)
return dot(L, x)
def add_dummy_dimension(self):
S = np.zeros(self.shape + (self.dim+1, self.dim+1))
S[..., 1:, 1:] = self._S
return FullMatrix(S)
def to_eig(self):
d, Q = array_map(np.linalg.eigh, [self._S], self.ndim)
return FixedEigMatrix(d, Q, 0)
@staticmethod
def random(shape, dim, rank=None):
if rank is None:
rank = dim
A = np.random.normal(size=shape + (dim, rank))
S = dot(A, transp(A))
return FullMatrix(S)
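# A minimal sketch in plain NumPy (independent of the misc helpers above),
# spelling out the algebra FullMatrix wraps: dot(x) is S x, qform(x) is
# x^T S x, and logdet agrees with np.linalg.slogdet for a PSD matrix.
def _demo_full_matrix_algebra():
    rng = np.random.RandomState(0)
    A = rng.normal(size=(4, 4))
    S = A.dot(A.T)                       # random PSD matrix, as in random()
    x = rng.normal(size=4)
    assert np.allclose(x.dot(S).dot(x), (S.dot(x) * x).sum())
    sign, logdet = np.linalg.slogdet(S)
    assert sign > 0 and np.isfinite(logdet)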
class DiagonalMatrix(BaseMatrix):
def __init__(self, s):
BaseMatrix.__init__(self)
s = np.asarray(s)
self._s = s
self.shape = s.shape[:-1]
self.ndim = len(self.shape)
self.dim = s.shape[-1]
self.shape_str = 's=%s' % str(s.shape)
def full(self):
S = array_map(np.diag, [self._s], self.ndim)
return FullMatrix(S)
def copy(self):
return DiagonalMatrix(self._s.copy())
def elt(self, i, j):
if i == j:
return self._s[..., i]
else:
return np.zeros(self.shape)
def col(self, j):
result = np.zeros(self.shape + (self.dim,))
result[..., j] = self._s[..., j]
return result
def __slice__(self, slc):
slc = process_slice(slc, self._s.shape, 1)
return DiagonalMatrix(self._s[slc])
def __setslice__(self, slc, other):
if not self.mutable:
raise RuntimeError('Attempt to modify an immutable matrix')
if isinstance(other, DiagonalMatrix):
slc = process_slice(slc, self._s.shape, 1)
self._s[slc] = other._s
else:
            raise RuntimeError('Cannot assign a %s to a DiagonalMatrix' % other.__class__)
def dot(self, x):
return self._s * x
def qform(self, x):
return (self._s * x**2).sum(-1)
def pinv(self):
return DiagonalMatrix(np.where(self._s > 0., 1. / self._s, 0.))
def inv(self):
return DiagonalMatrix(1. / self._s)
def __add__(self, other):
if isinstance(other, DiagonalMatrix):
return DiagonalMatrix(self._s + other._s)
elif isinstance(other, EyeMatrix):
return DiagonalMatrix(self._s + other._s[..., nax])
else:
return self.full() + other
def __sub__(self, other):
return self + other * -1
def __mul__(self, other):
return DiagonalMatrix(self._s * other[..., nax])
def sum(self, axis):
assert axis >= 0
return DiagonalMatrix(self._s.sum(axis))
def logdet(self):
return np.log(self._s).sum(-1)
def alat(self, A):
return self.full().alat(A)
def rescale(self, a):
a = np.asarray(a)
return DiagonalMatrix(a[..., nax] ** 2 * self._s)
def conv(self, other):
if isinstance(other, DiagonalMatrix):
return DiagonalMatrix(1. / (1. / self._s + 1. / other._s))
elif isinstance(other, EyeMatrix):
return DiagonalMatrix(1. / (1. / self._s + 1. / other._s[..., nax]))
else:
return self.full().conv(other)
def sqrt_dot(self, x):
return np.sqrt(self._s) * x
def add_dummy_dimension(self):
return self.full().add_dummy_dimension()
def to_eig(self):
return self.full().to_eig()
@staticmethod
def random(shape, dim):
s = np.random.gamma(1., 1., size=shape+(dim,))
return DiagonalMatrix(s)
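# A minimal sketch (plain NumPy) of the identity behind conv() for diagonal
# matrices: S1 (S1 + S2)^{-1} S2 equals the "parallel sum"
# (S1^{-1} + S2^{-1})^{-1}, which reduces elementwise to 1 / (1/s1 + 1/s2).
def _demo_diagonal_conv_identity():
    rng = np.random.RandomState(0)
    s1, s2 = rng.gamma(1., 1., size=5), rng.gamma(1., 1., size=5)
    S1, S2 = np.diag(s1), np.diag(s2)
    full = S1.dot(np.linalg.inv(S1 + S2)).dot(S2)
    assert np.allclose(full, np.diag(1. / (1. / s1 + 1. / s2)))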
class EyeMatrix(BaseMatrix):
def __init__(self, s, dim):
BaseMatrix.__init__(self)
s = np.asarray(s)
self._s = s
self.shape = s.shape
self.ndim = len(self.shape)
self.dim = dim
self.shape_str = 's=%s' % str(s.shape)
def full(self):
return FullMatrix(self._s[..., nax, nax] * np.eye(self.dim))
def copy(self):
return EyeMatrix(self._s.copy(), self.dim)
def elt(self, i, j):
if i == j:
return self._s
else:
return np.zeros(self.shape)
def col(self, j):
result = np.zeros(self.shape + (self.dim,))
result[..., j] = self._s
return result
def __slice__(self, slc):
return EyeMatrix(self._s[slc], self.dim)
def __setslice__(self, slc, other):
if not self.mutable:
raise RuntimeError('Attempt to modify an immutable matrix')
if isinstance(other, EyeMatrix):
self._s[slc] = other._s
else:
            raise RuntimeError('Cannot assign a %s to an EyeMatrix' % other.__class__)
def dot(self, x):
return self._s[..., nax] * x
def qform(self, x):
return (x ** 2).sum(-1) * self._s
def pinv(self):
return EyeMatrix(np.where(self._s > 0., 1. / self._s, 0.), self.dim)
def inv(self):
return EyeMatrix(1. / self._s, self.dim)
def __add__(self, other):
if isinstance(other, EyeMatrix):
return EyeMatrix(self._s + other._s, self.dim)
elif isinstance(other, FixedEigMatrix):
return other + self
elif isinstance(other, EigMatrix):
return other + self
elif isinstance(other, DiagonalMatrix):
return other + self
else:
return self.full() + other
def __sub__(self, other):
return self + other * -1
def __mul__(self, other):
return EyeMatrix(self._s * other, self.dim)
def sum(self, axis):
return EyeMatrix(self._s.sum(axis), self.dim)
def logdet(self):
return self.dim * np.log(self._s)
def alat(self, A):
## if A.shape[-1] == 1:
## assert self.dim == 1
## A_ = A[..., :, 0]
## Q = A_ / np.sqrt((A_ ** 2).sum(-1))[..., nax]
## d = (A_ ** 2).sum(-1) * self._s
## return FixedEigMatrix(d[..., nax], Q[..., nax], 0.)
return FullMatrix(dot(A, transp(A)) * self._s[..., nax, nax])
def rescale(self, a):
return EyeMatrix(a**2 * self._s, self.dim)
def conv(self, other):
if isinstance(other, EyeMatrix):
return EyeMatrix(1. / (1. / self._s + 1. / other._s), self.dim)
elif isinstance(other, EigMatrix):
return other.conv(self)
elif isinstance(other, FixedEigMatrix):
return other.conv(self)
else:
return self.full().conv(other)
def sqrt_dot(self, x):
return np.sqrt(self._s)[..., nax] * x
def add_dummy_dimension(self):
return self.full().add_dummy_dimension()
def to_eig(self):
return self.full().to_eig()
@staticmethod
def random(shape, dim):
s = np.random.gamma(1., 1., size=shape)
return EyeMatrix(s, dim)
def _x_QDQ_x(Q, d, x):
fsh = full_shape([x.shape[:-1], Q.shape, d.shape])
prod = np.zeros(fsh)
for full_idx in itertools.product(*map(range, fsh)):
Q_idx = broadcast(full_idx, Q.shape)
d_idx = broadcast(full_idx, d.shape)
x_idx = broadcast(full_idx, x.shape[:-1])
curr_d, curr_Q = d[d_idx], Q[Q_idx]
curr_x = x[x_idx + (slice(None),)]
curr_QTx = np.dot(curr_Q.T, curr_x)
prod[full_idx] = (curr_d * curr_QTx**2).sum()
return prod
def _QDQ_x(Q, d, x):
fsh = full_shape([x.shape[:-1], Q.shape, d.shape])
prod = np.zeros(fsh + (x.shape[-1],))
for full_idx in itertools.product(*map(range, fsh)):
Q_idx = broadcast(full_idx, Q.shape)
d_idx = broadcast(full_idx, d.shape)
x_idx = broadcast(full_idx, x.shape[:-1])
curr_d, curr_Q = d[d_idx], Q[Q_idx]
curr_x = x[x_idx + (slice(None),)]
curr_QTx = np.dot(curr_Q.T, curr_x)
prod[full_idx + (slice(None),)] = np.dot(curr_Q, curr_d * curr_QTx)
return prod
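# A minimal sketch (plain NumPy) of what _QDQ_x computes for one (Q, d, x)
# triple, without the broadcasting loop over object arrays: the low-rank
# product Q diag(d) Q^T x, evaluated right-to-left so the dense dim x dim
# matrix is never formed.
def _demo_qdq_x_single():
    rng = np.random.RandomState(0)
    Q, _ = np.linalg.qr(rng.normal(size=(5, 3)))
    d = rng.gamma(1., 1., size=3)
    x = rng.normal(size=5)
    low_rank = Q.dot(d * Q.T.dot(x))          # what the loop body computes
    dense = (Q * d).dot(Q.T).dot(x)           # explicit Q diag(d) Q^T
    assert np.allclose(low_rank, dense)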
class EigMatrix(BaseMatrix):
def __init__(self, d, Q, s_perp, dim):
BaseMatrix.__init__(self)
d, Q, s_perp = match_shapes([('d', d, 0), ('Q', Q, 0), ('s_perp', s_perp, 0)])
self._d = d
self._Q = Q
self.dim = dim
self.ndim = d.ndim
self._s_perp = s_perp
self.shape = full_shape([d.shape, Q.shape, s_perp.shape])
self.shape_str = 'd=%s Q=%s s_perp=%s' % (d.shape, Q.shape, s_perp.shape)
def full(self):
S = np.zeros(full_shape([self._d.shape, self._Q.shape]) + (self.dim, self.dim))
for idx in itertools.product(*map(range, S.shape[:-2])):
d_idx, Q_idx = broadcast(idx, self._d.shape), broadcast(idx, self._Q.shape)
d, Q = self._d[d_idx], self._Q[Q_idx]
S[idx + (Ellipsis,)] = np.dot(Q*d, Q.T)
sp_idx = broadcast(idx, self._s_perp.shape)
sp = self._s_perp[sp_idx]
S[idx + (Ellipsis,)] += (np.eye(self.dim) - np.dot(Q, Q.T)) * sp
return FullMatrix(S)
def copy(self):
return EigMatrix(self._d.copy(), self._Q.copy(), self._s_perp.copy(), self.dim)
def elt(self, i, j):
# TODO: make this efficient
return self.col(j)[..., i]
def col(self, j):
# TODO: make this efficient
x = np.zeros((1,) * self.ndim + (self.dim,))
x[..., j] = 1
return self.dot(x)
def __slice__(self, slc):
d_slc = process_slice(slc, self._d.shape, 0)
Q_slc = process_slice(slc, self._Q.shape, 0)
sp_slc = process_slice(slc, self._s_perp.shape, 0)
if all([type(s) == int for s in slc]):
# NumPy doesn't like zero-dimensional object arrays
return FixedEigMatrix(self._d[d_slc], self._Q[Q_slc], self._s_perp[sp_slc])
else:
return EigMatrix(self._d[d_slc], self._Q[Q_slc], self._s_perp[sp_slc], self.dim)
def __setslice__(self, slc, other):
raise NotImplementedError()
def dot(self, x):
result = _QDQ_x(self._Q, self._d, x)
x_perp = x - _QDQ_x(self._Q, self._d**0., x)
result += x_perp * self._s_perp[..., nax]
return result
def qform(self, x):
result = _x_QDQ_x(self._Q, self._d, x)
x_perp = x - _QDQ_x(self._Q, self._d**0., x)
result += (x_perp ** 2).sum(-1) * self._s_perp
return result
def pinv(self):
new_s_perp = np.where(self._s_perp > 0., 1. / self._s_perp, 0.)
return EigMatrix(1. / self._d, self._Q, new_s_perp, self.dim)
def inv(self):
return EigMatrix(1. / self._d, self._Q, 1. / self._s_perp, self.dim)
def __add__(self, other):
if isinstance(other, EyeMatrix):
return EigMatrix(self._d + other._s, self._Q, self._s_perp + other._s, self.dim)
else:
return self.full() + other
def __sub__(self, other):
return self + other * -1
def __mul__(self, other):
return EigMatrix(other * self._d, self._Q, other * self._s_perp, self.dim)
def sum(self, axis):
return self.full().sum(axis)
def logdet(self):
d, s = self._d, self._s_perp
fsh = full_shape([d.shape, s.shape])
logdet = np.zeros(fsh)
for idx in itertools.product(*map(range, fsh)):
d_idx, s_idx = broadcast(idx, d.shape), broadcast(idx, s.shape)
logdet[idx] = np.log(d[d_idx]).sum() + \
(self.dim - d[d_idx].size) * np.log(s[s_idx])
return logdet
def alat(self, A):
return self.full().alat(A)
def rescale(self, a):
a = np.array(a)
return EigMatrix(a**2 * self._d, self._Q, a**2 * self._s_perp, self.dim)
def conv(self, other):
if isinstance(other, EyeMatrix):
s_perp_new = 1. / (1. / self._s_perp + 1. / other._s)
d_new = 1. / (1. / self._d + 1. / other._s)
return EigMatrix(d_new, self._Q, s_perp_new, self.dim)
else:
return self.full().conv(other)
def sqrt_dot(self, x):
ans_proj = _QDQ_x(self._Q, self._d ** 0.5, x)
perp = x - _QDQ_x(self._Q, self._d**0., x)
ans_perp = np.sqrt(self._s_perp) * perp
return ans_proj + ans_perp
def add_dummy_dimension(self):
return self.full().add_dummy_dimension()
def to_eig(self):
return self
@staticmethod
def random(d_shape, Q_shape, sp_shape, dim, low_rank=False):
s_perp = np.random.gamma(1., 1., size=sp_shape)
smsh = tuple(np.array([d_shape, Q_shape]).min(0))
if low_rank:
rank = np.random.randint(1, dim+1, size=smsh)
else:
rank = dim * np.ones(smsh, dtype=int)
d = np.zeros(d_shape, dtype=object)
for idx in itertools.product(*map(range, d_shape)):
sm_idx = broadcast(idx, smsh)
d[idx] = np.random.gamma(1., 1., size=rank[sm_idx])
Q = np.zeros(Q_shape, dtype=object)
for idx in itertools.product(*map(range, Q_shape)):
sm_idx = broadcast(idx, smsh)
Q[idx], _ = np.linalg.qr(np.random.normal(size=(dim, rank[sm_idx])))
return EigMatrix(d, Q, s_perp, dim)
class FixedEigMatrix(BaseMatrix):
def __init__(self, d, Q, s_perp):
BaseMatrix.__init__(self)
d, Q, s_perp = match_shapes([('d', d, 1), ('Q', Q, 2), ('s_perp', s_perp, 0)])
self._d = d
self._Q = Q
self.dim = Q.shape[-2]
self.rank = Q.shape[-1]
self.ndim = Q.ndim - 2
self.shape = full_shape([d.shape[:-1], Q.shape[:-2], s_perp.shape])
self._s_perp = s_perp
self.shape_str = 'd=%s Q=%s s_perp=%s' % (d.shape, Q.shape, s_perp.shape)
def full(self):
S = dot(self._Q, self._d[..., nax] * transp(self._Q))
S += (np.eye(self.dim) - dot(self._Q, transp(self._Q))) * self._s_perp[..., nax, nax]
return FullMatrix(S)
def copy(self):
return FixedEigMatrix(self._d.copy(), self._Q.copy(), self._s_perp.copy())
def elt(self, i, j):
# TODO: make this efficient
return self.col(j)[..., i]
def col(self, j):
# TODO: make this efficient
x = np.zeros((1,) * self.ndim + (self.dim,))
x[..., j] = 1
return self.dot(x)
def __slice__(self, slc):
d_slc = process_slice(slc, self._d.shape, 1)
Q_slc = process_slice(slc, self._Q.shape, 2)
sp_slc = process_slice(slc, self._s_perp.shape, 0)
return FixedEigMatrix(self._d[d_slc], self._Q[Q_slc], self._s_perp[sp_slc])
def __setslice__(self, slc, other):
raise NotImplementedError()
def dot(self, x):
result = dot(self._Q, self._d * dot(transp(self._Q), x))
x_perp = x - dot(self._Q, dot(transp(self._Q), x))
result += x_perp * self._s_perp[..., nax]
return result
def qform(self, x):
result = (self._d * dot(transp(self._Q), x) ** 2).sum(-1)
x_perp = x - dot(self._Q, dot(transp(self._Q), x))
result += (x_perp ** 2).sum(-1) * self._s_perp
return result
def pinv(self):
new_s_perp = np.where(self._s_perp > 0., 1. / self._s_perp, 0.)
return FixedEigMatrix(1. / self._d, self._Q, new_s_perp)
def inv(self):
return FixedEigMatrix(1. / self._d, self._Q, 1. / self._s_perp)
def __add__(self, other):
if isinstance(other, EyeMatrix):
return FixedEigMatrix(self._d + other._s[..., nax], self._Q, self._s_perp + other._s)
else:
return self.full() + other
def __sub__(self, other):
return self + other * -1
def __mul__(self, other):
return FixedEigMatrix(other[..., nax] * self._d, self._Q, other * self._s_perp)
def sum(self, axis):
return self.full().sum(axis)
def logdet(self):
return np.log(self._d).sum(-1) + (self.dim - self.rank) * np.log(self._s_perp)
def alat(self, A):
return self.full().alat(A)
def rescale(self, a):
a = np.array(a)
return FixedEigMatrix(a[..., nax]**2 * self._d, self._Q, a**2 * self._s_perp)
def conv(self, other):
if isinstance(other, EyeMatrix):
s_perp_new = 1. / (1. / self._s_perp + 1. / other._s)
d_new = 1. / (1. / self._d + 1. / other._s[..., nax])
return FixedEigMatrix(d_new, self._Q, s_perp_new)
else:
return self.full().conv(other)
def sqrt_dot(self, x):
result = dot(self._Q, np.sqrt(self._d) * dot(transp(self._Q), x))
x_perp = x - dot(self._Q, dot(transp(self._Q), x))
result += x_perp * np.sqrt(self._s_perp[..., nax])
return result
def add_dummy_dimension(self):
return self.full().add_dummy_dimension()
def to_eig(self):
return self
@staticmethod
def random(d_shape, Q_shape, sp_shape, dim, rank=None):
if rank is None:
rank = dim
ndim = len(d_shape)
temp = np.random.normal(size=Q_shape + (dim, rank))
Q, _ = array_map(np.linalg.qr, [temp], ndim)
d = np.random.gamma(1., 1., size=d_shape + (rank,))
sp = np.random.gamma(1., 1., size=sp_shape)
return FixedEigMatrix(d, Q, sp)
def proj_psd(H):
    '''
    Project a symmetric matrix onto the cone of positive semi-definite
    matrices by clipping its eigenvalues from below at 1e-8.
    '''
assert np.allclose(H, H.T), 'not symmetric'
d, Q = scipy.linalg.eigh(H)
    d = np.clip(d, 1e-8, np.inf)
return np.dot(Q, d[:, nax] * Q.T)
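# A minimal sketch: after proj_psd the matrix is symmetric and its smallest
# eigenvalue sits at (or above) the 1e-8 clipping floor.
def _demo_proj_psd():
    rng = np.random.RandomState(0)
    A = rng.normal(size=(5, 5))
    H = 0.5 * (A + A.T)                  # symmetric but generally indefinite
    H_psd = proj_psd(H)
    assert np.allclose(H_psd, H_psd.T)
    assert scipy.linalg.eigh(H_psd)[0].min() >= 1e-8 - 1e-12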
def laplace_approx(nll, opt_hyper, hessian, prior_var=100000):
#### FIXME - Believed to have a bug
#### - Might be MATLAB though - test this code on some known integrals
d = opt_hyper.size
if not np.allclose(hessian, proj_psd(hessian)):
# Hessian not PSD - cannot use in Laplace approx
#### TODO - Any job controller should attempt to re-try optimisation
return np.nan
# quadratic centered at opt_hyper with maximum -nll
evidence = gaussians.Potential(np.zeros(d), FullMatrix(hessian), -nll)
evidence = evidence.translate(opt_hyper)
# zero-centered Gaussian
prior = gaussians.Potential.from_moments_iso(np.zeros(d), prior_var)
# multiply the two Gaussians and integrate the result
return -(evidence + prior).integral()
def laplace_approx_no_prior(nll, opt_hyper, hessian, prior_var=100000):
#### FIXME - Believed to have a bug
#### - Might be MATLAB though - test this code on some known integrals
d = opt_hyper.size
if not np.allclose(hessian, proj_psd(hessian)):
# Hessian not PSD - cannot use in Laplace approx
#### TODO - Any job controller should attempt to re-try optimisation
return np.nan
evidence = gaussians.Potential(np.zeros(d), FullMatrix(hessian), -nll)
return -evidence.integral()
def laplace_approx_stable(nll, opt_hyper, hessian, prior_var=100000):
H_eig, Q = scipy.linalg.eigh(hessian)
# in these cases, this function should still work, but an ill-conditioned
# or non-PSD Hessian is a sign there's a bug somewhere upstream
problems = []
if np.max(H_eig) > 1e10:
problems.append('ill-conditioned')
if np.min(H_eig) < 0.:
problems.append('not PSD')
# treat negative and very small values as zero
is_zero = (H_eig < 1e-10)
# determinant term
temp = np.where(is_zero, 1., (1. / H_eig) / (prior_var + 1. / H_eig))
total = 0.5 * np.sum(np.log(temp))
# term inside the exp
opt_hyper_orth = np.dot(Q.T, opt_hyper)
temp = np.where(is_zero, 0., opt_hyper_orth ** 2 / (prior_var + 1. / H_eig))
total += -0.5 * np.sum(temp)
total += -nll
return -total, problems
def laplace_approx_stable_no_prior(nll, hessian):
H_eig, Q = scipy.linalg.eigh(hessian)
problems = []
if (np.min(H_eig) < -(1e-8)*np.max(H_eig)) or (np.max(H_eig) <= 0):
# Check for non-trivially small negative eigenvalues
neg_marg_lik = np.nan
problems.append('Not PSD')
return neg_marg_lik, problems
else:
# treat very small values as zero - i.e. computing the pseudo-determinant
is_zero = (H_eig < (1e-8)*np.max(H_eig))
# compute integral
temp = np.where(is_zero, 1., (2*np.pi / H_eig))
neg_marg_lik = nll - 0.5 * np.sum(np.log(temp))
return neg_marg_lik, problems
def check_laplace_approx():
D = 5
nll = np.random.normal()
opt_hyper = np.random.normal(size=D)
A = np.random.normal(size=(10, 5))
hessian = np.dot(A.T, A)
prior_var = np.random.uniform(1., 2.)
ans1 = laplace_approx(nll, opt_hyper, hessian, prior_var)
ans2 = laplace_approx_stable(nll, opt_hyper, hessian, prior_var)[0]
assert np.allclose(ans1, ans2)
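# A minimal sketch checking laplace_approx_stable_no_prior against direct 1-D
# quadrature: for H = [[h]], the integral of exp(-nll - 0.5*h*x^2) dx is
# exp(-nll) * sqrt(2*pi/h), so the returned value should equal
# nll - 0.5*log(2*pi/h).
def _demo_laplace_quadrature_1d():
    nll, h = 1.3, 2.0
    approx, problems = laplace_approx_stable_no_prior(nll, np.array([[h]]))
    assert not problems
    xs = np.linspace(-20., 20., 200001)
    integral = np.exp(-nll - 0.5 * h * xs ** 2).sum() * (xs[1] - xs[0])
    assert np.allclose(approx, -np.log(integral))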
|
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
from PyQt4 import QtCore, QtGui
class StateSwitchEvent(QtCore.QEvent):
StateSwitchType = QtCore.QEvent.User + 256
def __init__(self, rand=0):
super(StateSwitchEvent, self).__init__(StateSwitchEvent.StateSwitchType)
self.m_rand = rand
def rand(self):
return self.m_rand
class StateSwitchTransition(QtCore.QAbstractTransition):
def __init__(self, rand):
super(StateSwitchTransition, self).__init__()
self.m_rand = rand
def eventTest(self, event):
return (event.type() == StateSwitchEvent.StateSwitchType and
event.rand() == self.m_rand)
def onTransition(self, event):
pass
class StateSwitcher(QtCore.QState):
def __init__(self, machine):
super(StateSwitcher, self).__init__(machine)
self.m_stateCount = 0
self.m_lastIndex = 1
def onEntry(self, event):
self.m_lastIndex = 1 if self.m_lastIndex >= self.m_stateCount else self.m_lastIndex + 1
self.machine().postEvent(StateSwitchEvent(self.m_lastIndex))
def addState(self, state, animation):
self.m_stateCount += 1
trans = StateSwitchTransition(self.m_stateCount)
trans.setTargetState(state)
trans.addAnimation(animation)
self.addTransition(trans)
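# A minimal sketch (plain Python, no Qt event loop) of the round-robin index
# update in StateSwitcher.onEntry: starting from index 1, the posted event
# ids cycle 2, 3, ..., N, 1, 2, ...
def _demo_round_robin(state_count, steps=8):
    last_index, visited = 1, []
    for _ in range(steps):
        last_index = 1 if last_index >= state_count else last_index + 1
        visited.append(last_index)
    return visited  # state_count=3 -> [2, 3, 1, 2, 3, 1, 2, 3]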
def createGeometryState(w1, rect1, w2, rect2, w3, rect3, w4, rect4, w5, rect5, w6, rect6, parent):
result = QtCore.QState(parent)
    result.assignProperty(w1, 'geometry', rect1)
result.assignProperty(w2, 'geometry', rect2)
result.assignProperty(w3, 'geometry', rect3)
result.assignProperty(w4, 'geometry', rect4)
result.assignProperty(w5, 'geometry', rect5)
result.assignProperty(w6, 'geometry', rect6)
return result
@QtCore.pyqtSlot()
def on_pushButtonStart_clicked():
    # Find the item with the smallest y coordinate (the topmost button), then
    # stack it behind its siblings before restarting the timer.
    lastItem = None
    lowestY = None
    for item in scene.items():
        itemY = item.pos().y()
        if lastItem is None or itemY <= lowestY:
            lowestY = itemY
            lastItem = item
for item in scene.items():
if item != lastItem:
lastItem.stackBefore(item)
timer.start()
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
button1 = QtGui.QPushButton()
button1.setFixedSize(QtCore.QSize(50, 50))
button1.setText("#1")
button2 = QtGui.QPushButton()
button2.setFixedSize(QtCore.QSize(50, 50))
button2.setText("#2")
button3 = QtGui.QPushButton()
button3.setFixedSize(QtCore.QSize(50, 50))
button3.setText("#3")
button4 = QtGui.QPushButton()
button4.setFixedSize(QtCore.QSize(50, 50))
button4.setText("#4")
button5 = QtGui.QPushButton()
button5.setFixedSize(QtCore.QSize(50, 50))
button5.setText("#5")
button6 = QtGui.QPushButton()
button6.setFixedSize(QtCore.QSize(50, 50))
button6.setText("#6")
scene = QtGui.QGraphicsScene(0, 0, 50, 300)
scene.addWidget(button1)
scene.addWidget(button2)
scene.addWidget(button3)
scene.addWidget(button4)
scene.addWidget(button5)
scene.addWidget(button6)
timer = QtCore.QTimer()
timer.setInterval(0)
timer.setSingleShot(True)
group = QtCore.QState()
state1 = createGeometryState(
button1, QtCore.QRect(0, 0, 50, 50),
button2, QtCore.QRect(0, 50, 50, 50),
button3, QtCore.QRect(0, 100, 50, 50),
button4, QtCore.QRect(0, 150, 50, 50),
button5, QtCore.QRect(0, 200, 50, 50),
button6, QtCore.QRect(0, 250, 50, 50),
group
)
state2 = createGeometryState(
button2, QtCore.QRect(0, 0, 50, 50),
button3, QtCore.QRect(0, 50, 50, 50),
button4, QtCore.QRect(0, 100, 50, 50),
button5, QtCore.QRect(0, 150, 50, 50),
button6, QtCore.QRect(0, 200, 50, 50),
button1, QtCore.QRect(0, 250, 50, 50),
group
)
state3 = createGeometryState(
button3, QtCore.QRect(0, 0, 50, 50),
button4, QtCore.QRect(0, 50, 50, 50),
button5, QtCore.QRect(0, 100, 50, 50),
button6, QtCore.QRect(0, 150, 50, 50),
button1, QtCore.QRect(0, 200, 50, 50),
button2, QtCore.QRect(0, 250, 50, 50),
group
)
state4 = createGeometryState(
button4, QtCore.QRect(0, 0, 50, 50),
button5, QtCore.QRect(0, 50, 50, 50),
button6, QtCore.QRect(0, 100, 50, 50),
button1, QtCore.QRect(0, 150, 50, 50),
button2, QtCore.QRect(0, 200, 50, 50),
button3, QtCore.QRect(0, 250, 50, 50),
group
)
state5 = createGeometryState(
button5, QtCore.QRect(0, 0, 50, 50),
button6, QtCore.QRect(0, 50, 50, 50),
button1, QtCore.QRect(0, 100, 50, 50),
button2, QtCore.QRect(0, 150, 50, 50),
button3, QtCore.QRect(0, 200, 50, 50),
button4, QtCore.QRect(0, 250, 50, 50),
group
)
state6 = createGeometryState(
button6, QtCore.QRect(0, 0, 50, 50),
button1, QtCore.QRect(0, 50, 50, 50),
button2, QtCore.QRect(0, 100, 50, 50),
button3, QtCore.QRect(0, 150, 50, 50),
button4, QtCore.QRect(0, 200, 50, 50),
button5, QtCore.QRect(0, 250, 50, 50),
group
)
animationGroup = QtCore.QParallelAnimationGroup()
anim = QtCore.QPropertyAnimation(button6, 'geometry')
anim.setDuration(1111)
anim.setEasingCurve(QtCore.QEasingCurve.OutElastic)
animationGroup.addAnimation(anim)
anim = QtCore.QPropertyAnimation(button5, 'geometry')
anim.setDuration(1111)
anim.setEasingCurve(QtCore.QEasingCurve.OutElastic)
animationGroup.addAnimation(anim)
anim = QtCore.QPropertyAnimation(button4, 'geometry')
anim.setDuration(1111)
anim.setEasingCurve(QtCore.QEasingCurve.OutElastic)
animationGroup.addAnimation(anim)
anim = QtCore.QPropertyAnimation(button3, 'geometry')
anim.setDuration(1111)
anim.setEasingCurve(QtCore.QEasingCurve.OutElastic)
animationGroup.addAnimation(anim)
anim = QtCore.QPropertyAnimation(button2, 'geometry')
anim.setDuration(1111)
anim.setEasingCurve(QtCore.QEasingCurve.OutElastic)
animationGroup.addAnimation(anim)
anim = QtCore.QPropertyAnimation(button1, 'geometry')
anim.setDuration(1111)
anim.setEasingCurve(QtCore.QEasingCurve.OutElastic)
animationGroup.addAnimation(anim)
machine = QtCore.QStateMachine()
stateSwitcher = StateSwitcher(machine)
stateSwitcher.addState(state1, animationGroup)
stateSwitcher.addState(state2, animationGroup)
stateSwitcher.addState(state3, animationGroup)
stateSwitcher.addState(state4, animationGroup)
stateSwitcher.addState(state5, animationGroup)
stateSwitcher.addState(state6, animationGroup)
group.addTransition(timer.timeout, stateSwitcher)
group.setInitialState(state1)
machine.addState(group)
machine.setInitialState(group)
machine.start()
view = QtGui.QGraphicsView(scene)
view.setAlignment(QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)
view.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
view.setFixedSize(50, 150)
window = QtGui.QWidget()
pushButtonStart = QtGui.QPushButton(window)
pushButtonStart.setText("Start")
pushButtonStart.clicked.connect(on_pushButtonStart_clicked)
layoutVertical = QtGui.QVBoxLayout(window)
layoutVertical.addWidget(view)
layoutVertical.addWidget(pushButtonStart)
window.show()
window.resize(333, 150)
sys.exit(app.exec_())
|
|
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from mock import patch
from nailgun.test.base import BaseTestCase
from nailgun import consts
from nailgun.objects import Cluster
from nailgun.objects import OpenStackWorkloadStats
from nailgun.statistics.oswl.collector import collect as oswl_collect_once
from nailgun.statistics.oswl.collector import run as run_collecting
class TestOSWLCollector(BaseTestCase):
vms_info = [{
"id": 1,
"status": "running",
}]
def collect_for_operational_cluster(self, get_info_mock):
cluster = self.env.create_cluster(
api=False,
status=consts.CLUSTER_STATUSES.operational
)
cls_id = cluster.id
get_info_mock.return_value = self.vms_info
oswl_collect_once(consts.OSWL_RESOURCE_TYPES.vm)
last = OpenStackWorkloadStats.get_last_by(
cls_id, consts.OSWL_RESOURCE_TYPES.vm)
upd_time = last.updated_time
res_data = {
'added': [{'time': upd_time.isoformat(), 'id': 1}],
'removed': [],
'modified': [],
'current': self.vms_info}
self.assertEqual(last.resource_data, res_data)
return cls_id, res_data
def update_cluster_status_and_oswl_data(self, cls_id, status):
cls = Cluster.get_by_uid(cls_id)
Cluster.update(cls, {'status': status})
oswl_collect_once(consts.OSWL_RESOURCE_TYPES.vm)
return OpenStackWorkloadStats.get_last_by(
cls_id, consts.OSWL_RESOURCE_TYPES.vm)
@patch('nailgun.statistics.oswl.collector.utils.set_proxy')
@patch('nailgun.statistics.oswl.collector.helpers.ClientProvider')
@patch('nailgun.statistics.oswl.collector.helpers.'
'get_info_from_os_resource_manager')
def test_skip_collection_for_errorful_cluster(self, get_info_mock, *_):
error_cluster = self.env.create(
api=False,
nodes_kwargs=[{"roles": ["controller"], "online": False}],
cluster_kwargs={"name": "error",
"status": consts.CLUSTER_STATUSES.operational}
)
normal_cluster = self.env.create(
api=False,
nodes_kwargs=[{"roles": ["controller"], "online": True}],
cluster_kwargs={"name": "normal",
"status": consts.CLUSTER_STATUSES.operational}
)
get_info_mock.return_value = self.vms_info
oswl_collect_once(consts.OSWL_RESOURCE_TYPES.vm)
last_for_error_clsr = OpenStackWorkloadStats.get_last_by(
error_cluster["id"], consts.OSWL_RESOURCE_TYPES.vm)
self.assertIsNone(last_for_error_clsr)
last_for_normal_clsr = OpenStackWorkloadStats.get_last_by(
normal_cluster["id"], consts.OSWL_RESOURCE_TYPES.vm)
self.assertIsNotNone(last_for_normal_clsr)
upd_time = last_for_normal_clsr.updated_time
res_data = {
'added': [{'time': upd_time.isoformat(), 'id': 1}],
'removed': [],
'modified': [],
'current': self.vms_info}
self.assertEqual(last_for_normal_clsr.resource_data, res_data)
@patch('nailgun.statistics.oswl.collector.utils.get_proxy_for_cluster')
@patch('nailgun.statistics.oswl.collector.utils.set_proxy')
@patch('nailgun.statistics.oswl.collector.helpers.ClientProvider')
@patch('nailgun.statistics.oswl.collector.helpers.'
'get_info_from_os_resource_manager')
def test_clear_data_for_changed_cluster(self, get_info_mock, *_):
cls_id, res_data = self.collect_for_operational_cluster(get_info_mock)
last = self.update_cluster_status_and_oswl_data(
cls_id, consts.CLUSTER_STATUSES.error)
# nothing is changed while cluster is in error status
self.assertEqual(last.resource_data, res_data)
last = self.update_cluster_status_and_oswl_data(
cls_id, consts.CLUSTER_STATUSES.remove)
removed = dict(self.vms_info[0])
removed['time'] = last.updated_time.isoformat()
res_data.update({
'removed': [removed],
'current': []})
# current data is cleared when cluster status is changed
self.assertEqual(last.resource_data, res_data)
@patch('nailgun.statistics.oswl.collector.utils.get_proxy_for_cluster')
@patch('nailgun.statistics.oswl.collector.utils.set_proxy')
@patch('nailgun.statistics.oswl.collector.helpers.ClientProvider')
@patch('nailgun.statistics.oswl.collector.helpers.'
'get_info_from_os_resource_manager')
def test_clear_data_for_removed_cluster(self, get_info_mock, *_):
cls_id, res_data = self.collect_for_operational_cluster(get_info_mock)
cls = Cluster.get_by_uid(cls_id)
Cluster.delete(cls)
oswl_collect_once(consts.OSWL_RESOURCE_TYPES.vm)
last = OpenStackWorkloadStats.get_last_by(
cls_id, consts.OSWL_RESOURCE_TYPES.vm)
removed = dict(self.vms_info[0])
removed['time'] = last.updated_time.isoformat()
res_data.update({
'removed': [removed],
'current': []})
# current data is cleared when cluster is deleted
self.assertEqual(last.resource_data, res_data)
@patch('nailgun.statistics.oswl.collector.utils.get_proxy_for_cluster')
@patch('nailgun.statistics.oswl.collector.utils.set_proxy')
@patch('nailgun.statistics.oswl.collector.helpers.ClientProvider')
@patch('nailgun.statistics.oswl.collector.helpers.'
'get_info_from_os_resource_manager')
def test_removed_several_times(self, get_info_mock, *_):
cls_id, res_data = self.collect_for_operational_cluster(get_info_mock)
last = OpenStackWorkloadStats.get_last_by(
cls_id, consts.OSWL_RESOURCE_TYPES.vm)
self.assertItemsEqual(self.vms_info, last.resource_data['current'])
# reset cluster
get_info_mock.return_value = []
oswl_collect_once(consts.OSWL_RESOURCE_TYPES.vm)
last = OpenStackWorkloadStats.get_last_by(
cls_id, consts.OSWL_RESOURCE_TYPES.vm)
removed = dict(self.vms_info[0])
removed['time'] = last.updated_time.isoformat()
removed_data = [removed]
# check data is not duplicated in removed on several collects
for _ in xrange(10):
oswl_collect_once(consts.OSWL_RESOURCE_TYPES.vm)
last = OpenStackWorkloadStats.get_last_by(
cls_id, consts.OSWL_RESOURCE_TYPES.vm)
self.assertEqual(removed_data, last.resource_data['removed'])
# cluster is operational
        # check that 'removed' is not changed
get_info_mock.return_value = self.vms_info
oswl_collect_once(consts.OSWL_RESOURCE_TYPES.vm)
last = OpenStackWorkloadStats.get_last_by(
cls_id, consts.OSWL_RESOURCE_TYPES.vm)
self.assertEqual(removed_data, last.resource_data['removed'])
# reset cluster again
# checking only id and time added to 'removed'
get_info_mock.return_value = []
oswl_collect_once(consts.OSWL_RESOURCE_TYPES.vm)
last = OpenStackWorkloadStats.get_last_by(
cls_id, consts.OSWL_RESOURCE_TYPES.vm)
removed_data.append({
'id': removed_data[0]['id'],
'time': last.updated_time.isoformat()
})
self.assertListEqual(removed_data, last.resource_data['removed'])
@patch("nailgun.statistics.oswl.collector.time.sleep",
side_effect=StopIteration)
@patch.object(sys, "argv", new=["_", consts.OSWL_RESOURCE_TYPES.vm])
def test_oswl_is_not_collected_when_stats_collecting_disabled(self, *_):
collect_func_path = ("nailgun.statistics.oswl.collector.collect")
must_send_stats_path = ("nailgun.statistics.oswl.collector"
".MasterNodeSettings.must_send_stats")
with patch(must_send_stats_path, return_value=False):
with patch(collect_func_path) as collect_mock:
try:
run_collecting()
except StopIteration:
pass
self.assertFalse(collect_mock.called)
with patch(must_send_stats_path, return_value=True):
with patch(collect_func_path) as collect_mock:
try:
run_collecting()
except StopIteration:
pass
self.assertTrue(collect_mock.called)
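# A minimal sketch (plain Python, not the actual collector) of the
# resource_data bookkeeping these tests assert on: current resources are
# diffed against the previous snapshot, and an id that disappears is appended
# to 'removed' once, stamped with the collection time. On later collects the
# id is no longer in 'current', so it is not appended again.
def _demo_resource_diff(previous, current, now):
    curr_ids = set(r['id'] for r in current)
    removed = list(previous['removed'])
    for resource in previous['current']:
        if resource['id'] not in curr_ids:
            entry = dict(resource)
            entry['time'] = now
            removed.append(entry)
    return {'added': previous['added'], 'removed': removed,
            'modified': previous['modified'], 'current': list(current)}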
|
|
from __future__ import division
import numpy as np
from functools import partial
import warnings
from menpo.feature import no_op
from menpo.base import name_of_callable
from menpofit.visualize import print_progress
from menpofit.base import batch
from menpofit.builder import (scale_images, rescale_images_to_reference_shape,
compute_reference_shape, MenpoFitBuilderWarning,
compute_features)
from menpofit.fitter import (MultiScaleNonParametricFitter,
noisy_shape_from_bounding_box,
align_shape_with_bounding_box,
generate_perturbations_from_gt)
import menpofit.checks as checks
from .algorithm import NonParametricNewton
class SupervisedDescentFitter(MultiScaleNonParametricFitter):
r"""
Class for training a multi-scale Supervised Descent model.
Parameters
----------
images : `list` of `menpo.image.Image`
The `list` of training images.
group : `str` or ``None``, optional
The landmark group that corresponds to the ground truth shape of each
image. If ``None`` and the images only have a single landmark group,
then that is the one that will be used. Note that all the training
images need to have the specified landmark group.
bounding_box_group_glob : `glob` or ``None``, optional
Glob that defines the bounding boxes to be used for training. If
``None``, then the bounding boxes of the ground truth shapes are used.
sd_algorithm_cls : `class`, optional
The Supervised Descent algorithm to be used. The possible algorithms
        are separated into the following four categories:
**Non-parametric:**
===================================== ==============================
Class Regression
===================================== ==============================
:map:`NonParametricNewton` :map:`IRLRegression`
:map:`NonParametricGaussNewton` :map:`IIRLRegression`
:map:`NonParametricPCRRegression` :map:`PCRRegression`
:map:`NonParametricOptimalRegression` :map:`OptimalLinearRegression`
:map:`NonParametricOPPRegression` :map:`OPPRegression`
===================================== ==============================
**Parametric shape:**
======================================= ===================================
Class Regression
======================================= ===================================
:map:`ParametricShapeNewton` :map:`IRLRegression`
:map:`ParametricShapeGaussNewton` :map:`IIRLRegression`
:map:`ParametricShapePCRRegression` :map:`PCRRegression`
:map:`ParametricShapeOptimalRegression` :map:`OptimalLinearRegression`
        :map:`ParametricShapeOPPRegression`     :map:`OPPRegression`
======================================= ===================================
**Parametric appearance:**
================================================== =====================
Class Regression
================================================== =====================
:map:`ParametricAppearanceProjectOutNewton` :map:`IRLRegression`
        :map:`ParametricAppearanceProjectOutGaussNewton`   :map:`IIRLRegression`
:map:`ParametricAppearanceMeanTemplateNewton` :map:`IRLRegression`
        :map:`ParametricAppearanceMeanTemplateGaussNewton` :map:`IIRLRegression`
:map:`ParametricAppearanceWeightsNewton` :map:`IRLRegression`
        :map:`ParametricAppearanceWeightsGaussNewton`      :map:`IIRLRegression`
================================================== =====================
**Parametric shape and appearance:**
=========================================== =====================
Class Regression
=========================================== =====================
:map:`FullyParametricProjectOutNewton` :map:`IRLRegression`
:map:`FullyParametricProjectOutGaussNewton` :map:`IIRLRegression`
:map:`FullyParametricMeanTemplateNewton` :map:`IRLRegression`
:map:`FullyParametricWeightsNewton` :map:`IRLRegression`
:map:`FullyParametricProjectOutOPP` :map:`OPPRegression`
=========================================== =====================
reference_shape : `menpo.shape.PointCloud` or ``None``, optional
The reference shape that will be used for normalising the size of the
training images. The normalization is performed by rescaling all the
training images so that the scale of their ground truth shapes
matches the scale of the reference shape. Note that the reference
shape is rescaled with respect to the `diagonal` before performing
the normalisation. If ``None``, then the mean shape will be used.
diagonal : `int` or ``None``, optional
This parameter is used to rescale the reference shape so that the
diagonal of its bounding box matches the provided value. In other
words, this parameter controls the size of the model at the highest
scale. If ``None``, then the reference shape does not get rescaled.
holistic_features : `closure` or `list` of `closure`, optional
The features that will be extracted from the training images. Note
that the features are extracted before warping the images to the
reference shape. If `list`, then it must define a feature function per
scale. Please refer to `menpo.feature` for a list of potential features.
patch_features : `closure` or `list` of `closure`, optional
The features that will be extracted from the patches of the training
images. Note that, as opposed to `holistic_features`, these features
are extracted after extracting the patches. If `list`, then it must
define a feature function per scale. Please refer to `menpo.feature`
and `menpofit.feature` for a list of potential features.
patch_shape : (`int`, `int`) or `list` of (`int`, `int`), optional
The shape of the patches to be extracted. If a `list` is provided,
then it defines a patch shape per scale.
scales : `float` or `tuple` of `float`, optional
        The scale value of each scale. They must be provided in ascending order,
i.e. from lowest to highest scale. If `float`, then a single scale is
assumed.
n_iterations : `int` or `list` of `int`, optional
The number of iterations (cascades) of each level. If `list`, it must
specify a value per scale. If `int`, then it defines the total number of
iterations (cascades) over all scales.
n_perturbations : `int`, optional
The number of perturbations to be generated from each of the bounding
boxes using `perturb_from_gt_bounding_box`.
perturb_from_gt_bounding_box : `callable`, optional
The function that will be used to generate the perturbations from each
of the bounding boxes.
batch_size : `int` or ``None``, optional
If an `int` is provided, then the training is performed in an
incremental fashion on image batches of size equal to the provided
        value. If ``None``, then the training is performed directly on all
        the images.
verbose : `bool`, optional
If ``True``, then the progress of the training will be printed.
References
----------
.. [1] X. Xiong, and F. De la Torre. "Supervised Descent Method and its
applications to face alignment", Proceedings of the IEEE Conference on
Computer Vision and Pattern Recognition (CVPR), 2013.
.. [2] P. N. Belhumeur, D. W. Jacobs, D. J. Kriegman, and N. Kumar.
"Localizing parts of faces using a consensus of exemplars", Proceedings
of the IEEE Conference on Computer Vision and Pattern Recognition
(CVPR), 2011.
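    Examples
    --------
    A minimal, illustrative training and fitting sketch. The data path, the
    use of `menpo.io` and the ``'PTS'`` landmark group are assumptions of
    the example, not requirements::

        import menpo.io as mio

        training_images = list(mio.import_images('/path/to/trainset/'))
        fitter = SupervisedDescentFitter(
            training_images, sd_algorithm_cls=NonParametricNewton,
            verbose=True)

        image = training_images[0]
        gt_shape = image.landmarks['PTS'].lms
        result = fitter.fit_from_bb(image, gt_shape.bounding_box(),
                                    gt_shape=gt_shape)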
"""
def __init__(self, images, group=None, bounding_box_group_glob=None,
sd_algorithm_cls=None, reference_shape=None, diagonal=None,
holistic_features=no_op, patch_features=no_op,
patch_shape=(17, 17), scales=(0.5, 1.0), n_iterations=3,
n_perturbations=30,
perturb_from_gt_bounding_box=noisy_shape_from_bounding_box,
batch_size=None, verbose=False):
if batch_size is not None:
raise NotImplementedError('Training an SDM with a batch size '
'(incrementally) is not implemented yet.')
# Check parameters
checks.check_diagonal(diagonal)
scales = checks.check_scales(scales)
n_scales = len(scales)
patch_features = checks.check_callable(patch_features, n_scales)
sd_algorithm_cls = checks.check_callable(sd_algorithm_cls, n_scales)
holistic_features = checks.check_callable(holistic_features, n_scales)
patch_shape = checks.check_patch_shape(patch_shape, n_scales)
# Call superclass
super(SupervisedDescentFitter, self).__init__(
scales=scales, reference_shape=reference_shape,
holistic_features=holistic_features, algorithms=[])
# Set parameters
self._sd_algorithm_cls = sd_algorithm_cls
self.patch_features = patch_features
self.patch_shape = patch_shape
self.diagonal = diagonal
self.n_perturbations = n_perturbations
self.n_iterations = checks.check_max_iters(n_iterations, n_scales)
self._perturb_from_gt_bounding_box = perturb_from_gt_bounding_box
# Set up algorithms
self._setup_algorithms()
# Now, train the model!
self._train(images, increment=False, group=group,
bounding_box_group_glob=bounding_box_group_glob,
verbose=verbose, batch_size=batch_size)
def _setup_algorithms(self):
self.algorithms = [self._sd_algorithm_cls[j](
patch_features=self.patch_features[j],
patch_shape=self.patch_shape[j], n_iterations=self.n_iterations[j])
for j in range(self.n_scales)]
def _train(self, images, increment=False, group=None,
bounding_box_group_glob=None, verbose=False, batch_size=None):
# If batch_size is not None, then we may have a generator, else we
# assume we have a list.
if batch_size is not None:
# Create a generator of fixed sized batches. Will still work even
# on an infinite list.
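            # e.g. (illustrative) batch(images, 2) yields [img0, img1],
            # [img2, img3], ..., with a shorter final batch if needed.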
image_batches = batch(images, batch_size)
else:
image_batches = [list(images)]
for k, image_batch in enumerate(image_batches):
if k == 0:
if self.reference_shape is None:
# If no reference shape was given, use the mean of the first
# batch
if batch_size is not None:
warnings.warn('No reference shape was provided. The '
'mean of the first batch will be the '
'reference shape. If the batch mean is '
'not representative of the true mean, '
'this may cause issues.',
MenpoFitBuilderWarning)
self._reference_shape = compute_reference_shape(
[i.landmarks[group].lms for i in image_batch],
self.diagonal, verbose=verbose)
            # We attach landmarks to the images to store the perturbations,
            # so when the default 'None' group is used, we need to grab an
            # actual label to resolve the ambiguity
if group is None:
group = image_batch[0].landmarks.group_labels[0]
# After the first batch, we are incrementing the model
if k > 0:
increment = True
if verbose:
print('Computing batch {}'.format(k))
# Train each batch
self._train_batch(
image_batch, increment=increment, group=group,
bounding_box_group_glob=bounding_box_group_glob,
verbose=verbose)
def _train_batch(self, image_batch, increment=False, group=None,
bounding_box_group_glob=None, verbose=False):
# Rescale images wrt the scale factor between the existing
# reference_shape and their ground truth (group) shapes
image_batch = rescale_images_to_reference_shape(
image_batch, group, self.reference_shape,
verbose=verbose)
# Create a callable that generates perturbations of the bounding boxes
# of the provided images.
generated_bb_func = generate_perturbations_from_gt(
image_batch, self.n_perturbations,
self._perturb_from_gt_bounding_box, gt_group=group,
bb_group_glob=bounding_box_group_glob, verbose=verbose)
# For each scale (low --> high)
for j in range(self.n_scales):
# Print progress if asked
if verbose:
if len(self.scales) > 1:
scale_prefix = ' - Scale {}: '.format(j)
else:
scale_prefix = ' - '
else:
scale_prefix = None
# Extract features. Features are extracted only if we are at the
# first scale or if the features of the current scale are different
# than the ones extracted at the previous scale.
if j == 0 and self.holistic_features[j] == no_op:
# Saves a lot of memory
feature_images = image_batch
elif (j == 0 or
self.holistic_features[j] != self.holistic_features[j - 1]):
# Compute features only if this is the first pass through
# the loop or the features at this scale are different from
# the features at the previous scale
feature_images = compute_features(image_batch,
self.holistic_features[j],
prefix=scale_prefix,
verbose=verbose)
            # Rescale images according to scales. Note that scale_images is
            # smart enough not to rescale the images if the current scale
            # factor equals 1.
scaled_images, scale_transforms = scale_images(
feature_images, self.scales[j], prefix=scale_prefix,
return_transforms=True, verbose=verbose)
# Extract scaled ground truth shapes for current scale
scaled_shapes = [i.landmarks[group].lms for i in scaled_images]
            # Get shape estimations of the current scale. At the first
            # scale, this is done by aligning the reference shape with the
            # perturbed bounding boxes. At subsequent scales, the current
            # shapes are attached to the scaled_images under the key
            # '__sdm_current_shape_{}'.
current_shapes = []
if j == 0:
# At the first scale, the current shapes are created by aligning
# the reference shape to the perturbed bounding boxes.
msg = '{}Aligning reference shape with bounding boxes.'.format(
scale_prefix)
wrap = partial(print_progress, prefix=msg,
end_with_newline=False, verbose=verbose)
# Extract perturbations at the very bottom level
for ii in wrap(scaled_images):
c_shapes = []
for bbox in generated_bb_func(ii):
c_s = align_shape_with_bounding_box(
self.reference_shape, bbox)
c_shapes.append(c_s)
current_shapes.append(c_shapes)
else:
                # At subsequent scales, extract the current shapes that
                # were attached to the images
msg = '{}Extracting shape estimations from previous ' \
'scale.'.format(scale_prefix)
wrap = partial(print_progress, prefix=msg,
end_with_newline=False, verbose=verbose)
for ii in wrap(scaled_images):
c_shapes = []
for k in list(range(self.n_perturbations)):
c_key = '__sdm_current_shape_{}'.format(k)
c_shapes.append(ii.landmarks[c_key].lms)
current_shapes.append(c_shapes)
# Train supervised descent algorithm. This returns the shape
# estimations for the next scale.
if not increment:
current_shapes = self.algorithms[j].train(
scaled_images, scaled_shapes, current_shapes,
prefix=scale_prefix, verbose=verbose)
else:
current_shapes = self.algorithms[j].increment(
scaled_images, scaled_shapes, current_shapes,
prefix=scale_prefix, verbose=verbose)
# Scale the current shape estimations for the next level. This
# doesn't have to be done for the last scale. The only thing we need
# to do at the last scale is to remove any attached landmarks from
# the training images.
if j < (self.n_scales - 1):
if self.holistic_features[j + 1] != self.holistic_features[j]:
# Features will be extracted, thus attach current_shapes on
# the training images (image_batch)
for jj, image_shapes in enumerate(current_shapes):
for k, shape in enumerate(image_shapes):
c_key = '__sdm_current_shape_{}'.format(k)
image_batch[jj].landmarks[c_key] = \
scale_transforms[jj].apply(shape)
else:
                    # Features won't be extracted; the same feature_images
                    # will be used for the next scale, thus attach
                    # current_shapes on them.
for jj, image_shapes in enumerate(current_shapes):
for k, shape in enumerate(image_shapes):
c_key = '__sdm_current_shape_{}'.format(k)
feature_images[jj].landmarks[c_key] = \
scale_transforms[jj].apply(shape)
else:
                # Check whether the original training images (image_batch)
                # have current shape estimations attached. If so, delete them.
if '__sdm_current_shape_0' in image_batch[0].landmarks:
for image in image_batch:
for k in list(range(self.n_perturbations)):
c_key = '__sdm_current_shape_{}'.format(k)
del image.landmarks[c_key]
def increment(self, images, group=None, bounding_box_group_glob=None,
verbose=False, batch_size=None):
r"""
Method to increment the trained SDM with a new set of training images.
Parameters
----------
images : `list` of `menpo.image.Image`
The `list` of training images.
group : `str` or ``None``, optional
The landmark group that corresponds to the ground truth shape of
each image. If ``None`` and the images only have a single
landmark group, then that is the one that will be used. Note that
all the training images need to have the specified landmark group.
bounding_box_group_glob : `glob` or ``None``, optional
Glob that defines the bounding boxes to be used for training. If
``None``, then the bounding boxes of the ground truth shapes are
used.
verbose : `bool`, optional
If ``True``, then the progress of training will be printed.
batch_size : `int` or ``None``, optional
If an `int` is provided, then the training is performed in an
incremental fashion on image batches of size equal to the provided
value. If ``None``, then the training is performed directly on the
all the images.
"""
raise NotImplementedError('Incrementing SDM methods is not yet '
'implemented as careful attention must '
'be taken when considering the relationships '
'between cascade levels.')
def _fitter_result(self, image, algorithm_results, affine_transforms,
scale_transforms, gt_shape=None):
r"""
        Function that creates the multi-scale fitting result object.
Parameters
----------
image : `menpo.image.Image` or subclass
The image that was fitted.
algorithm_results : `list` of :map:`NonParametricIterativeResult` or subclass
The list of fitting result per scale.
affine_transforms : `list` of `menpo.transform.Affine`
The list of affine transforms per scale that are the inverses of the
transformations introduced by the rescale wrt the reference shape as
well as the feature extraction.
scale_transforms : `list` of `menpo.shape.Scale`
The list of inverse scaling transforms per scale.
gt_shape : `menpo.shape.PointCloud`, optional
The ground truth shape associated to the image.
Returns
-------
fitting_result : :map:`MultiScaleNonParametricIterativeResult` or subclass
The multi-scale fitting result containing the result of the fitting
procedure.
"""
return self.algorithms[0]._multi_scale_fitter_result(
results=algorithm_results, scales=self.scales,
affine_transforms=affine_transforms,
scale_transforms=scale_transforms, image=image, gt_shape=gt_shape)
def __str__(self):
if self.diagonal is not None:
diagonal = self.diagonal
else:
y, x = self.reference_shape.range()
diagonal = np.sqrt(x ** 2 + y ** 2)
is_custom_perturb_func = (self._perturb_from_gt_bounding_box !=
noisy_shape_from_bounding_box)
if is_custom_perturb_func:
is_custom_perturb_func = name_of_callable(
self._perturb_from_gt_bounding_box)
regressor_cls = self.algorithms[0]._regressor_cls
# Compute scale info strings
scales_info = []
lvl_str_tmplt = r""" - Scale {}
- {} iterations
- Patch shape: {}
- Holistic feature: {}
- Patch feature: {}"""
for k, s in enumerate(self.scales):
scales_info.append(lvl_str_tmplt.format(
s, self.n_iterations[k], self.patch_shape[k],
name_of_callable(self.holistic_features[k]),
name_of_callable(self.patch_features[k])))
scales_info = '\n'.join(scales_info)
cls_str = r"""Supervised Descent Method
- Regression performed using the {reg_alg} algorithm
- Regression class: {reg_cls}
- Perturbations generated per shape: {n_perturbations}
- Images scaled to diagonal: {diagonal:.2f}
- Custom perturbation scheme used: {is_custom_perturb_func}
- Scales: {scales}
{scales_info}
""".format(
reg_alg=name_of_callable(self._sd_algorithm_cls[0]),
reg_cls=name_of_callable(regressor_cls),
n_perturbations=self.n_perturbations,
diagonal=diagonal,
is_custom_perturb_func=is_custom_perturb_func,
scales=self.scales,
scales_info=scales_info)
return cls_str
# *
# ************************* Non-Parametric Fitters *****************************
# *
# Aliases for common combinations of supervised descent fitting
class SDM(SupervisedDescentFitter):
r"""
Class for training a non-parametric multi-scale Supervised Descent model
using :map:`NonParametricNewton`.
Parameters
----------
images : `list` of `menpo.image.Image`
The `list` of training images.
group : `str` or ``None``, optional
The landmark group that corresponds to the ground truth shape of each
image. If ``None`` and the images only have a single landmark group,
then that is the one that will be used. Note that all the training
images need to have the specified landmark group.
bounding_box_group_glob : `glob` or ``None``, optional
Glob that defines the bounding boxes to be used for training. If
``None``, then the bounding boxes of the ground truth shapes are used.
reference_shape : `menpo.shape.PointCloud` or ``None``, optional
The reference shape that will be used for normalising the size of the
        training images. The normalisation is performed by rescaling all the
training images so that the scale of their ground truth shapes
matches the scale of the reference shape. Note that the reference
shape is rescaled with respect to the `diagonal` before performing
the normalisation. If ``None``, then the mean shape will be used.
diagonal : `int` or ``None``, optional
This parameter is used to rescale the reference shape so that the
diagonal of its bounding box matches the provided value. In other
words, this parameter controls the size of the model at the highest
scale. If ``None``, then the reference shape does not get rescaled.
holistic_features : `closure` or `list` of `closure`, optional
The features that will be extracted from the training images. Note
that the features are extracted before warping the images to the
reference shape. If `list`, then it must define a feature function per
scale. Please refer to `menpo.feature` for a list of potential features.
patch_features : `closure` or `list` of `closure`, optional
The features that will be extracted from the patches of the training
images. Note that, as opposed to `holistic_features`, these features
are extracted after extracting the patches. If `list`, then it must
define a feature function per scale. Please refer to `menpo.feature`
and `menpofit.feature` for a list of potential features.
patch_shape : (`int`, `int`) or `list` of (`int`, `int`), optional
The shape of the patches to be extracted. If a `list` is provided,
then it defines a patch shape per scale.
scales : `float` or `tuple` of `float`, optional
        The scale value of each scale. They must be provided in ascending order,
i.e. from lowest to highest scale. If `float`, then a single scale is
assumed.
n_iterations : `int` or `list` of `int`, optional
The number of iterations (cascades) of each level. If `list`, it must
specify a value per scale. If `int`, then it defines the total number of
iterations (cascades) over all scales.
n_perturbations : `int`, optional
The number of perturbations to be generated from each of the bounding
boxes using `perturb_from_gt_bounding_box`.
perturb_from_gt_bounding_box : `callable`, optional
The function that will be used to generate the perturbations from each
of the bounding boxes.
batch_size : `int` or ``None``, optional
If an `int` is provided, then the training is performed in an
incremental fashion on image batches of size equal to the provided
        value. If ``None``, then the training is performed directly on all
        the images.
verbose : `bool`, optional
If ``True``, then the progress of the training will be printed.
References
----------
.. [1] X. Xiong, and F. De la Torre. "Supervised Descent Method and its
applications to face alignment", Proceedings of the IEEE Conference on
Computer Vision and Pattern Recognition (CVPR), 2013.
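    Examples
    --------
    Training is a one-liner once annotated images are loaded; the import
    path below is an assumption of the example::

        import menpo.io as mio

        fitter = SDM(list(mio.import_images('/path/to/trainset/')),
                     verbose=True)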
"""
def __init__(self, images, group=None, bounding_box_group_glob=None,
reference_shape=None, diagonal=None, holistic_features=no_op,
patch_features=no_op, patch_shape=(17, 17), scales=(0.5, 1.0),
n_iterations=3, n_perturbations=30,
perturb_from_gt_bounding_box=noisy_shape_from_bounding_box,
batch_size=None, verbose=False):
super(SDM, self).__init__(
images, group=group,
bounding_box_group_glob=bounding_box_group_glob,
reference_shape=reference_shape,
sd_algorithm_cls=NonParametricNewton,
holistic_features=holistic_features,
patch_features=patch_features, patch_shape=patch_shape,
diagonal=diagonal, scales=scales, n_iterations=n_iterations,
n_perturbations=n_perturbations,
perturb_from_gt_bounding_box=perturb_from_gt_bounding_box,
batch_size=batch_size, verbose=verbose)
class RegularizedSDM(SupervisedDescentFitter):
r"""
Class for training a non-parametric multi-scale Supervised Descent model
using :map:`NonParametricNewton` with regularization.
Parameters
----------
images : `list` of `menpo.image.Image`
The `list` of training images.
group : `str` or ``None``, optional
The landmark group that corresponds to the ground truth shape of each
image. If ``None`` and the images only have a single landmark group,
then that is the one that will be used. Note that all the training
images need to have the specified landmark group.
bounding_box_group_glob : `glob` or ``None``, optional
Glob that defines the bounding boxes to be used for training. If
``None``, then the bounding boxes of the ground truth shapes are used.
alpha : `float`, optional
The regression regularization parameter.
reference_shape : `menpo.shape.PointCloud` or ``None``, optional
The reference shape that will be used for normalising the size of the
        training images. The normalisation is performed by rescaling all the
training images so that the scale of their ground truth shapes
matches the scale of the reference shape. Note that the reference
shape is rescaled with respect to the `diagonal` before performing
the normalisation. If ``None``, then the mean shape will be used.
diagonal : `int` or ``None``, optional
This parameter is used to rescale the reference shape so that the
diagonal of its bounding box matches the provided value. In other
words, this parameter controls the size of the model at the highest
scale. If ``None``, then the reference shape does not get rescaled.
holistic_features : `closure` or `list` of `closure`, optional
The features that will be extracted from the training images. Note
that the features are extracted before warping the images to the
reference shape. If `list`, then it must define a feature function per
scale. Please refer to `menpo.feature` for a list of potential features.
patch_features : `closure` or `list` of `closure`, optional
The features that will be extracted from the patches of the training
images. Note that, as opposed to `holistic_features`, these features
are extracted after extracting the patches. If `list`, then it must
define a feature function per scale. Please refer to `menpo.feature`
and `menpofit.feature` for a list of potential features.
patch_shape : (`int`, `int`) or `list` of (`int`, `int`), optional
The shape of the patches to be extracted. If a `list` is provided,
then it defines a patch shape per scale.
scales : `float` or `tuple` of `float`, optional
        The scale value of each scale. They must be provided in ascending order,
i.e. from lowest to highest scale. If `float`, then a single scale is
assumed.
n_iterations : `int` or `list` of `int`, optional
The number of iterations (cascades) of each level. If `list`, it must
specify a value per scale. If `int`, then it defines the total number of
iterations (cascades) over all scales.
n_perturbations : `int`, optional
The number of perturbations to be generated from each of the bounding
boxes using `perturb_from_gt_bounding_box`.
perturb_from_gt_bounding_box : `callable`, optional
The function that will be used to generate the perturbations from each
of the bounding boxes.
batch_size : `int` or ``None``, optional
If an `int` is provided, then the training is performed in an
incremental fashion on image batches of size equal to the provided
        value. If ``None``, then the training is performed directly on all
        the images.
verbose : `bool`, optional
If ``True``, then the progress of the training will be printed.
References
----------
.. [1] X. Xiong, and F. De la Torre. "Supervised Descent Method and its
applications to face alignment", Proceedings of the IEEE Conference on
Computer Vision and Pattern Recognition (CVPR), 2013.
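    Examples
    --------
    Identical to :map:`SDM`, apart from the extra `alpha` regularisation
    weight (the value below is purely illustrative)::

        fitter = RegularizedSDM(training_images, alpha=0.01, verbose=True)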
"""
def __init__(self, images, group=None, bounding_box_group_glob=None,
alpha=0.0001, reference_shape=None, diagonal=None,
holistic_features=no_op, patch_features=no_op,
patch_shape=(17, 17), scales=(0.5, 1.0), n_iterations=6,
n_perturbations=30,
perturb_from_gt_bounding_box=noisy_shape_from_bounding_box,
batch_size=None, verbose=False):
super(RegularizedSDM, self).__init__(
images, group=group,
bounding_box_group_glob=bounding_box_group_glob,
reference_shape=reference_shape,
sd_algorithm_cls=partial(NonParametricNewton, alpha=alpha),
holistic_features=holistic_features, patch_features=patch_features,
patch_shape=patch_shape, diagonal=diagonal, scales=scales,
n_iterations=n_iterations, n_perturbations=n_perturbations,
perturb_from_gt_bounding_box=perturb_from_gt_bounding_box,
batch_size=batch_size, verbose=verbose)
|
|
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import datetime
import neat.db_utils as db_utils
import logging
logging.disable(logging.CRITICAL)
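# pyqcy's @qc(n) decorator runs each property n times with freshly generated
# arguments. An illustrative property (not part of this suite):
#
#     @qc(10)
#     def prop(x=int_(min=0, max=100)):
#         assert 0 <= x <= 100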
class Db(TestCase):
@qc(1)
def insert_select():
db = db_utils.init_db('sqlite:///:memory:')
db.vms.insert().execute(uuid='test')
assert db.vms.select().execute().first()['uuid'] == 'test'
db.vm_resource_usage.insert().execute(vm_id=1, cpu_mhz=1000)
assert db.vm_resource_usage.select(). \
execute().first()['cpu_mhz'] == 1000
@qc(10)
def select_cpu_mhz_for_vm(
uuid=str_(of='abc123-', min_length=36, max_length=36),
cpu_mhz=list_(of=int_(min=0, max=3000), min_length=0, max_length=10),
n=int_(min=1, max=10)
):
db = db_utils.init_db('sqlite:///:memory:')
result = db.vms.insert().execute(uuid=uuid)
vm_id = result.inserted_primary_key[0]
for mhz in cpu_mhz:
db.vm_resource_usage.insert().execute(
vm_id=vm_id,
cpu_mhz=mhz)
assert db.select_cpu_mhz_for_vm(uuid, n) == cpu_mhz[-n:]
@qc(10)
def select_last_cpu_mhz_for_vms(
vms=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=list_(of=int_(min=1, max=3000),
min_length=0, max_length=10),
min_length=0, max_length=3
)
):
db = db_utils.init_db('sqlite:///:memory:')
res = {}
for uuid, data in vms.items():
for value in data:
db.insert_vm_cpu_mhz({uuid: value})
if data:
res[uuid] = data[-1]
assert db.select_last_cpu_mhz_for_vms() == res
@qc(10)
def select_vm_id(
uuid1=str_(of='abc123-', min_length=36, max_length=36),
uuid2=str_(of='abc123-', min_length=36, max_length=36)
):
db = db_utils.init_db('sqlite:///:memory:')
result = db.vms.insert().execute(uuid=uuid1)
vm_id = result.inserted_primary_key[0]
assert db.select_vm_id(uuid1) == vm_id
assert db.select_vm_id(uuid2) == vm_id + 1
@qc(10)
def insert_vm_cpu_mhz(
vms=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=tuple_(int_(min=1, max=3000),
list_(of=int_(min=1, max=3000),
min_length=0, max_length=10)),
min_length=0, max_length=5
)
):
db = db_utils.init_db('sqlite:///:memory:')
initial_data = []
data_to_submit = {}
final_data = {}
for uuid, data in vms.items():
vm_id = db.select_vm_id(uuid)
data_to_submit[uuid] = data[0]
final_data[uuid] = list(data[1])
final_data[uuid].append(data[0])
for cpu_mhz in data[1]:
initial_data.append({'vm_id': vm_id,
'cpu_mhz': cpu_mhz})
if initial_data:
db.vm_resource_usage.insert().execute(initial_data)
db.insert_vm_cpu_mhz(data_to_submit)
for uuid, data in final_data.items():
assert db.select_cpu_mhz_for_vm(uuid, 11) == data
@qc(1)
def update_host():
db = db_utils.init_db('sqlite:///:memory:')
db.update_host('host1', 3000, 4, 4000)
hosts = db.hosts.select().execute().fetchall()
assert len(hosts) == 1
host = hosts[0]
assert host['hostname'] == 'host1'
assert host['cpu_mhz'] == 3000
assert host['cpu_cores'] == 4
assert host['ram'] == 4000
db.update_host('host1', 3500, 8, 8000)
hosts = db.hosts.select().execute().fetchall()
assert len(hosts) == 1
host = hosts[0]
assert host['hostname'] == 'host1'
assert host['cpu_mhz'] == 3500
assert host['cpu_cores'] == 8
assert host['ram'] == 8000
@qc(10)
def select_cpu_mhz_for_host(
hostname=str_(of='abc123', min_length=5, max_length=10),
cpu_mhz=list_(of=int_(min=0, max=3000), min_length=0, max_length=10),
n=int_(min=1, max=10)
):
db = db_utils.init_db('sqlite:///:memory:')
host_id = db.update_host(hostname, 1, 1, 1)
for mhz in cpu_mhz:
db.host_resource_usage.insert().execute(
host_id=host_id,
cpu_mhz=mhz)
assert db.select_cpu_mhz_for_host(hostname, n) == cpu_mhz[-n:]
@qc(10)
def select_last_cpu_mhz_for_hosts(
hosts=dict_(
keys=str_(of='abc123', min_length=5, max_length=10),
values=list_(of=int_(min=1, max=3000),
min_length=0, max_length=10),
min_length=0, max_length=3
)
):
db = db_utils.init_db('sqlite:///:memory:')
res = {}
for hostname, data in hosts.items():
db.update_host(hostname, 1, 1, 1)
for value in data:
db.insert_host_cpu_mhz(hostname, value)
if data:
res[hostname] = data[-1]
else:
res[hostname] = 0
assert db.select_last_cpu_mhz_for_hosts() == res
@qc(10)
def insert_host_cpu_mhz(
hostname=str_(of='abc123', min_length=5, max_length=10),
cpu_mhz=list_(of=int_(min=0, max=3000), min_length=1, max_length=10)
):
db = db_utils.init_db('sqlite:///:memory:')
db.update_host(hostname, 1, 1, 1)
for value in cpu_mhz:
db.insert_host_cpu_mhz(hostname, value)
assert db.select_cpu_mhz_for_host(hostname, len(cpu_mhz)) == cpu_mhz
@qc(1)
def select_host_characteristics():
db = db_utils.init_db('sqlite:///:memory:')
assert db.select_host_characteristics() == ({}, {}, {})
db.update_host('host1', 3000, 4, 4000)
db.update_host('host2', 3500, 8, 8000)
assert db.select_host_characteristics() == \
({'host1': 3000, 'host2': 3500},
{'host1': 4, 'host2': 8},
{'host1': 4000, 'host2': 8000})
@qc(1)
def select_host_id():
db = db_utils.init_db('sqlite:///:memory:')
host1_id = db.hosts.insert().execute(
hostname='host1',
cpu_mhz=1,
cpu_cores=1,
ram=1).inserted_primary_key[0]
host2_id = db.hosts.insert().execute(
hostname='host2',
cpu_mhz=1,
cpu_cores=1,
ram=1).inserted_primary_key[0]
assert db.select_host_id('host1') == host1_id
assert db.select_host_id('host2') == host2_id
@qc(1)
def select_host_ids():
db = db_utils.init_db('sqlite:///:memory:')
assert db.select_host_ids() == {}
hosts = {}
hosts['host1'] = db.update_host('host1', 1, 1, 1)
hosts['host2'] = db.update_host('host2', 1, 1, 1)
assert db.select_host_ids() == hosts
@qc(1)
def cleanup_vm_resource_usage(
uuid=str_(of='abc123-', min_length=36, max_length=36)
):
db = db_utils.init_db('sqlite:///:memory:')
result = db.vms.insert().execute(uuid=uuid)
vm_id = result.inserted_primary_key[0]
time = datetime.datetime.today()
for i in range(10):
db.vm_resource_usage.insert().execute(
vm_id=1,
cpu_mhz=i,
timestamp=time.replace(second=i))
assert db.select_cpu_mhz_for_vm(uuid, 100) == range(10)
db.cleanup_vm_resource_usage(time.replace(second=5))
assert db.select_cpu_mhz_for_vm(uuid, 100) == range(5, 10)
@qc(1)
def cleanup_host_resource_usage(
hostname=str_(of='abc123', min_length=5, max_length=10)
):
db = db_utils.init_db('sqlite:///:memory:')
host_id = db.update_host(hostname, 1, 1, 1)
time = datetime.datetime.today()
for i in range(10):
db.host_resource_usage.insert().execute(
host_id=1,
cpu_mhz=i,
timestamp=time.replace(second=i))
assert db.select_cpu_mhz_for_host(hostname, 100) == range(10)
db.cleanup_host_resource_usage(time.replace(second=5))
assert db.select_cpu_mhz_for_host(hostname, 100) == range(5, 10)
def test_insert_host_states(self):
db = db_utils.init_db('sqlite:///:memory:')
hosts = {}
hosts['host1'] = db.update_host('host1', 1, 1, 1)
hosts['host2'] = db.update_host('host2', 1, 1, 1)
db.insert_host_states({'host1': 0, 'host2': 1})
db.insert_host_states({'host1': 0, 'host2': 0})
db.insert_host_states({'host1': 1, 'host2': 1})
result = db.host_states.select().execute().fetchall()
host1 = [x[3] for x in sorted(filter(
lambda x: x[1] == hosts['host1'],
result), key=lambda x: x[0])]
self.assertEqual(host1, [0, 0, 1])
host2 = [x[3] for x in sorted(filter(
lambda x: x[1] == hosts['host2'],
result), key=lambda x: x[0])]
self.assertEqual(host2, [1, 0, 1])
@qc(10)
def select_host_states(
hosts=dict_(
keys=str_(of='abc123', min_length=1, max_length=5),
values=list_(of=int_(min=0, max=1),
min_length=0, max_length=10),
min_length=0, max_length=3
)
):
db = db_utils.init_db('sqlite:///:memory:')
res = {}
for host, data in hosts.items():
db.update_host(host, 1, 1, 1)
for state in data:
db.insert_host_states({host: state})
if data:
res[host] = data[-1]
else:
res[host] = 1
assert db.select_host_states() == res
@qc(10)
def select_active_hosts(
hosts=dict_(
keys=str_(of='abc123', min_length=1, max_length=5),
values=list_(of=int_(min=0, max=1),
min_length=0, max_length=10),
min_length=0, max_length=3
)
):
db = db_utils.init_db('sqlite:///:memory:')
res = []
for host, data in hosts.items():
db.update_host(host, 1, 1, 1)
for state in data:
db.insert_host_states({host: state})
if data and data[-1] == 1 or not data:
res.append(host)
assert set(db.select_active_hosts()) == set(res)
@qc(10)
def select_inactive_hosts(
hosts=dict_(
keys=str_(of='abc123', min_length=1, max_length=5),
values=list_(of=int_(min=0, max=1),
min_length=0, max_length=10),
min_length=0, max_length=3
)
):
db = db_utils.init_db('sqlite:///:memory:')
res = []
for host, data in hosts.items():
db.update_host(host, 1, 1, 1)
for state in data:
db.insert_host_states({host: state})
if data and data[-1] == 0:
res.append(host)
assert set(db.select_inactive_hosts()) == set(res)
def test_insert_host_overload(self):
db = db_utils.init_db('sqlite:///:memory:')
hosts = {}
hosts['host1'] = db.update_host('host1', 1, 1, 1)
hosts['host2'] = db.update_host('host2', 1, 1, 1)
db.insert_host_overload('host2', False)
db.insert_host_overload('host1', True)
db.insert_host_overload('host1', False)
db.insert_host_overload('host2', True)
result = db.host_overload.select().execute().fetchall()
host1 = [x[3] for x in sorted(filter(
lambda x: x[1] == hosts['host1'],
result), key=lambda x: x[0])]
self.assertEqual(host1, [1, 0])
host2 = [x[3] for x in sorted(filter(
lambda x: x[1] == hosts['host2'],
result), key=lambda x: x[0])]
self.assertEqual(host2, [0, 1])
@qc(1)
    def insert_vm_migration():
db = db_utils.init_db('sqlite:///:memory:')
        db.vms.insert().execute(uuid='x' * 36)
vm_id = db.vms.insert().execute(uuid='vm' * 18).inserted_primary_key[0]
host_id = db.update_host('host', 1, 1, 1)
db.insert_vm_migration('vm' * 18, 'host')
result = db.vm_migrations.select().execute().first()
assert result[1] == vm_id
assert result[2] == host_id
|
|
"""Accumuldated days"""
import datetime
from pandas.io.sql import read_sql
from pyiem.plot import figure_axes
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
PDICT = {
"high_above": "High Temperature At or Above",
"high_below": "High Temperature Below",
"low_above": "Low Temperature At or Above",
"low_below": "Low Temperature Below",
}
PDICT2 = {"jan1": "January 1", "jul1": "July 1"}
def get_description():
"""Return a dict describing how to call this plotter"""
desc = {}
desc["data"] = True
desc["cache"] = 86400
desc[
"description"
] = """This plot displays the accumulated number of days
that the high or low temperature was above or below some threshold.
"""
desc["arguments"] = [
dict(
type="station",
name="station",
default="IATDSM",
label="Select Station:",
network="IACLIMATE",
),
dict(
type="select",
name="var",
default="high_above",
label="Which Metric",
options=PDICT,
),
dict(type="int", name="threshold", default=32, label="Threshold (F)"),
dict(
type="select",
name="split",
default="jan1",
options=PDICT2,
label="Where to split the year?",
),
dict(
type="year",
name="year",
default=datetime.date.today().year,
label="Year to Highlight in Chart",
),
]
return desc
def highcharts(fdict):
"""Highcharts Output"""
ctx = get_autoplot_context(fdict, get_description())
station = ctx["station"]
varname = ctx["var"]
df = get_data(ctx)
j = {}
j["tooltip"] = {
"shared": True,
"headerFormat": (
'<span style="font-size: 10px">{point.key: %b %e}</span><br/>'
),
}
j["title"] = {
"text": "%s [%s] %s %sF"
% (
ctx["_nt"].sts[station]["name"],
station,
PDICT[varname],
int(fdict.get("threshold", 32)),
)
}
j["yAxis"] = {"title": {"text": "Accumulated Days"}, "startOnTick": False}
j["xAxis"] = {
"type": "datetime",
"dateTimeLabelFormats": { # don't display the dummy year
"month": "%e. %b",
"year": "%b",
},
"title": {"text": "Date"},
}
j["chart"] = {"zoomType": "xy", "type": "line"}
avgs = []
ranges = []
thisyear = []
for doy, row in df.iterrows():
ts = datetime.date(2001, 1, 1) + datetime.timedelta(days=(doy - 1))
ticks = (ts - datetime.date(1970, 1, 1)).total_seconds() * 1000.0
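        # e.g. (illustrative) doy=32 -> 2001-02-01 -> ticks=980985600000.0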
avgs.append([ticks, row["avg"]])
ranges.append([ticks, row["min"], row["max"]])
if row["thisyear"] >= 0:
thisyear.append([ticks, row["thisyear"]])
lbl = (
"%s" % (fdict.get("year", 2015),)
if fdict.get("split", "jan1") == "jan1"
else "%s - %s"
% (int(fdict.get("year", 2015)) - 1, int(fdict.get("year", 2015)))
)
j["series"] = [
{
"name": "Average",
"data": avgs,
"zIndex": 1,
"tooltip": {"valueDecimals": 2},
"marker": {
"fillColor": "white",
"lineWidth": 2,
"lineColor": "red",
},
},
{
"name": lbl,
"data": thisyear,
"zIndex": 1,
"marker": {
"fillColor": "blue",
"lineWidth": 2,
"lineColor": "green",
},
},
{
"name": "Range",
"data": ranges,
"type": "arearange",
"lineWidth": 0,
"linkedTo": ":previous",
"color": "tan",
"fillOpacity": 0.3,
"zIndex": 0,
},
]
return j
def get_data(ctx):
"""Get the data"""
pgconn = get_dbconn("coop")
station = ctx["station"]
threshold = ctx["threshold"]
varname = ctx["var"]
year = ctx["year"]
split = ctx["split"]
table = f"alldata_{station[:2]}"
days = 0 if split == "jan1" else 183
opp = " < " if varname.find("_below") > 0 else " >= "
col = "high" if varname.find("high") == 0 else "low"
# We need to do some magic to compute the start date, since we don't want
# an incomplete year mucking things up
sts = ctx["_nt"].sts[station]["archive_begin"]
if sts is None:
raise NoDataFound("Unknown station metadata.")
if sts.month > 1:
sts = sts + datetime.timedelta(days=365)
sts = sts.replace(month=1, day=1)
if split == "jul1":
sts = sts.replace(month=7, day=1)
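    # Worked example (illustrative): with split='jul1' (days=183),
    # 2000-10-15 + 183 days = 2001-04-16, so extract(year ...) labels each
    # Jul-Jun season by the calendar year it ends in; subtracting the same
    # 183 from the doy afterwards makes each season start near day 1.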
df = read_sql(
f"""
with data as (
select extract(year from day + '%s days'::interval) as season,
extract(doy from day + '%s days'::interval) as doy,
(case when {col} {opp} %s then 1 else 0 end) as hit
from {table}
where station = %s and day >= %s),
agg1 as (
SELECT season, doy,
sum(hit) OVER (PARTITION by season ORDER by doy ASC) from data)
SELECT doy - %s as doy, min(sum), avg(sum), max(sum),
max(case when season = %s then sum else null end) as thisyear from agg1
WHERE doy < 365 GROUP by doy ORDER by doy ASC
""",
pgconn,
params=(days, days, threshold, station, sts, days, year),
index_col=None,
)
df["datestr"] = df["doy"].apply(
lambda x: (
datetime.date(2001, 1, 1) + datetime.timedelta(days=x)
).strftime("%-d %b")
)
df = df.set_index("doy")
return df
def plotter(fdict):
"""Go"""
ctx = get_autoplot_context(fdict, get_description())
station = ctx["station"]
threshold = ctx["threshold"]
varname = ctx["var"]
year = ctx["year"]
df = get_data(ctx)
if df.empty:
raise NoDataFound("Error, no results returned!")
title = ("%s [%s]\n" r"%s %.0f$^\circ$F") % (
ctx["_nt"].sts[station]["name"],
station,
PDICT[varname],
threshold,
)
(fig, ax) = figure_axes(title=title, apctx=ctx)
ax.plot(df.index.values, df["avg"], c="k", lw=2, label="Average")
ax.plot(df.index.values, df["thisyear"], c="g", lw=2, label=f"{year}")
ax.plot(df.index.values, df["max"], c="r", lw=2, label="Max")
ax.plot(df.index.values, df["min"], c="b", lw=2, label="Min")
ax.legend(ncol=1, loc=2)
xticks = []
xticklabels = []
for x in range(int(df.index.min()) - 1, int(df.index.max()) + 1):
ts = datetime.date(2000, 1, 1) + datetime.timedelta(days=x)
if ts.day == 1:
xticks.append(x)
xticklabels.append(ts.strftime("%b"))
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels)
ax.grid(True)
ax.set_xlim(int(df.index.min()) - 1, int(df.index.max()) + 1)
ax.set_ylabel("Accumulated Days")
return fig, df
if __name__ == "__main__":
plotter(dict())
|
|
# Run the _testcapi module tests (tests for the Python/C API): by definition,
# these are all functions _testcapi exports whose name begins with 'test_'.
import os
import pickle
import random
import subprocess
import sys
import time
import unittest
from test import support
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
try:
import threading
except ImportError:
threading = None
# Skip this test if the _testcapi module isn't available.
_testcapi = support.import_module('_testcapi')
def testfunction(self):
"""some doc"""
return self
class InstanceMethod:
id = _testcapi.instancemethod(id)
testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
def test_instancemethod(self):
inst = InstanceMethod()
self.assertEqual(id(inst), inst.id())
self.assertTrue(inst.testfunction() is inst)
self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)
InstanceMethod.testfunction.attribute = "test"
self.assertEqual(testfunction.attribute, "test")
self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_no_FatalError_infinite_loop(self):
with support.suppress_crash_popup():
p = subprocess.Popen([sys.executable, "-c",
'import _testcapi;'
'_testcapi.crash_no_current_thread()'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
self.assertEqual(out, b'')
# This used to cause an infinite loop.
self.assertTrue(err.rstrip().startswith(
b'Fatal Python error:'
b' PyThreadState_Get: no current thread'))
def test_memoryview_from_NULL_pointer(self):
self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)
def test_exc_info(self):
raised_exception = ValueError("5")
new_exc = TypeError("TEST")
try:
raise raised_exception
except ValueError as e:
tb = e.__traceback__
orig_sys_exc_info = sys.exc_info()
orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
new_sys_exc_info = sys.exc_info()
new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
reset_sys_exc_info = sys.exc_info()
self.assertEqual(orig_exc_info[1], e)
self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
else:
self.assertTrue(False)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_seq_bytes_to_charp_array(self):
# Issue #15732: crash in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return 1
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
1,Z(),3,[1, 2],5,6,7,8,9,10,11,12,13,14,15,16,17)
# Issue #15736: overflow in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return sys.maxsize
def __getitem__(self, i):
return b'x'
self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
1,Z(),3,[1, 2],5,6,7,8,9,10,11,12,13,14,15,16,17)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_subprocess_fork_exec(self):
class Z(object):
def __len__(self):
return 1
# Issue #15738: crash in subprocess_fork_exec()
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
Z(),[b'1'],3,[1, 2],5,6,7,8,9,10,11,12,13,14,15,16,17)
@unittest.skipIf(support.check_impl_detail(pypy=True),
'Py_AddPendingCall not currently supported.')
@unittest.skipUnless(threading, 'Threading required for this test.')
class TestPendingCalls(unittest.TestCase):
def pendingcalls_submit(self, l, n):
def callback():
#this function can be interrupted by thread switching so let's
#use an atomic operation
l.append(None)
for i in range(n):
time.sleep(random.random()*0.02) #0.01 secs on average
#try submitting callback until successful.
#rely on regular interrupt to flush queue if we are
#unsuccessful.
while True:
if _testcapi._pending_threadfunc(callback):
                    break
def pendingcalls_wait(self, l, n, context = None):
#now, stick around until l[0] has grown to 10
        count = 0
while len(l) != n:
#this busy loop is where we expect to be interrupted to
#run our callbacks. Note that callbacks are only run on the
#main thread
if False and support.verbose:
print("(%i)"%(len(l),),)
for i in range(1000):
a = i*i
if context and not context.event.is_set():
continue
count += 1
self.assertTrue(count < 10000,
"timeout waiting for %i callbacks, got %i"%(n, len(l)))
if False and support.verbose:
print("(%i)"%(len(l),))
def test_pendingcalls_threaded(self):
#do every callback on a separate thread
n = 32 #total callbacks
threads = []
class foo(object):pass
context = foo()
context.l = []
context.n = 2 #submits per thread
context.nThreads = n // context.n
context.nFinished = 0
context.lock = threading.Lock()
context.event = threading.Event()
for i in range(context.nThreads):
t = threading.Thread(target=self.pendingcalls_thread, args = (context,))
t.start()
threads.append(t)
self.pendingcalls_wait(context.l, n, context)
for t in threads:
t.join()
def pendingcalls_thread(self, context):
try:
self.pendingcalls_submit(context.l, context.n)
finally:
with context.lock:
context.nFinished += 1
nFinished = context.nFinished
if False and support.verbose:
print("finished threads: ", nFinished)
if nFinished == context.nThreads:
context.event.set()
def test_pendingcalls_non_threaded(self):
#again, just using the main thread, likely they will all be dispatched at
#once. It is ok to ask for too many, because we loop until we find a slot.
#the loop can be interrupted to dispatch.
#there are only 32 dispatch slots, so we go for twice that!
l = []
n = 64
self.pendingcalls_submit(l, n)
self.pendingcalls_wait(l, n)
def test_subinterps(self):
import builtins
r, w = os.pipe()
code = """if 1:
import sys, builtins, pickle
with open({:d}, "wb") as f:
pickle.dump(id(sys.modules), f)
pickle.dump(id(builtins), f)
""".format(w)
with open(r, "rb") as f:
ret = _testcapi.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertNotEqual(pickle.load(f), id(sys.modules))
self.assertNotEqual(pickle.load(f), id(builtins))
# Bug #6012
class Test6012(unittest.TestCase):
def test(self):
# PyPy change: Mask out higher bits of reference count. PyPy increases
# the reference count by a high number if the object is linked to a
# PyPy object.
self.assertEqual(_testcapi.argparsing("Hello", "World") & 0xfffffff, 1)
class EmbeddingTest(unittest.TestCase):
@unittest.skipIf(
sys.platform.startswith('win'),
"test doesn't work under Windows")
def test_subinterps(self):
# XXX only tested under Unix checkouts
basepath = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
oldcwd = os.getcwd()
# This is needed otherwise we get a fatal error:
# "Py_Initialize: Unable to get the locale encoding
# LookupError: no codec search functions registered: can't find encoding"
os.chdir(basepath)
try:
exe = os.path.join(basepath, "Modules", "_testembed")
if not os.path.exists(exe):
self.skipTest("%r doesn't exist" % exe)
p = subprocess.Popen([exe],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
self.assertEqual(p.returncode, 0,
"bad returncode %d, stderr is %r" %
(p.returncode, err))
if support.verbose:
print()
print(out.decode('latin1'))
print(err.decode('latin1'))
finally:
os.chdir(oldcwd)
class SkipitemTest(unittest.TestCase):
def test_skipitem(self):
"""
If this test failed, you probably added a new "format unit"
in Python/getargs.c, but neglected to update our poor friend
skipitem() in the same file. (If so, shame on you!)
With a few exceptions**, this function brute-force tests all
printable ASCII*** characters (32 to 126 inclusive) as format units,
checking to see that PyArg_ParseTupleAndKeywords() return consistent
errors both when the unit is attempted to be used and when it is
skipped. If the format unit doesn't exist, we'll get one of two
specific error messages (one for used, one for skipped); if it does
exist we *won't* get that error--we'll get either no error or some
other error. If we get the specific "does not exist" error for one
test and not for the other, there's a mismatch, and the test fails.
** Some format units have special funny semantics and it would
           be difficult to accommodate them here. Since these are all
well-established and properly skipped in skipitem() we can
get away with not testing them--this test is really intended
to catch *new* format units.
*** Python C source files must be ASCII. Therefore it's impossible
to have non-ASCII format units.
"""
empty_tuple = ()
tuple_1 = (0,)
dict_b = {'b':1}
keywords = ["a", "b"]
for i in range(32, 127):
c = chr(i)
# skip parentheses, the error reporting is inconsistent about them
# skip 'e', it's always a two-character code
# skip '|' and '$', they don't represent arguments anyway
if c in '()e|$':
continue
# test the format unit when not skipped
format = c + "i"
try:
# (note: the format string must be bytes!)
_testcapi.parse_tuple_and_keywords(tuple_1, dict_b,
format.encode("ascii"), keywords)
when_not_skipped = False
except TypeError as e:
s = "argument 1 must be impossible<bad format char>, not int"
when_not_skipped = (str(e) == s)
except RuntimeError as e:
when_not_skipped = False
# test the format unit when skipped
optional_format = "|" + format
try:
_testcapi.parse_tuple_and_keywords(empty_tuple, dict_b,
optional_format.encode("ascii"), keywords)
when_skipped = False
except RuntimeError as e:
s = "impossible<bad format char>: '{}'".format(format)
when_skipped = (str(e) == s)
message = ("test_skipitem_parity: "
"detected mismatch between convertsimple and skipitem "
"for format unit '{}' ({}), not skipped {}, skipped {}".format(
c, i, when_skipped, when_not_skipped))
self.assertIs(when_skipped, when_not_skipped, message)
def test_parse_tuple_and_keywords(self):
# parse_tuple_and_keywords error handling tests
self.assertRaises(TypeError, _testcapi.parse_tuple_and_keywords,
(), {}, 42, [])
self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
(), {}, b'', 42)
self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
(), {}, b'', [''] * 42)
self.assertRaises(ValueError, _testcapi.parse_tuple_and_keywords,
(), {}, b'', [42])
@unittest.skipIf(support.check_impl_detail(pypy=True),
'Not currently supported under PyPy')
@unittest.skipUnless(threading, 'Threading required for this test.')
class TestThreadState(unittest.TestCase):
@support.reap_threads
def test_thread_state(self):
# some extra thread-state tests driven via _testcapi
def target():
idents = []
def callback():
idents.append(threading.get_ident())
_testcapi._test_thread_state(callback)
a = b = callback
time.sleep(1)
# Check our main thread is in the list exactly 3 times.
self.assertEqual(idents.count(threading.get_ident()), 3,
"Couldn't find main thread correctly in the list")
target()
t = threading.Thread(target=target)
t.start()
t.join()
class Test_testcapi(unittest.TestCase):
def test__testcapi(self):
for name in dir(_testcapi):
if name.startswith('test_'):
test = getattr(_testcapi, name)
if support.verbose:
print("_tescapi.%s()" % name)
test()
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/env python
""" UCS worker class for pyflex
In the future, this will really be the core renderer. All of the
work of deleting config that shouldn't be there, or creating it
when it should, will be handled here.
Think of the functions file(s) as just that - containers of functions.
This file is where those functions are called, and where you determine
your workflow (i.e. checking for existence of a resource before deleting
it)
"""
#Import UCS Packages
from UcsSdk import UcsHandle
#Import PyFlex Dependencies
from worker import FlexWorker
from functions.newfunctions_ucs import NewUcsFunctions
class UcsWorker(FlexWorker):
""" A child worker class that pertains specifically to UCS """
FABRICS = ['A', 'B']
def startworker(self):
""" Starts this worker """
#Connect to UCSM
handle = UcsHandle()
ucsauth = self.config['auth']['ucs']
handle.Login(
ucsauth['host'],
ucsauth['user'],
ucsauth['pass']
)
newfxns = NewUcsFunctions(handle, self.config['ucs']['org'])
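        # Each section below follows the same reconcile pattern
        # (illustrative sketch only, not SDK code):
        #
        #     desired = {'100': 'PROD'}           # from self.config
        #     for mo in existing:                 # queried from UCS
        #         if desired.get(mo.Id) != mo.Name:
        #             remove(mo)                  # prune undesired config
        #     for vid, name in desired.iteritems():
        #         create(vid, name)               # (re)create desired state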
""" VLANS """
#Temporary dict, to take the groups out of the picture temporarily
#This is a good example of why you need to rethink this mapping
vlans = {}
#Get a list of all VLANs, regardless of group
for group in self.config['vlans']:
#Add to temporary dict
for vlanid, vlanname in self.config['vlans'][group] \
.iteritems():
vlans[str(vlanid)] = vlanname
#Remove any VLANs within UCS that are not in temporary dict
for mo in newfxns.getVLANs().OutConfigs.GetChild():
if mo.Id in vlans.keys() and vlans[mo.Id] == mo.Name:
pass
else:
newfxns.removeVLAN(mo)
#Add all VLANs in the temporary dict to UCS
for vlanid, vlanname in vlans.iteritems():
newfxns.createVLAN(vlanid, vlanname)
""" VSANS """
#Temporary dict, to take the groups out of the picture temporarily
#This is a good example of why you need to rethink this mapping
vsans = {}
        #Get a list of all VSANs, regardless of fabric
for fabric in self.config['vsans']:
#Add to temporary dict
for vsanid, vsanname in self.config['vsans'][fabric] \
.iteritems():
vsans[str(vsanid)] = vsanname
#Remove any VSANs within UCS that are not in temporary dict
for mo in newfxns.getVSANs().OutConfigs.GetChild():
if mo.Id in vsans.keys() and vsans[mo.Id] == mo.Name:
pass
else:
newfxns.removeVSAN(mo)
#Add all VSANs in the temporary dict to UCS
for fabric in ['a', 'b']:
for vsanid, vsanname in self.config['vsans'][fabric].iteritems():
newfxns.createVSAN(fabric.upper(), vsanid, vsanname)
""" IP KVM POOL """
newfxns.createIpPool(
self.config['ucs']['pools']['ip']['start'],
self.config['ucs']['pools']['ip']['end'],
self.config['ucs']['pools']['ip']['mask'],
self.config['ucs']['pools']['ip']['gw']
)
""" MAC POOLS """
#Pull MAC Pool Configuration Info
macpools = self.config['ucs']['pools']['mac']
#MAC Pools
for fabric in macpools:
newfxns.createMacPool(
fabric,
macpools[fabric]['blockbegin'],
macpools[fabric]['blockend']
)
""" WWPN POOLS """
#Pull WWPN Pool Configuration Info
wwpnpools = self.config['ucs']['pools']['wwpn']
#WWPN Pools
for fabric in wwpnpools:
newfxns.createWwpnPool(
fabric,
wwpnpools[fabric]['blockbegin'],
wwpnpools[fabric]['blockend']
)
#TODO: UUID POOL
#TODO: WWNN POOL
""" GLOBAL POLICIES """
newfxns.setPowerPolicy("grid")
newfxns.setChassisDiscoveryPolicy(str(self.config['ucs']['links']))
""" QOS """
newfxns.setGlobalQosPolicy(self.config['qos'])
for classname, hostcontrol in self.config['qos']['classes'] \
.iteritems():
newfxns.createQosPolicy(classname, hostcontrol)
""" ORG-SPECIFIC POLICIES """
newfxns.createLocalDiskPolicy("NO_LOCAL", "no-local-storage")
newfxns.createLocalDiskPolicy("RAID1", "raid-mirrored")
newfxns.createHostFWPackage("HOST_FW_PKG")
newfxns.createMaintPolicy("MAINT_USERACK", "user-ack")
newfxns.createNetControlPolicy("NTKCTRL-CDP")
""" VNIC TEMPLATES """
# Create vNIC Templates
for vnicprefix, vlangroup in self.config['ucs']['vlangroups'] \
.iteritems():
for fabricid in self.FABRICS:
#Create vNIC Template
vnic = newfxns.createVnicTemplate(vnicprefix, fabricid)[0]
                #Remove any VLANs on this template that are not in the config
for vlanMo in newfxns.getVnicVlans(vnic).OutConfigs \
.GetChild():
#Have to use Name and values - no "Id" parameter
if vlanMo.Name in self.config['vlans'][vlangroup].values():
pass
else:
newfxns.removeVnicVlan(vnic, vlanMo.Name)
#Add VLANs from configuration to this vNIC
for vlanid, vlanname in self.config['vlans'][vlangroup] \
.iteritems():
newfxns.createVnicVlan(vnic, vlanname)
""" VHBA TEMPLATES """
# Create vHBA Templates
for fabricid in self.FABRICS:
vhba = newfxns.createVhbaTemplate(fabricid)[0]
# No removal method necessary; will override existing VSAN setting
for vsanid, vsanname in self.config['vsans'][fabricid.lower()] \
.iteritems():
newfxns.createVhbaVsan(vhba, vsanname)
""" SP TEMPLATES """
spt = newfxns.createSPTemplate(self.config['ucs']['sptname'])
vnics = [ #TODO: This is an ugly way to do this
"ESX-MGMT-A",
"ESX-MGMT-B",
"ESX-NFS-A",
"ESX-NFS-B",
"ESX-PROD-A",
"ESX-PROD-B"
]
vhbas = [
"ESX-VHBA-A",
"ESX-VHBA-B"
]
#Create vNIC VCON assignments
for vnic in vnics:
transport = "ethernet"
order = str(vnics.index(vnic) + 1)
newfxns.setVconOrder(spt[0], vnic, transport, order)
newfxns.addVnicFromTemplate(spt[0], vnic)
for vhba in vhbas:
transport = "fc"
order = str(vhbas.index(vhba) + 1)
newfxns.setVconOrder(spt[0], vhba, transport, order)
newfxns.addVhbaFromTemplate(spt[0], vhba)
newfxns.setWWNNPool(spt[0], "ESXi-WWNN")
newfxns.setPowerState(spt[0], "admin-up")
# newfxns.spawnZerglings(
# spt[0],
# self.config['ucs']['spprefix'],
# self.config['ucs']['numbertospawn']
# )
|
|
# Copyright 2012 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a list of all the functions that are not auto-generated.
# It contains all the meta data that describes the function.
templated_type_symbol_map = {
'bool' : 'b',
'int8_t' : 'a',
'int16_t' : 's',
'int32_t' : 'i',
'int64_t' : 'l',
'float' : 'f',
'double' : 'd',
'string' : 'NS_11StringValueE',
'timestamp' : 'NS_14TimestampValueE'
}
# Generates the BE symbol for the Compute Function class_name::fn_name<templated_type>.
# Does not handle varargs.
# TODO: this is a stopgap. ComputeFunctions are being removed and we can use the
# symbol lookup code in the BE.
def symbol(class_name, fn_name, templated_type = None):
sym = '_ZN6impala'
sym += str(len(class_name)) + class_name
sym += str(len(fn_name)) + fn_name
    if templated_type is None:
sym += 'EPNS_4ExprEPNS_8TupleRowE'
else:
sym += 'I'
sym += templated_type_symbol_map[templated_type]
sym += 'EEPvPNS_4ExprEPNS_8TupleRowE'
return sym
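# Worked examples (class/function names are illustrative):
#   symbol('MathFunctions', 'Pi')
#     -> '_ZN6impala13MathFunctions2PiEPNS_4ExprEPNS_8TupleRowE'
#   symbol('MathFunctions', 'Pi', 'double')
#     -> '_ZN6impala13MathFunctions2PiIdEEPvPNS_4ExprEPNS_8TupleRowE'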
# The format is:
# [sql aliases], <return_type>, [<args>], <backend symbol>,
# With an optional
# <prepare symbol>, <close symbol>
#
# 'sql aliases' are the function names that can be used from sql. There must be at least
# one per function.
#
# The symbol can be empty for functions that are not yet implemented or are special-cased
# in Expr::CreateExpr() (i.e., functions that are implemented via a custom Expr class
# rather than a single function).
visible_functions = [
[['udf_pi'], 'DOUBLE', [], 'impala::UdfBuiltins::Pi'],
[['udf_abs'], 'DOUBLE', ['DOUBLE'], 'impala::UdfBuiltins::Abs'],
[['udf_lower'], 'STRING', ['STRING'], 'impala::UdfBuiltins::Lower'],
[['max_int'], 'INT', [],
'_ZN6impala11UdfBuiltins6MaxIntEPN10impala_udf15FunctionContextE'],
[['max_tinyint'], 'TINYINT', [],
'_ZN6impala11UdfBuiltins10MaxTinyIntEPN10impala_udf15FunctionContextE'],
[['max_smallint'], 'SMALLINT', [],
'_ZN6impala11UdfBuiltins11MaxSmallIntEPN10impala_udf15FunctionContextE'],
[['max_bigint'], 'BIGINT', [],
'_ZN6impala11UdfBuiltins9MaxBigIntEPN10impala_udf15FunctionContextE'],
[['min_int'], 'INT', [],
'_ZN6impala11UdfBuiltins6MinIntEPN10impala_udf15FunctionContextE'],
[['min_tinyint'], 'TINYINT', [],
'_ZN6impala11UdfBuiltins10MinTinyIntEPN10impala_udf15FunctionContextE'],
[['min_smallint'], 'SMALLINT', [],
'_ZN6impala11UdfBuiltins11MinSmallIntEPN10impala_udf15FunctionContextE'],
[['min_bigint'], 'BIGINT', [],
'_ZN6impala11UdfBuiltins9MinBigIntEPN10impala_udf15FunctionContextE'],
[['is_nan'], 'BOOLEAN', ['DOUBLE'],
'_ZN6impala11UdfBuiltins5IsNanEPN10impala_udf15FunctionContextERKNS1_9DoubleValE'],
[['is_inf'], 'BOOLEAN', ['DOUBLE'],
'_ZN6impala11UdfBuiltins5IsInfEPN10impala_udf15FunctionContextERKNS1_9DoubleValE'],
[['trunc'], 'TIMESTAMP', ['TIMESTAMP', 'STRING'],
'_ZN6impala11UdfBuiltins5TruncEPN10impala_udf15FunctionContextERKNS1_12TimestampValERKNS1_9StringValE',
'_ZN6impala11UdfBuiltins12TruncPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE',
'_ZN6impala11UdfBuiltins10TruncCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
# Don't add an entry for EXTRACT(STRING, TIMESTAMP). STRINGs may be used to represent
# TIMESTAMPs meaning EXTRACT(STRING, STRING) is valid. If EXTRACT(STRING, TIMESTAMP)
# is added, it takes precedence over the existing EXTRACT(TIMESTAMP, STRING)
# which could break users.
[['extract'], 'INT', ['TIMESTAMP', 'STRING'],
'_ZN6impala11UdfBuiltins7ExtractEPN10impala_udf15FunctionContextERKNS1_12TimestampValERKNS1_9StringValE',
'_ZN6impala11UdfBuiltins21SwappedExtractPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE',
'_ZN6impala11UdfBuiltins12ExtractCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
[['date_part'], 'INT', ['STRING', 'TIMESTAMP'],
'_ZN6impala11UdfBuiltins7ExtractEPN10impala_udf15FunctionContextERKNS1_9StringValERKNS1_12TimestampValE',
'_ZN6impala11UdfBuiltins14ExtractPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE',
'_ZN6impala11UdfBuiltins12ExtractCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
[['madlib_encode_vector'], 'STRING', ['STRING'],
'_ZN6impala11UdfBuiltins12EncodeVectorEPN10impala_udf15FunctionContextERKNS1_9StringValE'],
[['madlib_decode_vector'], 'STRING', ['STRING'],
'_ZN6impala11UdfBuiltins12DecodeVectorEPN10impala_udf15FunctionContextERKNS1_9StringValE'],
[['madlib_print_vector'], 'STRING', ['STRING'],
'_ZN6impala11UdfBuiltins11PrintVectorEPN10impala_udf15FunctionContextERKNS1_9StringValE'],
[['madlib_vector'], 'STRING', ['DOUBLE', '...'],
'_ZN6impala11UdfBuiltins8ToVectorEPN10impala_udf15FunctionContextEiPKNS1_9DoubleValE'],
[['madlib_vector_get'], 'DOUBLE', ['BIGINT', 'STRING'],
'_ZN6impala11UdfBuiltins9VectorGetEPN10impala_udf15FunctionContextERKNS1_9BigIntValERKNS1_9StringValE'],
# Timestamp functions
[['unix_timestamp'], 'BIGINT', ['STRING'], '_ZN6impala18TimestampFunctions14UnixFromStringEPN10impala_udf15FunctionContextERKNS1_9StringValE'],
[['year'], 'INT', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions4YearEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['month'], 'INT', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions5MonthEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['dayofweek'], 'INT', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions9DayOfWeekEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['day', 'dayofmonth'], 'INT', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions10DayOfMonthEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['dayofyear'], 'INT', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions9DayOfYearEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['weekofyear'], 'INT', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions10WeekOfYearEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['hour'], 'INT', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions4HourEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['minute'], 'INT', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions6MinuteEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['second'], 'INT', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions6SecondEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['to_date'], 'STRING', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions6ToDateEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['dayname'], 'STRING', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions7DayNameEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['years_add'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf6IntValEN5boost9date_time14years_durationINS4_9gregorian21greg_durations_configEEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['years_add'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf9BigIntValEN5boost9date_time14years_durationINS4_9gregorian21greg_durations_configEEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['years_sub'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf6IntValEN5boost9date_time14years_durationINS4_9gregorian21greg_durations_configEEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['years_sub'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf9BigIntValEN5boost9date_time14years_durationINS4_9gregorian21greg_durations_configEEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['months_add', 'add_months'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf6IntValEN5boost9date_time15months_durationINS4_9gregorian21greg_durations_configEEELb1EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['months_add', 'add_months'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf9BigIntValEN5boost9date_time15months_durationINS4_9gregorian21greg_durations_configEEELb1EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['months_sub'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf6IntValEN5boost9date_time15months_durationINS4_9gregorian21greg_durations_configEEELb1EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['months_sub'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf9BigIntValEN5boost9date_time15months_durationINS4_9gregorian21greg_durations_configEEELb1EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['weeks_add'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf6IntValEN5boost9gregorian14weeks_durationELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['weeks_add'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf9BigIntValEN5boost9gregorian14weeks_durationELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['weeks_sub'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf6IntValEN5boost9gregorian14weeks_durationELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['weeks_sub'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf9BigIntValEN5boost9gregorian14weeks_durationELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['days_add', 'date_add', 'adddate'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf6IntValEN5boost9gregorian13date_durationELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['days_add', 'date_add', 'adddate'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf9BigIntValEN5boost9gregorian13date_durationELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['days_sub', 'date_sub', 'subdate'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf6IntValEN5boost9gregorian13date_durationELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['days_sub', 'date_sub', 'subdate'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf9BigIntValEN5boost9gregorian13date_durationELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['hours_add'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf6IntValEN5boost10posix_time5hoursELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['hours_add'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf9BigIntValEN5boost10posix_time5hoursELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['hours_sub'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf6IntValEN5boost10posix_time5hoursELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['hours_sub'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf9BigIntValEN5boost10posix_time5hoursELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['minutes_add'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf6IntValEN5boost10posix_time7minutesELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['minutes_add'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf9BigIntValEN5boost10posix_time7minutesELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['minutes_sub'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf6IntValEN5boost10posix_time7minutesELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['minutes_sub'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf9BigIntValEN5boost10posix_time7minutesELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['seconds_add'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf6IntValEN5boost10posix_time7secondsELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['seconds_add'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf9BigIntValEN5boost10posix_time7secondsELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['seconds_sub'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf6IntValEN5boost10posix_time7secondsELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['seconds_sub'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf9BigIntValEN5boost10posix_time7secondsELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKS7_RKT0_'],
[['milliseconds_add'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf6IntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['milliseconds_add'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf9BigIntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['milliseconds_sub'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf6IntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['milliseconds_sub'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf9BigIntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['microseconds_add'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf6IntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['microseconds_add'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf9BigIntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['microseconds_sub'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf6IntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['microseconds_sub'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf9BigIntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['nanoseconds_add'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf6IntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000000000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['nanoseconds_add'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf9BigIntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000000000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['nanoseconds_sub'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf6IntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000000000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['nanoseconds_sub'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf9BigIntValEN5boost9date_time18subsecond_durationINS4_10posix_time13time_durationELl1000000000EEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['datediff'], 'INT', ['TIMESTAMP', 'TIMESTAMP'], '_ZN6impala18TimestampFunctions8DateDiffEPN10impala_udf15FunctionContextERKNS1_12TimestampValES6_'],
[['unix_timestamp'], 'BIGINT', [], '_ZN6impala18TimestampFunctions4UnixEPN10impala_udf15FunctionContextE'],
[['unix_timestamp'], 'BIGINT', ['TIMESTAMP'], '_ZN6impala18TimestampFunctions4UnixEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['unix_timestamp'], 'BIGINT', ['STRING', 'STRING'], '_ZN6impala18TimestampFunctions4UnixEPN10impala_udf15FunctionContextERKNS1_9StringValES6_',
'_ZN6impala18TimestampFunctions22UnixAndFromUnixPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE',
'_ZN6impala18TimestampFunctions20UnixAndFromUnixCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
[['from_unixtime'], 'STRING', ['INT'],
'_ZN6impala18TimestampFunctions8FromUnixIN10impala_udf6IntValEEENS2_9StringValEPNS2_15FunctionContextERKT_'],
[['from_unixtime'], 'STRING', ['INT', 'STRING'],
'_ZN6impala18TimestampFunctions8FromUnixIN10impala_udf6IntValEEENS2_9StringValEPNS2_15FunctionContextERKT_RKS4_',
'_ZN6impala18TimestampFunctions22UnixAndFromUnixPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE',
'_ZN6impala18TimestampFunctions20UnixAndFromUnixCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
[['from_unixtime'], 'STRING', ['BIGINT'],
'_ZN6impala18TimestampFunctions8FromUnixIN10impala_udf9BigIntValEEENS2_9StringValEPNS2_15FunctionContextERKT_'],
[['from_unixtime'], 'STRING', ['BIGINT', 'STRING'],
'_ZN6impala18TimestampFunctions8FromUnixIN10impala_udf9BigIntValEEENS2_9StringValEPNS2_15FunctionContextERKT_RKS4_',
'_ZN6impala18TimestampFunctions22UnixAndFromUnixPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE',
'_ZN6impala18TimestampFunctions20UnixAndFromUnixCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
[['now', 'current_timestamp'], 'TIMESTAMP', [], '_ZN6impala18TimestampFunctions3NowEPN10impala_udf15FunctionContextE'],
[['from_utc_timestamp'], 'TIMESTAMP', ['TIMESTAMP', 'STRING'],
"impala::TimestampFunctions::FromUtc"],
[['to_utc_timestamp'], 'TIMESTAMP', ['TIMESTAMP', 'STRING'],
"impala::TimestampFunctions::ToUtc"],
[['timeofday'], 'STRING', [],"impala::TimestampFunctions::TimeOfDay"],
[['timestamp_cmp'], 'INT', ['TIMESTAMP', 'TIMESTAMP'],
"impala::TimestampFunctions::TimestampCmp"],
[['int_months_between'], 'INT', ['TIMESTAMP', 'TIMESTAMP'],
"impala::TimestampFunctions::IntMonthsBetween"],
[['months_between'], 'DOUBLE', ['TIMESTAMP', 'TIMESTAMP'],
"impala::TimestampFunctions::MonthsBetween"],
# Math builtin functions
[['pi'], 'DOUBLE', [], 'impala::MathFunctions::Pi'],
[['e'], 'DOUBLE', [], 'impala::MathFunctions::E'],
[['abs'], 'BIGINT', ['BIGINT'], 'impala::MathFunctions::Abs'],
[['abs'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Abs'],
[['abs'], 'FLOAT', ['FLOAT'], 'impala::MathFunctions::Abs'],
[['abs'], 'INT', ['INT'], 'impala::MathFunctions::Abs'],
[['abs'], 'SMALLINT', ['SMALLINT'], 'impala::MathFunctions::Abs'],
[['abs'], 'TINYINT', ['TINYINT'], 'impala::MathFunctions::Abs'],
[['sign'], 'FLOAT', ['DOUBLE'], 'impala::MathFunctions::Sign'],
[['sin'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Sin'],
[['asin'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Asin'],
[['cos'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Cos'],
[['acos'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Acos'],
[['tan'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Tan'],
[['atan'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Atan'],
[['atan2'], 'DOUBLE', ['DOUBLE','DOUBLE'], 'impala::MathFunctions::Atan2'],
[['cosh'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Cosh'],
[['tanh'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Tanh'],
[['sinh'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Sinh'],
[['cot'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Cot'],
[['radians'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Radians'],
[['degrees'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Degrees'],
[['ceil', 'ceiling', 'dceil'], 'BIGINT', ['DOUBLE'], 'impala::MathFunctions::Ceil'],
[['floor'], 'BIGINT', ['DOUBLE'], 'impala::MathFunctions::Floor'],
[['truncate','dtrunc'], 'BIGINT', ['DOUBLE'], 'impala::MathFunctions::Truncate'],
[['round','dround'], 'BIGINT', ['DOUBLE'], 'impala::MathFunctions::Round'],
[['round','dround'], 'DOUBLE', ['DOUBLE', 'INT'], 'impala::MathFunctions::RoundUpTo'],
[['exp', 'dexp'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Exp'],
[['ln','dlog1'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Ln'],
[['log10','dlog10'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Log10'],
[['log2'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Log2'],
[['log'], 'DOUBLE', ['DOUBLE', 'DOUBLE'], 'impala::MathFunctions::Log'],
[['pow', 'power','dpow','fpow'], 'DOUBLE', ['DOUBLE', 'DOUBLE'], 'impala::MathFunctions::Pow'],
[['sqrt','dsqrt'], 'DOUBLE', ['DOUBLE'], 'impala::MathFunctions::Sqrt'],
[['rand','random'], 'DOUBLE', [], 'impala::MathFunctions::Rand',
'_ZN6impala13MathFunctions11RandPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
[['factorial'], 'BIGINT', ['TINYINT'], 'impala::Operators::Factorial_TinyIntVal'],
[['factorial'], 'BIGINT', ['SMALLINT'], 'impala::Operators::Factorial_SmallIntVal'],
[['factorial'], 'BIGINT', ['INT'], 'impala::Operators::Factorial_IntVal'],
[['factorial'], 'BIGINT', ['BIGINT'], 'impala::Operators::Factorial_BigIntVal'],
[['rand'], 'DOUBLE', ['BIGINT'], 'impala::MathFunctions::RandSeed',
'_ZN6impala13MathFunctions11RandPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
[['bin'], 'STRING', ['BIGINT'], 'impala::MathFunctions::Bin'],
[['hex'], 'STRING', ['BIGINT'], 'impala::MathFunctions::HexInt'],
[['hex'], 'STRING', ['STRING'], 'impala::MathFunctions::HexString'],
[['unhex'], 'STRING', ['STRING'], 'impala::MathFunctions::Unhex'],
[['conv'], 'STRING', ['BIGINT', 'TINYINT', 'TINYINT'],
'impala::MathFunctions::ConvInt'],
[['conv'], 'STRING', ['STRING', 'TINYINT', 'TINYINT'],
'impala::MathFunctions::ConvString'],
[['pmod'], 'BIGINT', ['BIGINT', 'BIGINT'], 'impala::MathFunctions::PmodBigInt'],
[['pmod'], 'DOUBLE', ['DOUBLE', 'DOUBLE'], 'impala::MathFunctions::PmodDouble'],
[['fmod'], 'FLOAT', ['FLOAT', 'FLOAT'], 'impala::MathFunctions::FmodFloat'],
[['fmod'], 'DOUBLE', ['DOUBLE', 'DOUBLE'], 'impala::MathFunctions::FmodDouble'],
[['mod'], 'TINYINT', ['TINYINT', 'TINYINT'], 'impala::Operators::Mod_TinyIntVal_TinyIntVal'],
[['mod'], 'SMALLINT', ['SMALLINT', 'SMALLINT'], 'impala::Operators::Mod_SmallIntVal_SmallIntVal'],
[['mod'], 'INT', ['INT', 'INT'], 'impala::Operators::Mod_IntVal_IntVal'],
[['mod'], 'BIGINT', ['BIGINT', 'BIGINT'], 'impala::Operators::Mod_BigIntVal_BigIntVal'],
[['mod'], 'FLOAT', ['FLOAT', 'FLOAT'], 'impala::MathFunctions::FmodFloat'],
[['mod'], 'DOUBLE', ['DOUBLE', 'DOUBLE'], 'impala::MathFunctions::FmodDouble'],
[['mod'], 'DECIMAL', ['DECIMAL', 'DECIMAL'], 'impala::DecimalOperators::Mod_DecimalVal_DecimalVal'],
[['positive'], 'TINYINT', ['TINYINT'],
'_ZN6impala13MathFunctions8PositiveIN10impala_udf10TinyIntValEEET_PNS2_15FunctionContextERKS4_'],
[['positive'], 'SMALLINT', ['SMALLINT'],
'_ZN6impala13MathFunctions8PositiveIN10impala_udf11SmallIntValEEET_PNS2_15FunctionContextERKS4_'],
[['positive'], 'INT', ['INT'],
'_ZN6impala13MathFunctions8PositiveIN10impala_udf6IntValEEET_PNS2_15FunctionContextERKS4_'],
[['positive'], 'BIGINT', ['BIGINT'],
'_ZN6impala13MathFunctions8PositiveIN10impala_udf9BigIntValEEET_PNS2_15FunctionContextERKS4_'],
[['positive'], 'FLOAT', ['FLOAT'],
'_ZN6impala13MathFunctions8PositiveIN10impala_udf8FloatValEEET_PNS2_15FunctionContextERKS4_'],
[['positive'], 'DOUBLE', ['DOUBLE'],
'_ZN6impala13MathFunctions8PositiveIN10impala_udf9DoubleValEEET_PNS2_15FunctionContextERKS4_'],
[['positive'], 'DECIMAL', ['DECIMAL'],
'_ZN6impala13MathFunctions8PositiveIN10impala_udf10DecimalValEEET_PNS2_15FunctionContextERKS4_'],
[['negative'], 'TINYINT', ['TINYINT'],
'_ZN6impala13MathFunctions8NegativeIN10impala_udf10TinyIntValEEET_PNS2_15FunctionContextERKS4_'],
[['negative'], 'SMALLINT', ['SMALLINT'],
'_ZN6impala13MathFunctions8NegativeIN10impala_udf11SmallIntValEEET_PNS2_15FunctionContextERKS4_'],
[['negative'], 'INT', ['INT'],
'_ZN6impala13MathFunctions8NegativeIN10impala_udf6IntValEEET_PNS2_15FunctionContextERKS4_'],
[['negative'], 'BIGINT', ['BIGINT'],
'_ZN6impala13MathFunctions8NegativeIN10impala_udf9BigIntValEEET_PNS2_15FunctionContextERKS4_'],
[['negative'], 'FLOAT', ['FLOAT'],
'_ZN6impala13MathFunctions8NegativeIN10impala_udf8FloatValEEET_PNS2_15FunctionContextERKS4_'],
[['negative'], 'DOUBLE', ['DOUBLE'],
'_ZN6impala13MathFunctions8NegativeIN10impala_udf9DoubleValEEET_PNS2_15FunctionContextERKS4_'],
[['negative'], 'DECIMAL', ['DECIMAL'],
'_ZN6impala13MathFunctions8NegativeIN10impala_udf10DecimalValEEET_PNS2_15FunctionContextERKS4_'],
[['quotient'], 'BIGINT', ['BIGINT', 'BIGINT'],
'impala::MathFunctions::QuotientBigInt'],
[['quotient'], 'BIGINT', ['DOUBLE', 'DOUBLE'],
'impala::MathFunctions::QuotientDouble'],
[['least'], 'TINYINT', ['TINYINT', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf10TinyIntValELb1EEET_PNS2_15FunctionContextEiPKS4_'],
[['least'], 'SMALLINT', ['SMALLINT', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf11SmallIntValELb1EEET_PNS2_15FunctionContextEiPKS4_'],
[['least'], 'INT', ['INT', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf6IntValELb1EEET_PNS2_15FunctionContextEiPKS4_'],
[['least'], 'BIGINT', ['BIGINT', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf9BigIntValELb1EEET_PNS2_15FunctionContextEiPKS4_'],
[['least'], 'FLOAT', ['FLOAT', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf8FloatValELb1EEET_PNS2_15FunctionContextEiPKS4_'],
[['least'], 'DOUBLE', ['DOUBLE', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf9DoubleValELb1EEET_PNS2_15FunctionContextEiPKS4_'],
[['least'], 'TIMESTAMP', ['TIMESTAMP', '...'],
'_ZN6impala13MathFunctions13LeastGreatestILb1EEEN10impala_udf12TimestampValEPNS2_15FunctionContextEiPKS3_'],
[['least'], 'STRING', ['STRING', '...'],
'_ZN6impala13MathFunctions13LeastGreatestILb1EEEN10impala_udf9StringValEPNS2_15FunctionContextEiPKS3_'],
[['least'], 'DECIMAL', ['DECIMAL', '...'],
'_ZN6impala13MathFunctions13LeastGreatestILb1EEEN10impala_udf10DecimalValEPNS2_15FunctionContextEiPKS3_'],
[['greatest'], 'TINYINT', ['TINYINT', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf10TinyIntValELb0EEET_PNS2_15FunctionContextEiPKS4_'],
[['greatest'], 'SMALLINT', ['SMALLINT', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf11SmallIntValELb0EEET_PNS2_15FunctionContextEiPKS4_'],
[['greatest'], 'INT', ['INT', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf6IntValELb0EEET_PNS2_15FunctionContextEiPKS4_'],
[['greatest'], 'BIGINT', ['BIGINT', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf9BigIntValELb0EEET_PNS2_15FunctionContextEiPKS4_'],
[['greatest'], 'FLOAT', ['FLOAT', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf8FloatValELb0EEET_PNS2_15FunctionContextEiPKS4_'],
[['greatest'], 'DOUBLE', ['DOUBLE', '...'],
'_ZN6impala13MathFunctions13LeastGreatestIN10impala_udf9DoubleValELb0EEET_PNS2_15FunctionContextEiPKS4_'],
[['greatest'], 'TIMESTAMP', ['TIMESTAMP', '...'],
'_ZN6impala13MathFunctions13LeastGreatestILb0EEEN10impala_udf12TimestampValEPNS2_15FunctionContextEiPKS3_'],
[['greatest'], 'STRING', ['STRING', '...'],
'_ZN6impala13MathFunctions13LeastGreatestILb0EEEN10impala_udf9StringValEPNS2_15FunctionContextEiPKS3_'],
[['greatest'], 'DECIMAL', ['DECIMAL', '...'],
'_ZN6impala13MathFunctions13LeastGreatestILb0EEEN10impala_udf10DecimalValEPNS2_15FunctionContextEiPKS3_'],
# Decimal Functions
# TODO: Oracle has decimal support for transcendental functions (e.g. sin()) to
# very high precision. Do we need them? It's unclear if other databases do the
# same.
[['precision'], 'INT', ['DECIMAL'], 'impala::DecimalFunctions::Precision'],
[['scale'], 'INT', ['DECIMAL'], 'impala::DecimalFunctions::Scale'],
[['abs'], 'DECIMAL', ['DECIMAL'], 'impala::DecimalFunctions::Abs'],
[['ceil', 'ceiling'], 'DECIMAL', ['DECIMAL'], 'impala::DecimalFunctions::Ceil'],
[['floor'], 'DECIMAL', ['DECIMAL'], 'impala::DecimalFunctions::Floor'],
[['round'], 'DECIMAL', ['DECIMAL'], 'impala::DecimalFunctions::Round'],
[['round'], 'DECIMAL', ['DECIMAL', 'TINYINT'], 'impala::DecimalFunctions::RoundTo'],
[['round'], 'DECIMAL', ['DECIMAL', 'SMALLINT'], 'impala::DecimalFunctions::RoundTo'],
[['round'], 'DECIMAL', ['DECIMAL', 'INT'], 'impala::DecimalFunctions::RoundTo'],
[['round'], 'DECIMAL', ['DECIMAL', 'BIGINT'], 'impala::DecimalFunctions::RoundTo'],
[['truncate'], 'DECIMAL', ['DECIMAL'], 'impala::DecimalFunctions::Truncate'],
[['truncate'], 'DECIMAL', ['DECIMAL', 'TINYINT'],
'impala::DecimalFunctions::TruncateTo'],
[['truncate'], 'DECIMAL', ['DECIMAL', 'SMALLINT'],
'impala::DecimalFunctions::TruncateTo'],
[['truncate'], 'DECIMAL', ['DECIMAL', 'INT'],
'impala::DecimalFunctions::TruncateTo'],
[['truncate'], 'DECIMAL', ['DECIMAL', 'BIGINT'],
'impala::DecimalFunctions::TruncateTo'],
# String builtin functions
[['substr', 'substring'], 'STRING', ['STRING', 'BIGINT'],
'impala::StringFunctions::Substring'],
[['substr', 'substring'], 'STRING', ['STRING', 'BIGINT', 'BIGINT'],
'impala::StringFunctions::Substring'],
[['split_part'], 'STRING', ['STRING', 'STRING', 'BIGINT'],
'impala::StringFunctions::SplitPart'],
  # left and right are keywords; leave them out for now.
[['strleft'], 'STRING', ['STRING', 'BIGINT'], 'impala::StringFunctions::Left'],
[['strright'], 'STRING', ['STRING', 'BIGINT'], 'impala::StringFunctions::Right'],
[['space'], 'STRING', ['BIGINT'], 'impala::StringFunctions::Space'],
[['repeat'], 'STRING', ['STRING', 'BIGINT'], 'impala::StringFunctions::Repeat'],
[['lpad'], 'STRING', ['STRING', 'BIGINT', 'STRING'], 'impala::StringFunctions::Lpad'],
[['rpad'], 'STRING', ['STRING', 'BIGINT', 'STRING'], 'impala::StringFunctions::Rpad'],
[['length'], 'INT', ['STRING'], 'impala::StringFunctions::Length'],
[['length'], 'INT', ['CHAR'], 'impala::StringFunctions::CharLength'],
[['char_length'], 'INT', ['STRING'], 'impala::StringFunctions::Length'],
[['character_length'], 'INT', ['STRING'], 'impala::StringFunctions::Length'],
[['lower', 'lcase'], 'STRING', ['STRING'], 'impala::StringFunctions::Lower'],
[['upper', 'ucase'], 'STRING', ['STRING'], 'impala::StringFunctions::Upper'],
[['initcap'], 'STRING', ['STRING'], 'impala::StringFunctions::InitCap'],
[['reverse'], 'STRING', ['STRING'], 'impala::StringFunctions::Reverse'],
[['translate'], 'STRING', ['STRING', 'STRING', 'STRING'],
'impala::StringFunctions::Translate'],
[['trim'], 'STRING', ['STRING'], 'impala::StringFunctions::Trim'],
[['ltrim'], 'STRING', ['STRING'], 'impala::StringFunctions::Ltrim'],
[['rtrim'], 'STRING', ['STRING'], 'impala::StringFunctions::Rtrim'],
[['ascii'], 'INT', ['STRING'], 'impala::StringFunctions::Ascii'],
[['instr'], 'INT', ['STRING', 'STRING'], 'impala::StringFunctions::Instr'],
[['locate'], 'INT', ['STRING', 'STRING'], 'impala::StringFunctions::Locate'],
[['locate'], 'INT', ['STRING', 'STRING', 'BIGINT'],
'impala::StringFunctions::LocatePos'],
[['regexp_extract'], 'STRING', ['STRING', 'STRING', 'BIGINT'],
'impala::StringFunctions::RegexpExtract',
'_ZN6impala15StringFunctions13RegexpPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE',
'_ZN6impala15StringFunctions11RegexpCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
[['regexp_replace'], 'STRING', ['STRING', 'STRING', 'STRING'],
'impala::StringFunctions::RegexpReplace',
'_ZN6impala15StringFunctions13RegexpPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE',
'_ZN6impala15StringFunctions11RegexpCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
[['regexp_like'], 'BOOLEAN', ['STRING', 'STRING'],
'impala::LikePredicate::Regex',
'_ZN6impala13LikePredicate12RegexPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE',
'_ZN6impala13LikePredicate10RegexCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
[['regexp_like'], 'BOOLEAN', ['STRING', 'STRING', 'STRING'],
'impala::LikePredicate::RegexpLike',
'_ZN6impala13LikePredicate17RegexpLikePrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE',
'_ZN6impala13LikePredicate10RegexCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
[['concat'], 'STRING', ['STRING', '...'], 'impala::StringFunctions::Concat'],
[['concat_ws'], 'STRING', ['STRING', 'STRING', '...'],
'impala::StringFunctions::ConcatWs'],
[['find_in_set'], 'INT', ['STRING', 'STRING'], 'impala::StringFunctions::FindInSet'],
[['parse_url'], 'STRING', ['STRING', 'STRING'], 'impala::StringFunctions::ParseUrl',
'_ZN6impala15StringFunctions15ParseUrlPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE',
'_ZN6impala15StringFunctions13ParseUrlCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
[['parse_url'], 'STRING', ['STRING', 'STRING', 'STRING'], 'impala::StringFunctions::ParseUrlKey',
'_ZN6impala15StringFunctions15ParseUrlPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE',
'_ZN6impala15StringFunctions13ParseUrlCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
# Netezza compatibility char functions
[['chr'], 'STRING', ['INT'], 'impala::StringFunctions::Chr'],
[['btrim'], 'STRING', ['STRING'], 'impala::StringFunctions::Trim'],
[['btrim'], 'STRING', ['STRING', 'STRING'], 'impala::StringFunctions::BTrimString', '_ZN6impala15StringFunctions12BTrimPrepareEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE', '_ZN6impala15StringFunctions10BTrimCloseEPN10impala_udf15FunctionContextENS2_18FunctionStateScopeE'],
# Conditional Functions
# Some of these have empty symbols because the BE special-cases them based on the
# function name
[['if'], 'BOOLEAN', ['BOOLEAN', 'BOOLEAN', 'BOOLEAN'], ''],
[['if'], 'TINYINT', ['BOOLEAN', 'TINYINT', 'TINYINT'], ''],
[['if'], 'SMALLINT', ['BOOLEAN', 'SMALLINT', 'SMALLINT'], ''],
[['if'], 'INT', ['BOOLEAN', 'INT', 'INT'], ''],
[['if'], 'BIGINT', ['BOOLEAN', 'BIGINT', 'BIGINT'], ''],
[['if'], 'FLOAT', ['BOOLEAN', 'FLOAT', 'FLOAT'], ''],
[['if'], 'DOUBLE', ['BOOLEAN', 'DOUBLE', 'DOUBLE'], ''],
[['if'], 'STRING', ['BOOLEAN', 'STRING', 'STRING'], ''],
[['if'], 'TIMESTAMP', ['BOOLEAN', 'TIMESTAMP', 'TIMESTAMP'], ''],
[['if'], 'DECIMAL', ['BOOLEAN', 'DECIMAL', 'DECIMAL'], ''],
[['nullif'], 'BOOLEAN', ['BOOLEAN', 'BOOLEAN'], ''],
[['nullif'], 'TINYINT', ['TINYINT', 'TINYINT'], ''],
[['nullif'], 'SMALLINT', ['SMALLINT', 'SMALLINT'], ''],
[['nullif'], 'INT', ['INT', 'INT'], ''],
[['nullif'], 'BIGINT', ['BIGINT', 'BIGINT'], ''],
[['nullif'], 'FLOAT', ['FLOAT', 'FLOAT'], ''],
[['nullif'], 'DOUBLE', ['DOUBLE', 'DOUBLE'], ''],
[['nullif'], 'STRING', ['STRING', 'STRING'], ''],
[['nullif'], 'TIMESTAMP', ['TIMESTAMP', 'TIMESTAMP'], ''],
[['nullif'], 'DECIMAL', ['DECIMAL', 'DECIMAL'], ''],
[['zeroifnull'], 'TINYINT', ['TINYINT'], 'impala::ConditionalFunctions::ZeroIfNull'],
[['zeroifnull'], 'SMALLINT', ['SMALLINT'], 'impala::ConditionalFunctions::ZeroIfNull'],
[['zeroifnull'], 'INT', ['INT'], 'impala::ConditionalFunctions::ZeroIfNull'],
[['zeroifnull'], 'BIGINT', ['BIGINT'], 'impala::ConditionalFunctions::ZeroIfNull'],
[['zeroifnull'], 'FLOAT', ['FLOAT'], 'impala::ConditionalFunctions::ZeroIfNull'],
[['zeroifnull'], 'DOUBLE', ['DOUBLE'], 'impala::ConditionalFunctions::ZeroIfNull'],
[['zeroifnull'], 'DECIMAL', ['DECIMAL'], 'impala::ConditionalFunctions::ZeroIfNull'],
[['nullifzero'], 'TINYINT', ['TINYINT'], 'impala::ConditionalFunctions::NullIfZero'],
[['nullifzero'], 'SMALLINT', ['SMALLINT'], 'impala::ConditionalFunctions::NullIfZero'],
[['nullifzero'], 'INT', ['INT'], 'impala::ConditionalFunctions::NullIfZero'],
[['nullifzero'], 'BIGINT', ['BIGINT'], 'impala::ConditionalFunctions::NullIfZero'],
[['nullifzero'], 'FLOAT', ['FLOAT'], 'impala::ConditionalFunctions::NullIfZero'],
[['nullifzero'], 'DOUBLE', ['DOUBLE'], 'impala::ConditionalFunctions::NullIfZero'],
[['nullifzero'], 'DECIMAL', ['DECIMAL'], 'impala::ConditionalFunctions::NullIfZero'],
[['isnull', 'ifnull', 'nvl'], 'BOOLEAN', ['BOOLEAN', 'BOOLEAN'], ''],
[['isnull', 'ifnull', 'nvl'], 'TINYINT', ['TINYINT', 'TINYINT'], ''],
[['isnull', 'ifnull', 'nvl'], 'SMALLINT', ['SMALLINT', 'SMALLINT'], ''],
[['isnull', 'ifnull', 'nvl'], 'INT', ['INT', 'INT'], ''],
[['isnull', 'ifnull', 'nvl'], 'BIGINT', ['BIGINT', 'BIGINT'], ''],
[['isnull', 'ifnull', 'nvl'], 'FLOAT', ['FLOAT', 'FLOAT'], ''],
[['isnull', 'ifnull', 'nvl'], 'DOUBLE', ['DOUBLE', 'DOUBLE'], ''],
[['isnull', 'ifnull', 'nvl'], 'STRING', ['STRING', 'STRING'], ''],
[['isnull', 'ifnull', 'nvl'], 'TIMESTAMP', ['TIMESTAMP', 'TIMESTAMP'], ''],
[['isnull', 'ifnull', 'nvl'], 'DECIMAL', ['DECIMAL', 'DECIMAL'], ''],
[['coalesce'], 'BOOLEAN', ['BOOLEAN', '...'], ''],
[['coalesce'], 'TINYINT', ['TINYINT', '...'], ''],
[['coalesce'], 'SMALLINT', ['SMALLINT', '...'], ''],
[['coalesce'], 'INT', ['INT', '...'], ''],
[['coalesce'], 'BIGINT', ['BIGINT', '...'], ''],
[['coalesce'], 'FLOAT', ['FLOAT', '...'], ''],
[['coalesce'], 'DOUBLE', ['DOUBLE', '...'], ''],
[['coalesce'], 'STRING', ['STRING', '...'], ''],
[['coalesce'], 'TIMESTAMP', ['TIMESTAMP', '...'], ''],
[['coalesce'], 'DECIMAL', ['DECIMAL', '...'], ''],
[['istrue'], 'BOOLEAN', ['BOOLEAN'], 'impala::ConditionalFunctions::IsTrue'],
[['isnottrue'], 'BOOLEAN', ['BOOLEAN'], 'impala::ConditionalFunctions::IsNotTrue'],
[['isfalse'], 'BOOLEAN', ['BOOLEAN'], 'impala::ConditionalFunctions::IsFalse'],
[['isnotfalse'], 'BOOLEAN', ['BOOLEAN'], 'impala::ConditionalFunctions::IsNotFalse'],
# Utility functions
[['current_database'], 'STRING', [], 'impala::UtilityFunctions::CurrentDatabase'],
[['user'], 'STRING', [], 'impala::UtilityFunctions::User'],
[['effective_user'], 'STRING', [], 'impala::UtilityFunctions::EffectiveUser'],
[['sleep'], 'BOOLEAN', ['INT'], 'impala::UtilityFunctions::Sleep'],
[['pid'], 'INT', [], 'impala::UtilityFunctions::Pid'],
[['version'], 'STRING', [], 'impala::UtilityFunctions::Version'],
[['typeOf'], 'STRING', ['BOOLEAN'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf10BooleanValEEENS2_9StringValEPNS2_15FunctionContextERKT_'],
[['typeOf'], 'STRING', ['TINYINT'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf10TinyIntValEEENS2_9StringValEPNS2_15FunctionContextERKT_'],
[['typeOf'], 'STRING', ['SMALLINT'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf11SmallIntValEEENS2_9StringValEPNS2_15FunctionContextERKT_'],
[['typeOf'], 'STRING', ['INT'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf6IntValEEENS2_9StringValEPNS2_15FunctionContextERKT_'],
[['typeOf'], 'STRING', ['BIGINT'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf9BigIntValEEENS2_9StringValEPNS2_15FunctionContextERKT_'],
[['typeOf'], 'STRING', ['FLOAT'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf8FloatValEEENS2_9StringValEPNS2_15FunctionContextERKT_'],
[['typeOf'], 'STRING', ['DOUBLE'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf9DoubleValEEENS2_9StringValEPNS2_15FunctionContextERKT_'],
[['typeOf'], 'STRING', ['CHAR'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf9StringValEEES3_PNS2_15FunctionContextERKT_'],
[['typeOf'], 'STRING', ['VARCHAR'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf9StringValEEES3_PNS2_15FunctionContextERKT_'],
[['typeOf'], 'STRING', ['STRING'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf9StringValEEES3_PNS2_15FunctionContextERKT_'],
[['typeOf'], 'STRING', ['TIMESTAMP'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf12TimestampValEEENS2_9StringValEPNS2_15FunctionContextERKT_'],
[['typeOf'], 'STRING', ['DECIMAL'], '_ZN6impala16UtilityFunctions6TypeOfIN10impala_udf10DecimalValEEENS2_9StringValEPNS2_15FunctionContextERKT_'],
[['fnv_hash'], 'BIGINT', ['TINYINT'],
'_ZN6impala16UtilityFunctions7FnvHashIN10impala_udf10TinyIntValEEENS2_9BigIntValEPNS2_15FunctionContextERKT_'],
[['fnv_hash'], 'BIGINT', ['SMALLINT'],
'_ZN6impala16UtilityFunctions7FnvHashIN10impala_udf11SmallIntValEEENS2_9BigIntValEPNS2_15FunctionContextERKT_'],
[['fnv_hash'], 'BIGINT', ['INT'],
'_ZN6impala16UtilityFunctions7FnvHashIN10impala_udf6IntValEEENS2_9BigIntValEPNS2_15FunctionContextERKT_'],
[['fnv_hash'], 'BIGINT', ['BIGINT'],
'_ZN6impala16UtilityFunctions7FnvHashIN10impala_udf9BigIntValEEES3_PNS2_15FunctionContextERKT_'],
[['fnv_hash'], 'BIGINT', ['FLOAT'],
'_ZN6impala16UtilityFunctions7FnvHashIN10impala_udf8FloatValEEENS2_9BigIntValEPNS2_15FunctionContextERKT_'],
[['fnv_hash'], 'BIGINT', ['DOUBLE'],
'_ZN6impala16UtilityFunctions7FnvHashIN10impala_udf9DoubleValEEENS2_9BigIntValEPNS2_15FunctionContextERKT_'],
[['fnv_hash'], 'BIGINT', ['STRING'],
'_ZN6impala16UtilityFunctions13FnvHashStringEPN10impala_udf15FunctionContextERKNS1_9StringValE'],
[['fnv_hash'], 'BIGINT', ['TIMESTAMP'],
'_ZN6impala16UtilityFunctions16FnvHashTimestampEPN10impala_udf15FunctionContextERKNS1_12TimestampValE'],
[['fnv_hash'], 'BIGINT', ['DECIMAL'],
'_ZN6impala16UtilityFunctions14FnvHashDecimalEPN10impala_udf15FunctionContextERKNS1_10DecimalValE'],
# (Non)NullValue functions
[['nullvalue'], 'BOOLEAN', ['BOOLEAN'], '_ZN6impala15IsNullPredicate6IsNullIN10impala_udf10BooleanValEEES3_PNS2_15FunctionContextERKT_'],
[['nullvalue'], 'BOOLEAN', ['TINYINT'], '_ZN6impala15IsNullPredicate6IsNullIN10impala_udf10TinyIntValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nullvalue'], 'BOOLEAN', ['SMALLINT'], '_ZN6impala15IsNullPredicate6IsNullIN10impala_udf11SmallIntValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nullvalue'], 'BOOLEAN', ['INT'], '_ZN6impala15IsNullPredicate6IsNullIN10impala_udf6IntValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nullvalue'], 'BOOLEAN', ['BIGINT'], '_ZN6impala15IsNullPredicate6IsNullIN10impala_udf9BigIntValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nullvalue'], 'BOOLEAN', ['FLOAT'], '_ZN6impala15IsNullPredicate6IsNullIN10impala_udf8FloatValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nullvalue'], 'BOOLEAN', ['DOUBLE'], '_ZN6impala15IsNullPredicate6IsNullIN10impala_udf9DoubleValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nullvalue'], 'BOOLEAN', ['STRING'], '_ZN6impala15IsNullPredicate6IsNullIN10impala_udf9StringValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nullvalue'], 'BOOLEAN', ['TIMESTAMP'], '_ZN6impala15IsNullPredicate6IsNullIN10impala_udf12TimestampValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nullvalue'], 'BOOLEAN', ['DECIMAL'], '_ZN6impala15IsNullPredicate6IsNullIN10impala_udf10DecimalValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nonnullvalue'], 'BOOLEAN', ['BOOLEAN'], '_ZN6impala15IsNullPredicate9IsNotNullIN10impala_udf10BooleanValEEES3_PNS2_15FunctionContextERKT_'],
[['nonnullvalue'], 'BOOLEAN', ['TINYINT'], '_ZN6impala15IsNullPredicate9IsNotNullIN10impala_udf10TinyIntValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nonnullvalue'], 'BOOLEAN', ['SMALLINT'], '_ZN6impala15IsNullPredicate9IsNotNullIN10impala_udf11SmallIntValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nonnullvalue'], 'BOOLEAN', ['INT'], '_ZN6impala15IsNullPredicate9IsNotNullIN10impala_udf6IntValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nonnullvalue'], 'BOOLEAN', ['BIGINT'], '_ZN6impala15IsNullPredicate9IsNotNullIN10impala_udf9BigIntValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nonnullvalue'], 'BOOLEAN', ['FLOAT'], '_ZN6impala15IsNullPredicate9IsNotNullIN10impala_udf8FloatValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nonnullvalue'], 'BOOLEAN', ['DOUBLE'], '_ZN6impala15IsNullPredicate9IsNotNullIN10impala_udf9DoubleValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nonnullvalue'], 'BOOLEAN', ['STRING'], '_ZN6impala15IsNullPredicate9IsNotNullIN10impala_udf9StringValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nonnullvalue'], 'BOOLEAN', ['TIMESTAMP'], '_ZN6impala15IsNullPredicate9IsNotNullIN10impala_udf12TimestampValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
[['nonnullvalue'], 'BOOLEAN', ['DECIMAL'], '_ZN6impala15IsNullPredicate9IsNotNullIN10impala_udf10DecimalValEEENS2_10BooleanValEPNS2_15FunctionContextERKT_'],
# Bit and Byte functions
# For functions corresponding to builtin operators, we can reuse the implementations
[['bitand'], 'TINYINT', ['TINYINT', 'TINYINT'], 'impala::Operators::Bitand_TinyIntVal_TinyIntVal'],
[['bitand'], 'SMALLINT', ['SMALLINT', 'SMALLINT'], 'impala::Operators::Bitand_SmallIntVal_SmallIntVal'],
[['bitand'], 'INT', ['INT', 'INT'], 'impala::Operators::Bitand_IntVal_IntVal'],
[['bitand'], 'BIGINT', ['BIGINT', 'BIGINT'], 'impala::Operators::Bitand_BigIntVal_BigIntVal'],
[['bitor'], 'TINYINT', ['TINYINT', 'TINYINT'], 'impala::Operators::Bitor_TinyIntVal_TinyIntVal'],
[['bitor'], 'SMALLINT', ['SMALLINT', 'SMALLINT'], 'impala::Operators::Bitor_SmallIntVal_SmallIntVal'],
[['bitor'], 'INT', ['INT', 'INT'], 'impala::Operators::Bitor_IntVal_IntVal'],
[['bitor'], 'BIGINT', ['BIGINT', 'BIGINT'], 'impala::Operators::Bitor_BigIntVal_BigIntVal'],
[['bitxor'], 'TINYINT', ['TINYINT', 'TINYINT'], 'impala::Operators::Bitxor_TinyIntVal_TinyIntVal'],
[['bitxor'], 'SMALLINT', ['SMALLINT', 'SMALLINT'], 'impala::Operators::Bitxor_SmallIntVal_SmallIntVal'],
[['bitxor'], 'INT', ['INT', 'INT'], 'impala::Operators::Bitxor_IntVal_IntVal'],
[['bitxor'], 'BIGINT', ['BIGINT', 'BIGINT'], 'impala::Operators::Bitxor_BigIntVal_BigIntVal'],
[['bitnot'], 'TINYINT', ['TINYINT'], 'impala::Operators::Bitnot_TinyIntVal'],
[['bitnot'], 'SMALLINT', ['SMALLINT'], 'impala::Operators::Bitnot_SmallIntVal'],
[['bitnot'], 'INT', ['INT'], 'impala::Operators::Bitnot_IntVal'],
[['bitnot'], 'BIGINT', ['BIGINT'], 'impala::Operators::Bitnot_BigIntVal'],
[['countset'], 'INT', ['TINYINT'], '_ZN6impala16BitByteFunctions8CountSetIN10impala_udf10TinyIntValEEENS2_6IntValEPNS2_15FunctionContextERKT_'],
[['countset'], 'INT', ['SMALLINT'], '_ZN6impala16BitByteFunctions8CountSetIN10impala_udf11SmallIntValEEENS2_6IntValEPNS2_15FunctionContextERKT_'],
[['countset'], 'INT', ['INT'], '_ZN6impala16BitByteFunctions8CountSetIN10impala_udf6IntValEEES3_PNS2_15FunctionContextERKT_'],
[['countset'], 'INT', ['BIGINT'], '_ZN6impala16BitByteFunctions8CountSetIN10impala_udf9BigIntValEEENS2_6IntValEPNS2_15FunctionContextERKT_'],
[['countset'], 'INT', ['TINYINT', 'INT'], '_ZN6impala16BitByteFunctions8CountSetIN10impala_udf10TinyIntValEEENS2_6IntValEPNS2_15FunctionContextERKT_RKS4_'],
[['countset'], 'INT', ['SMALLINT', 'INT'], '_ZN6impala16BitByteFunctions8CountSetIN10impala_udf11SmallIntValEEENS2_6IntValEPNS2_15FunctionContextERKT_RKS4_'],
[['countset'], 'INT', ['INT', 'INT'], '_ZN6impala16BitByteFunctions8CountSetIN10impala_udf6IntValEEES3_PNS2_15FunctionContextERKT_RKS3_'],
[['countset'], 'INT', ['BIGINT', 'INT'], '_ZN6impala16BitByteFunctions8CountSetIN10impala_udf9BigIntValEEENS2_6IntValEPNS2_15FunctionContextERKT_RKS4_'],
[['getbit'], 'TINYINT', ['TINYINT', 'INT'], '_ZN6impala16BitByteFunctions6GetBitIN10impala_udf10TinyIntValEEES3_PNS2_15FunctionContextERKT_RKNS2_6IntValE'],
[['getbit'], 'TINYINT', ['SMALLINT', 'INT'], '_ZN6impala16BitByteFunctions6GetBitIN10impala_udf11SmallIntValEEENS2_10TinyIntValEPNS2_15FunctionContextERKT_RKNS2_6IntValE'],
[['getbit'], 'TINYINT', ['INT', 'INT'], '_ZN6impala16BitByteFunctions6GetBitIN10impala_udf6IntValEEENS2_10TinyIntValEPNS2_15FunctionContextERKT_RKS3_'],
[['getbit'], 'TINYINT', ['BIGINT', 'INT'], '_ZN6impala16BitByteFunctions6GetBitIN10impala_udf9BigIntValEEENS2_10TinyIntValEPNS2_15FunctionContextERKT_RKNS2_6IntValE'],
[['rotateleft'], 'TINYINT', ['TINYINT', 'INT'], 'impala::BitByteFunctions::RotateLeft'],
[['rotateleft'], 'SMALLINT', ['SMALLINT', 'INT'], 'impala::BitByteFunctions::RotateLeft'],
[['rotateleft'], 'INT', ['INT', 'INT'], 'impala::BitByteFunctions::RotateLeft'],
[['rotateleft'], 'BIGINT', ['BIGINT', 'INT'], 'impala::BitByteFunctions::RotateLeft'],
[['rotateright'], 'TINYINT', ['TINYINT', 'INT'], 'impala::BitByteFunctions::RotateRight'],
[['rotateright'], 'SMALLINT', ['SMALLINT', 'INT'], 'impala::BitByteFunctions::RotateRight'],
[['rotateright'], 'INT', ['INT', 'INT'], 'impala::BitByteFunctions::RotateRight'],
[['rotateright'], 'BIGINT', ['BIGINT', 'INT'], 'impala::BitByteFunctions::RotateRight'],
[['setbit'], 'TINYINT', ['TINYINT', 'INT'], '_ZN6impala16BitByteFunctions6SetBitIN10impala_udf10TinyIntValEEET_PNS2_15FunctionContextERKS4_RKNS2_6IntValE'],
[['setbit'], 'SMALLINT', ['SMALLINT', 'INT'], '_ZN6impala16BitByteFunctions6SetBitIN10impala_udf11SmallIntValEEET_PNS2_15FunctionContextERKS4_RKNS2_6IntValE'],
[['setbit'], 'INT', ['INT', 'INT'], '_ZN6impala16BitByteFunctions6SetBitIN10impala_udf6IntValEEET_PNS2_15FunctionContextERKS4_RKS3_'],
[['setbit'], 'BIGINT', ['BIGINT', 'INT'], '_ZN6impala16BitByteFunctions6SetBitIN10impala_udf9BigIntValEEET_PNS2_15FunctionContextERKS4_RKNS2_6IntValE'],
[['setbit'], 'TINYINT', ['TINYINT', 'INT', 'INT'], '_ZN6impala16BitByteFunctions6SetBitIN10impala_udf10TinyIntValEEET_PNS2_15FunctionContextERKS4_RKNS2_6IntValESB_'],
[['setbit'], 'SMALLINT', ['SMALLINT', 'INT', 'INT'], '_ZN6impala16BitByteFunctions6SetBitIN10impala_udf11SmallIntValEEET_PNS2_15FunctionContextERKS4_RKNS2_6IntValESB_'],
[['setbit'], 'INT', ['INT', 'INT', 'INT'], '_ZN6impala16BitByteFunctions6SetBitIN10impala_udf6IntValEEET_PNS2_15FunctionContextERKS4_RKS3_SA_'],
[['setbit'], 'BIGINT', ['BIGINT', 'INT', 'INT'], '_ZN6impala16BitByteFunctions6SetBitIN10impala_udf9BigIntValEEET_PNS2_15FunctionContextERKS4_RKNS2_6IntValESB_'],
[['shiftleft'], 'TINYINT', ['TINYINT', 'INT'], 'impala::BitByteFunctions::ShiftLeft'],
[['shiftleft'], 'SMALLINT', ['SMALLINT', 'INT'], 'impala::BitByteFunctions::ShiftLeft'],
[['shiftleft'], 'INT', ['INT', 'INT'], 'impala::BitByteFunctions::ShiftLeft'],
[['shiftleft'], 'BIGINT', ['BIGINT', 'INT'], 'impala::BitByteFunctions::ShiftLeft'],
[['shiftright'], 'TINYINT', ['TINYINT', 'INT'], 'impala::BitByteFunctions::ShiftRight'],
[['shiftright'], 'SMALLINT', ['SMALLINT', 'INT'], 'impala::BitByteFunctions::ShiftRight'],
[['shiftright'], 'INT', ['INT', 'INT'], 'impala::BitByteFunctions::ShiftRight'],
[['shiftright'], 'BIGINT', ['BIGINT', 'INT'], 'impala::BitByteFunctions::ShiftRight'],
]
invisible_functions = [
[['months_add_interval'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf6IntValEN5boost9date_time15months_durationINS4_9gregorian21greg_durations_configEEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['months_add_interval'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb1EN10impala_udf9BigIntValEN5boost9date_time15months_durationINS4_9gregorian21greg_durations_configEEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['months_sub_interval'], 'TIMESTAMP', ['TIMESTAMP', 'INT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf6IntValEN5boost9date_time15months_durationINS4_9gregorian21greg_durations_configEEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
[['months_sub_interval'], 'TIMESTAMP', ['TIMESTAMP', 'BIGINT'],
'_ZN6impala18TimestampFunctions6AddSubILb0EN10impala_udf9BigIntValEN5boost9date_time15months_durationINS4_9gregorian21greg_durations_configEEELb0EEENS2_12TimestampValEPNS2_15FunctionContextERKSA_RKT0_'],
]
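# A small lookup sketch (added for illustration; find_overloads is not part of
# the original file). Each entry unpacks as described in the format comment
# above the lists.
def find_overloads(name, functions=None):
  """Returns (return_type, arg_types, symbols) for every overload of name."""
  if functions is None:
    functions = visible_functions + invisible_functions
  overloads = []
  for entry in functions:
    aliases, return_type, args = entry[0], entry[1], entry[2]
    if name in aliases:
      overloads.append((return_type, args, entry[3:]))
  return overloads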
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Conversion API providing document conversion service for applications."""
import logging
from google.appengine.api import apiproxy_stub_map
from google.appengine.api.conversion import conversion_service_pb
from google.appengine.runtime import apiproxy_errors
CONVERSION_MAX_NUM_PER_REQUEST = 10
CONVERSION_MAX_SIZE_BYTES = 2 * (2 ** 20)
_CONVERSION_SERVICE = "conversion"
_CONVERT_METHOD = "Convert"
_IMAGE_WIDTH_FLAG = "imageWidth"
_FIRST_PAGE_FLAG = "firstPage"
_LAST_PAGE_FLAG = "lastPage"
_LAST_PAGE_DEFAULT = -1
_OCR_INPUT_LANGUAGE_FLAG = "input_language_hint"
class Error(Exception):
"""Base-class for exceptions in this module."""
class BackendDeadlineExceeded(Error):
"""Communication to backend service timed-out."""
class TransientError(Error):
"""Transient error while accessing the backend, please try again later."""
class BackendError(Error):
"""Something wrong in the backend that can't be sent back to application."""
class ConversionUnsupported(Error):
"""Unsupported conversion attempted."""
class ConversionTooLarge(Error):
"""The conversion is too large."""
class TooManyConversions(Error):
"""Too many conversions in the request."""
class InvalidRequest(Error):
"""The request was not formed properly."""
def _to_conversion_error(error):
"""Translate an application error to a conversion Error, if possible.
Args:
error: An ApplicationError to translate.
  Returns:
    The ConversionApi-specific exception corresponding to the error, or the
    original error if there is no mapping.
"""
error_map = {
conversion_service_pb.ConversionServiceError.TIMEOUT:
BackendDeadlineExceeded,
conversion_service_pb.ConversionServiceError.TRANSIENT_ERROR:
TransientError,
conversion_service_pb.ConversionServiceError.INTERNAL_ERROR:
BackendError,
conversion_service_pb.ConversionServiceError.UNSUPPORTED_CONVERSION:
ConversionUnsupported,
conversion_service_pb.ConversionServiceError.CONVERSION_TOO_LARGE:
ConversionTooLarge,
conversion_service_pb.ConversionServiceError.TOO_MANY_CONVERSIONS:
TooManyConversions,
conversion_service_pb.ConversionServiceError.INVALID_REQUEST:
InvalidRequest,
}
if error.application_error in error_map:
return error_map[error.application_error](error.error_detail)
else:
return error
def _to_error_text(error_code):
"""Translate an error code to an error message, if possible.
Args:
    error_code: A conversion_service_pb.ConversionServiceError error code.
  Returns:
    A human-readable error message.
"""
error_map = {
conversion_service_pb.ConversionServiceError.TIMEOUT:
"BackendDeadlineExceeded",
conversion_service_pb.ConversionServiceError.TRANSIENT_ERROR:
"TransientError",
conversion_service_pb.ConversionServiceError.INTERNAL_ERROR:
"BackendError",
conversion_service_pb.ConversionServiceError.UNSUPPORTED_CONVERSION:
"ConversionUnsupported",
conversion_service_pb.ConversionServiceError.CONVERSION_TOO_LARGE:
"ConversionTooLarge",
conversion_service_pb.ConversionServiceError.TOO_MANY_CONVERSIONS:
"TooManyConversions",
conversion_service_pb.ConversionServiceError.INVALID_REQUEST:
"InvalidRequest",
}
if error_code in error_map:
return error_map[error_code]
else:
return "UnknownError"
class Asset(object):
"""Represents a single asset in the request.
An asset is a generic blob of data. A conversion document must contain
at least one asset, typically the document contents. Additional assets
are those needed for the conversion, for example images in HTML.
"""
def __init__(self, mime_type, data, name=None):
"""Constructor.
Args:
mime_type: mime type of the asset (string).
data: data to be converted (string).
name: name of the asset (string).
Raises:
TypeError: if input arguments are not string.
"""
if not isinstance(mime_type, basestring):
raise TypeError("mime type %r is not a string" % (mime_type,))
self._mime_type = mime_type.lower()
if not isinstance(data, basestring):
raise TypeError("data %r is not a string" % (data,))
self._data = data
    if name is not None and not isinstance(name, basestring):
      raise TypeError("name %r is not a string" % (name,))
    self._name = name
@property
def mime_type(self):
"""The mime type of the asset (string)."""
return self._mime_type
@property
def data(self):
"""The data of the asset (string)."""
return self._data
@property
def name(self):
"""The name of the asset (string)."""
return self._name
def _populate_proto(self, asset_info_pb):
"""Populate an AssetInfo protocol buffer with Asset properties.
Args:
asset_info_pb: An AssetInfo protocol buffer.
"""
asset_info_pb.set_mime_type(self._mime_type)
asset_info_pb.set_data(self._data)
if self._name is not None:
asset_info_pb.set_name(self._name)
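# Example (illustration only; the names and data are made up): the main
# document plus an embedded image asset.
#   main = Asset("text/html", "<html>...</html>", name="index.html")
#   image = Asset("image/png", png_bytes, name="logo.png")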
class Conversion(object):
"""Represents a single conversion from one file format to another.
A conversion must contain at least one asset, typically the document contents.
Additional assets are those needed for the conversion, for example images in
HTML.
"""
def __init__(self,
asset,
output_mime_type,
image_width=800,
first_page=1,
last_page=None,
ocr_input_language=None):
"""Create a single conversion.
Args:
asset: An Asset instance.
output_mime_type: The output data mime type (string), put into the
output_mime_type field.
image_width: The output image width in pixels. Only applies to
conversions that generate image files.
first_page: The number of the first page to generate. Only applies to
conversions that generate image files.
last_page: The number of the last page to generate, defaults to the last
page of the document. Only applies to conversions that generate image
files.
      ocr_input_language: The language code in BCP 47 format, used by the OCR
        engine to search for language-specific character sets.
Raises:
TypeError: if an unexpected type is used for any of the arguments.
ValueError: if an unexpected value is used for any of the arguments.
"""
self._assets = []
if not asset.mime_type:
raise ValueError("Asset mime type should not be empty")
self.add_asset(asset)
Conversion._ensure_nonempty_string(
"output_mime_type", output_mime_type)
self._output_mime_type = output_mime_type.lower()
self._image_width = Conversion._ensure_positive_integer(
"image_width", image_width)
self._first_page = Conversion._ensure_positive_integer(
"first_page", first_page)
self._last_page = None
if last_page is not None:
self._last_page = Conversion._ensure_positive_integer(
"last_page", last_page)
self._ocr_input_language = None
if ocr_input_language is not None:
Conversion._ensure_nonempty_string(
"ocr_input_language", ocr_input_language)
self._ocr_input_language = ocr_input_language.lower()
@staticmethod
def _ensure_nonempty_string(argname, argvalue):
"""Ensure the input argument is a non-empty string.
Args:
argname: The name of argument to check.
argvalue: The value of argument to check.
Raises:
TypeError: if an unexpected type is used for the argument.
ValueError: if an unexpected value is used for the argument.
"""
if not isinstance(argvalue, basestring):
raise TypeError("%s(%r) is not a string" % (argname, argvalue))
if not argvalue:
raise ValueError("%s(%r) must not be empty" % (argname, argvalue))
@staticmethod
def _ensure_positive_integer(argname, argvalue):
"""Ensure the input argument is a positive integer.
Args:
argname: The name of argument to check.
argvalue: The value of argument to check.
Returns:
The input argument value.
Raises:
TypeError: if an unexpected type is used for the argument.
ValueError: if an unexpected value is used for the argument.
"""
if not isinstance(argvalue, (int, long)):
raise TypeError("%s(%r) is not integer or long" % (argname, argvalue))
if argvalue <= 0:
raise ValueError("%s(%r) must be larger than 0" % (argname, argvalue))
return argvalue
def add_asset(self, asset):
"""Add an asset into the conversion request.
Args:
asset: An Asset instance.
Raises:
TypeError: if the asset is not an Asset instance.
"""
if not isinstance(asset, Asset):
raise TypeError("Input %r is not an Asset instance" % asset)
self._assets.append(asset)
def _populate_proto(self, conversion_input_pb):
"""Populate a ConversionInput protocol buffer with Conversion instance.
Args:
conversion_input_pb: A ConversionInput protocol buffer.
"""
for asset in self._assets:
asset_pb = conversion_input_pb.mutable_input().add_asset()
asset._populate_proto(asset_pb)
conversion_input_pb.set_output_mime_type(self._output_mime_type)
self._populate_flags(conversion_input_pb)
def _populate_flags(self, conversion_input_pb):
"""Populate a ConversionInput protocol buffer with auxiliary parameters.
Args:
conversion_input_pb: A ConversionInput protocol buffer.
"""
image_width_flag = conversion_input_pb.add_flag()
image_width_flag.set_key(_IMAGE_WIDTH_FLAG)
image_width_flag.set_value(str(self._image_width))
first_page_flag = conversion_input_pb.add_flag()
first_page_flag.set_key(_FIRST_PAGE_FLAG)
first_page_flag.set_value(str(self._first_page))
last_page_flag = conversion_input_pb.add_flag()
last_page_flag.set_key(_LAST_PAGE_FLAG)
if self._last_page is not None:
last_page_flag.set_value(str(self._last_page))
else:
last_page_flag.set_value(str(_LAST_PAGE_DEFAULT))
if self._ocr_input_language is not None:
ocr_input_language_flag = conversion_input_pb.add_flag()
ocr_input_language_flag.set_key(_OCR_INPUT_LANGUAGE_FLAG)
ocr_input_language_flag.set_value(self._ocr_input_language)
class ConversionOutput(object):
"""Represents a single conversion output from the response.
A conversion output includes the error code and a list of converted assets.
"""
def __init__(self, conversion_output_proto):
"""Constructor.
Args:
conversion_output_proto: the ConversionOutput protocol buffer.
Raises:
AssertionError: if asset_info_proto is not an AssetInfo protocol buffer.
"""
assert isinstance(conversion_output_proto,
conversion_service_pb.ConversionOutput)
self._error_code = conversion_output_proto.error_code()
self._error_text = "OK"
if self._error_code != conversion_service_pb.ConversionServiceError.OK:
self._error_text = _to_error_text(self._error_code)
self._assets = []
for asset_pb in conversion_output_proto.output().asset_list():
self._assets.append(Asset(
asset_pb.mime_type(), asset_pb.data(), asset_pb.name()))
@property
def error_code(self):
"""The error code of this conversion."""
return self._error_code
@property
def error_text(self):
"""The error message of this conversion if not successful."""
return self._error_text
@property
def assets(self):
"""A list of converted assets in the format of Asset instances."""
return self._assets
def convert(conversion, deadline=None):
"""Makes all conversions in parallel, blocking until all results are returned.
Args:
conversion: A Conversion instance or a list of Conversion instances.
deadline: Optional deadline in seconds for all the conversions.
Returns:
    A ConversionOutput instance if conversion is a Conversion instance, or a
    list of ConversionOutput instances, one per Conversion in the same order.
Raises:
TypeError: Input conversions with wrong type.
See more details in _to_conversion_error function.
"""
rpc = create_rpc(deadline=deadline)
make_convert_call(rpc, conversion)
return rpc.get_result()
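# Usage sketch (illustrative, not part of the original API): wires
# Asset -> Conversion -> convert(). The HTML content and asset name below
# are made up for the example, and Asset is assumed to take
# (mime_type, data, name=None); error handling mirrors the
# ConversionOutput properties defined later in this module.
def _example_convert_html_to_png():
  asset = Asset("text/html", "<b>some document</b>", name="document.html")
  conversion = Conversion(asset, "image/png", image_width=400)
  result = convert(conversion)
  if result.error_code != conversion_service_pb.ConversionServiceError.OK:
    raise RuntimeError(result.error_text)
  return result.assets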
def create_rpc(deadline=None, callback=None):
"""Creates an RPC object for use with the Conversion API.
Args:
deadline: Optional deadline in seconds for the operation; the default is
a system-specific deadline (typically 5 seconds).
callback: Optional callable to invoke on completion.
Returns:
An apiproxy_stub_map.UserRPC object specialized for this service.
"""
return apiproxy_stub_map.UserRPC(_CONVERSION_SERVICE, deadline, callback)
def make_convert_call(rpc, conversion):
"""Executes the RPC call to do the conversions.
  The result can then be retrieved from rpc.get_result(), which will call
  _get_convert_result. See the docstring there for more details.
Args:
rpc: a UserRPC instance.
conversion: A Conversion instance or a list of Conversion instances.
Raises:
TypeError: Input conversions with wrong type.
See more details in _to_conversion_error function.
"""
logging.warning("The Conversion API will be decommissioned in November 2012 "
"and all calls to it will return an error.")
request = conversion_service_pb.ConversionRequest()
response = conversion_service_pb.ConversionResponse()
try:
conversions = list(iter(conversion))
except TypeError:
conversions = [conversion]
multiple = False
else:
multiple = True
for conversion in conversions:
if isinstance(conversion, Conversion):
conversion_input_pb = request.add_conversion()
conversion._populate_proto(conversion_input_pb)
else:
raise TypeError("conversion must be a Conversion instance "
"or a list of Conversion instances")
rpc.make_call(_CONVERT_METHOD, request, response,
_get_convert_result, user_data=multiple)
def _get_convert_result(rpc):
"""Check success, handle exceptions, and return conversion results.
Args:
rpc: A UserRPC instance.
Returns:
    A ConversionOutput instance if conversion is a Conversion instance, or a
    list of ConversionOutput instances, one per Conversion in the same order.
Raises:
See more details in _to_conversion_error function.
"""
assert rpc.service == _CONVERSION_SERVICE, repr(rpc.service)
assert rpc.method == _CONVERT_METHOD, repr(rpc.method)
try:
rpc.check_success()
  except apiproxy_errors.ApplicationError as e:
raise _to_conversion_error(e)
results = []
for output_pb in rpc.response.result_list():
results.append(ConversionOutput(output_pb))
multiple = rpc.user_data
if multiple:
return results
else:
assert len(results) == 1
return results[0]
|
|
# Copyright 2011 OpenStack Foundation
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import time
import eventlet
import greenlet
from oslo.config import cfg
import six
from logcollector.openstack.common import excutils
from logcollector.openstack.common.gettextutils import _
from logcollector.openstack.common import importutils
from logcollector.openstack.common import jsonutils
from logcollector.openstack.common import log as logging
from logcollector.openstack.common.rpc import amqp as rpc_amqp
from logcollector.openstack.common.rpc import common as rpc_common
qpid_codec = importutils.try_import("qpid.codec010")
qpid_messaging = importutils.try_import("qpid.messaging")
qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")
LOG = logging.getLogger(__name__)
qpid_opts = [
cfg.StrOpt('qpid_hostname',
default='localhost',
help='Qpid broker hostname'),
cfg.IntOpt('qpid_port',
default=5672,
help='Qpid broker port'),
cfg.ListOpt('qpid_hosts',
default=['$qpid_hostname:$qpid_port'],
help='Qpid HA cluster host:port pairs'),
cfg.StrOpt('qpid_username',
default='',
help='Username for qpid connection'),
cfg.StrOpt('qpid_password',
default='',
help='Password for qpid connection',
secret=True),
cfg.StrOpt('qpid_sasl_mechanisms',
default='',
help='Space separated list of SASL mechanisms to use for auth'),
cfg.IntOpt('qpid_heartbeat',
default=60,
help='Seconds between connection keepalive heartbeats'),
cfg.StrOpt('qpid_protocol',
default='tcp',
help="Transport to use, either 'tcp' or 'ssl'"),
cfg.BoolOpt('qpid_tcp_nodelay',
default=True,
help='Disable Nagle algorithm'),
# NOTE(russellb) If any additional versions are added (beyond 1 and 2),
# this file could probably use some additional refactoring so that the
# differences between each version are split into different classes.
cfg.IntOpt('qpid_topology_version',
default=1,
help="The qpid topology version to use. Version 1 is what "
"was originally used by impl_qpid. Version 2 includes "
"some backwards-incompatible changes that allow broker "
"federation to work. Users should update to version 2 "
"when they are able to take everything down, as it "
"requires a clean break."),
]
cfg.CONF.register_opts(qpid_opts)
JSON_CONTENT_TYPE = 'application/json; charset=utf8'
def raise_invalid_topology_version(conf):
msg = (_("Invalid value for qpid_topology_version: %d") %
conf.qpid_topology_version)
LOG.error(msg)
    raise ValueError(msg)
class ConsumerBase(object):
"""Consumer base class."""
def __init__(self, conf, session, callback, node_name, node_opts,
link_name, link_opts):
"""Declare a queue on an amqp session.
'session' is the amqp session to use
'callback' is the callback to call when messages are received
'node_name' is the first part of the Qpid address string, before ';'
'node_opts' will be applied to the "x-declare" section of "node"
in the address string.
'link_name' goes into the "name" field of the "link" in the address
string
'link_opts' will be applied to the "x-declare" section of "link"
in the address string.
"""
self.callback = callback
self.receiver = None
self.session = None
if conf.qpid_topology_version == 1:
addr_opts = {
"create": "always",
"node": {
"type": "topic",
"x-declare": {
"durable": True,
"auto-delete": True,
},
},
"link": {
"durable": True,
"x-declare": {
"durable": False,
"auto-delete": True,
"exclusive": False,
},
},
}
addr_opts["node"]["x-declare"].update(node_opts)
elif conf.qpid_topology_version == 2:
addr_opts = {
"link": {
"x-declare": {
"auto-delete": True,
"exclusive": False,
},
},
}
else:
            raise_invalid_topology_version(conf)
addr_opts["link"]["x-declare"].update(link_opts)
if link_name:
addr_opts["link"]["name"] = link_name
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
self.connect(session)
def connect(self, session):
"""Declare the receiver on connect."""
self._declare_receiver(session)
def reconnect(self, session):
"""Re-declare the receiver after a qpid reconnect."""
self._declare_receiver(session)
def _declare_receiver(self, session):
self.session = session
self.receiver = session.receiver(self.address)
self.receiver.capacity = 1
def _unpack_json_msg(self, msg):
"""Load the JSON data in msg if msg.content_type indicates that it
is necessary. Put the loaded data back into msg.content and
update msg.content_type appropriately.
A Qpid Message containing a dict will have a content_type of
'amqp/map', whereas one containing a string that needs to be converted
back from JSON will have a content_type of JSON_CONTENT_TYPE.
:param msg: a Qpid Message object
:returns: None
"""
if msg.content_type == JSON_CONTENT_TYPE:
msg.content = jsonutils.loads(msg.content)
msg.content_type = 'amqp/map'
def consume(self):
"""Fetch the message and pass it to the callback object."""
message = self.receiver.fetch()
try:
self._unpack_json_msg(message)
msg = rpc_common.deserialize_msg(message.content)
self.callback(msg)
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
finally:
# TODO(sandy): Need support for optional ack_on_error.
self.session.acknowledge(message)
def get_receiver(self):
return self.receiver
def get_node_name(self):
return self.address.split(';')[0]
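# Illustrative note: for qpid_topology_version 1, a ConsumerBase built for
# exchange "nova" and topic "compute" ends up with an address string of
# roughly this shape (the option values shown are the defaults declared
# above):
#
#   'nova/compute ; {"create": "always",
#                    "node": {"type": "topic", "x-declare": {...}},
#                    "link": {"name": "compute", "x-declare": {...}}}'
#
# For version 2 the node side collapses to a bare "amq.topic/..." node name
# and only the link options are declared.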
class DirectConsumer(ConsumerBase):
"""Queue/consumer class for 'direct'."""
def __init__(self, conf, session, msg_id, callback):
"""Init a 'direct' queue.
'session' is the amqp session to use
'msg_id' is the msg_id to listen on
'callback' is the callback to call when messages are received
"""
link_opts = {
"auto-delete": conf.amqp_auto_delete,
"exclusive": True,
"durable": conf.amqp_durable_queues,
}
if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (msg_id, msg_id)
node_opts = {"type": "direct"}
link_name = msg_id
elif conf.qpid_topology_version == 2:
node_name = "amq.direct/%s" % msg_id
node_opts = {}
link_name = None
else:
            raise_invalid_topology_version(conf)
super(DirectConsumer, self).__init__(conf, session, callback,
node_name, node_opts, link_name,
link_opts)
class TopicConsumer(ConsumerBase):
"""Consumer class for 'topic'."""
def __init__(self, conf, session, topic, callback, name=None,
exchange_name=None):
"""Init a 'topic' queue.
:param session: the amqp session to use
:param topic: is the topic to listen on
:paramtype topic: str
:param callback: the callback to call when messages are received
:param name: optional queue name, defaults to topic
"""
exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
link_opts = {
"auto-delete": conf.amqp_auto_delete,
"durable": conf.amqp_durable_queues,
}
if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (exchange_name, topic)
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
else:
            raise_invalid_topology_version(conf)
super(TopicConsumer, self).__init__(conf, session, callback, node_name,
{}, name or topic, link_opts)
class FanoutConsumer(ConsumerBase):
"""Consumer class for 'fanout'."""
def __init__(self, conf, session, topic, callback):
"""Init a 'fanout' queue.
'session' is the amqp session to use
'topic' is the topic to listen on
'callback' is the callback to call when messages are received
"""
self.conf = conf
link_opts = {"exclusive": True}
if conf.qpid_topology_version == 1:
node_name = "%s_fanout" % topic
node_opts = {"durable": False, "type": "fanout"}
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/fanout/%s" % topic
node_opts = {}
else:
            raise_invalid_topology_version(conf)
super(FanoutConsumer, self).__init__(conf, session, callback,
node_name, node_opts, None,
link_opts)
class Publisher(object):
"""Base Publisher class."""
def __init__(self, conf, session, node_name, node_opts=None):
"""Init the Publisher class with the exchange_name, routing_key,
and other options
"""
self.sender = None
self.session = session
if conf.qpid_topology_version == 1:
addr_opts = {
"create": "always",
"node": {
"type": "topic",
"x-declare": {
"durable": False,
# auto-delete isn't implemented for exchanges in qpid,
# but put in here anyway
"auto-delete": True,
},
},
}
if node_opts:
addr_opts["node"]["x-declare"].update(node_opts)
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
elif conf.qpid_topology_version == 2:
self.address = node_name
else:
            raise_invalid_topology_version(conf)
self.reconnect(session)
def reconnect(self, session):
"""Re-establish the Sender after a reconnection."""
self.sender = session.sender(self.address)
def _pack_json_msg(self, msg):
"""Qpid cannot serialize dicts containing strings longer than 65535
characters. This function dumps the message content to a JSON
string, which Qpid is able to handle.
:param msg: May be either a Qpid Message object or a bare dict.
:returns: A Qpid Message with its content field JSON encoded.
"""
try:
msg.content = jsonutils.dumps(msg.content)
except AttributeError:
# Need to have a Qpid message so we can set the content_type.
msg = qpid_messaging.Message(jsonutils.dumps(msg))
msg.content_type = JSON_CONTENT_TYPE
return msg
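    # Illustrative note: a payload such as {'data': 'x' * 70000} exceeds
    # Qpid's 65535-character limit for map-encoded strings, so send()
    # below catches the resulting CodecException and reships the message
    # as a JSON string with content_type JSON_CONTENT_TYPE.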
def send(self, msg):
"""Send a message."""
try:
# Check if Qpid can encode the message
check_msg = msg
if not hasattr(check_msg, 'content_type'):
check_msg = qpid_messaging.Message(msg)
content_type = check_msg.content_type
enc, dec = qpid_messaging.message.get_codec(content_type)
enc(check_msg.content)
except qpid_codec.CodecException:
# This means the message couldn't be serialized as a dict.
msg = self._pack_json_msg(msg)
self.sender.send(msg)
class DirectPublisher(Publisher):
"""Publisher class for 'direct'."""
def __init__(self, conf, session, msg_id):
"""Init a 'direct' publisher."""
if conf.qpid_topology_version == 1:
node_name = msg_id
node_opts = {"type": "direct"}
elif conf.qpid_topology_version == 2:
node_name = "amq.direct/%s" % msg_id
node_opts = {}
else:
            raise_invalid_topology_version(conf)
super(DirectPublisher, self).__init__(conf, session, node_name,
node_opts)
class TopicPublisher(Publisher):
"""Publisher class for 'topic'."""
def __init__(self, conf, session, topic):
"""Init a 'topic' publisher.
"""
exchange_name = rpc_amqp.get_control_exchange(conf)
if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (exchange_name, topic)
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
else:
            raise_invalid_topology_version(conf)
super(TopicPublisher, self).__init__(conf, session, node_name)
class FanoutPublisher(Publisher):
"""Publisher class for 'fanout'."""
def __init__(self, conf, session, topic):
"""Init a 'fanout' publisher.
"""
if conf.qpid_topology_version == 1:
node_name = "%s_fanout" % topic
node_opts = {"type": "fanout"}
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/fanout/%s" % topic
node_opts = {}
else:
            raise_invalid_topology_version(conf)
super(FanoutPublisher, self).__init__(conf, session, node_name,
node_opts)
class NotifyPublisher(Publisher):
"""Publisher class for notifications."""
def __init__(self, conf, session, topic):
"""Init a 'topic' publisher.
"""
exchange_name = rpc_amqp.get_control_exchange(conf)
node_opts = {"durable": True}
if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (exchange_name, topic)
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
else:
            raise_invalid_topology_version(conf)
super(NotifyPublisher, self).__init__(conf, session, node_name,
node_opts)
class Connection(object):
"""Connection object."""
pool = None
def __init__(self, conf, server_params=None):
if not qpid_messaging:
raise ImportError("Failed to import qpid.messaging")
self.session = None
self.consumers = {}
self.consumer_thread = None
self.proxy_callbacks = []
self.conf = conf
if server_params and 'hostname' in server_params:
# NOTE(russellb) This enables support for cast_to_server.
server_params['qpid_hosts'] = [
'%s:%d' % (server_params['hostname'],
server_params.get('port', 5672))
]
params = {
'qpid_hosts': self.conf.qpid_hosts,
'username': self.conf.qpid_username,
'password': self.conf.qpid_password,
}
params.update(server_params or {})
self.brokers = params['qpid_hosts']
self.username = params['username']
self.password = params['password']
self.connection_create(self.brokers[0])
self.reconnect()
def connection_create(self, broker):
# Create the connection - this does not open the connection
self.connection = qpid_messaging.Connection(broker)
# Check if flags are set and if so set them for the connection
# before we call open
self.connection.username = self.username
self.connection.password = self.password
self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
# Reconnection is done by self.reconnect()
self.connection.reconnect = False
self.connection.heartbeat = self.conf.qpid_heartbeat
self.connection.transport = self.conf.qpid_protocol
self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay
def _register_consumer(self, consumer):
self.consumers[str(consumer.get_receiver())] = consumer
def _lookup_consumer(self, receiver):
return self.consumers[str(receiver)]
def reconnect(self):
"""Handles reconnecting and re-establishing sessions and queues."""
attempt = 0
delay = 1
while True:
# Close the session if necessary
if self.connection.opened():
try:
self.connection.close()
except qpid_exceptions.ConnectionError:
pass
broker = self.brokers[attempt % len(self.brokers)]
attempt += 1
try:
self.connection_create(broker)
self.connection.open()
except qpid_exceptions.ConnectionError as e:
msg_dict = dict(e=e, delay=delay)
msg = _("Unable to connect to AMQP server: %(e)s. "
"Sleeping %(delay)s seconds") % msg_dict
LOG.error(msg)
time.sleep(delay)
delay = min(2 * delay, 60)
else:
LOG.info(_('Connected to AMQP server on %s'), broker)
break
self.session = self.connection.session()
if self.consumers:
consumers = self.consumers
self.consumers = {}
for consumer in six.itervalues(consumers):
consumer.reconnect(self.session)
self._register_consumer(consumer)
LOG.debug(_("Re-established AMQP queues"))
def ensure(self, error_callback, method, *args, **kwargs):
while True:
try:
return method(*args, **kwargs)
except (qpid_exceptions.Empty,
qpid_exceptions.ConnectionError) as e:
if error_callback:
error_callback(e)
self.reconnect()
def close(self):
"""Close/release this connection."""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
try:
self.connection.close()
except Exception:
# NOTE(dripton) Logging exceptions that happen during cleanup just
# causes confusion; there's really nothing useful we can do with
# them.
pass
self.connection = None
def reset(self):
"""Reset a connection so it can be used again."""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.session.close()
self.session = self.connection.session()
self.consumers = {}
def declare_consumer(self, consumer_cls, topic, callback):
"""Create a Consumer using the class that was passed in and
add it to our list of consumers
"""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s") % log_info)
def _declare_consumer():
consumer = consumer_cls(self.conf, self.session, topic, callback)
self._register_consumer(consumer)
return consumer
return self.ensure(_connect_error, _declare_consumer)
def iterconsume(self, limit=None, timeout=None):
"""Return an iterator that will consume from all queues/consumers."""
def _error_callback(exc):
if isinstance(exc, qpid_exceptions.Empty):
LOG.debug(_('Timed out waiting for RPC response: %s') %
str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
str(exc))
def _consume():
nxt_receiver = self.session.next_receiver(timeout=timeout)
try:
self._lookup_consumer(nxt_receiver).consume()
except Exception:
LOG.exception(_("Error processing message. Skipping it."))
for iteration in itertools.count(0):
if limit and iteration >= limit:
                return
yield self.ensure(_error_callback, _consume)
def cancel_consumer_thread(self):
"""Cancel a consumer thread."""
if self.consumer_thread is not None:
self.consumer_thread.kill()
try:
self.consumer_thread.wait()
except greenlet.GreenletExit:
pass
self.consumer_thread = None
def wait_on_proxy_callbacks(self):
"""Wait for all proxy callback threads to exit."""
for proxy_cb in self.proxy_callbacks:
proxy_cb.wait()
def publisher_send(self, cls, topic, msg):
"""Send to a publisher based on the publisher class."""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.exception(_("Failed to publish message to topic "
"'%(topic)s': %(err_str)s") % log_info)
def _publisher_send():
publisher = cls(self.conf, self.session, topic)
publisher.send(msg)
return self.ensure(_connect_error, _publisher_send)
def declare_direct_consumer(self, topic, callback):
"""Create a 'direct' queue.
In nova's use, this is generally a msg_id queue used for
responses for call/multicall
"""
self.declare_consumer(DirectConsumer, topic, callback)
def declare_topic_consumer(self, topic, callback=None, queue_name=None,
exchange_name=None):
"""Create a 'topic' consumer."""
self.declare_consumer(functools.partial(TopicConsumer,
name=queue_name,
exchange_name=exchange_name,
),
topic, callback)
def declare_fanout_consumer(self, topic, callback):
"""Create a 'fanout' consumer."""
self.declare_consumer(FanoutConsumer, topic, callback)
def direct_send(self, msg_id, msg):
"""Send a 'direct' message."""
self.publisher_send(DirectPublisher, msg_id, msg)
def topic_send(self, topic, msg, timeout=None):
"""Send a 'topic' message."""
#
# We want to create a message with attributes, e.g. a TTL. We
# don't really need to keep 'msg' in its JSON format any longer
# so let's create an actual qpid message here and get some
# value-add on the go.
#
# WARNING: Request timeout happens to be in the same units as
# qpid's TTL (seconds). If this changes in the future, then this
# will need to be altered accordingly.
#
qpid_message = qpid_messaging.Message(content=msg, ttl=timeout)
self.publisher_send(TopicPublisher, topic, qpid_message)
def fanout_send(self, topic, msg):
"""Send a 'fanout' message."""
self.publisher_send(FanoutPublisher, topic, msg)
def notify_send(self, topic, msg, **kwargs):
"""Send a notify message on a topic."""
self.publisher_send(NotifyPublisher, topic, msg)
def consume(self, limit=None):
"""Consume from all queues/consumers."""
it = self.iterconsume(limit=limit)
while True:
try:
six.next(it)
except StopIteration:
return
def consume_in_thread(self):
"""Consumer from all queues/consumers in a greenthread."""
@excutils.forever_retry_uncaught_exceptions
def _consumer_thread():
try:
self.consume()
except greenlet.GreenletExit:
return
if self.consumer_thread is None:
self.consumer_thread = eventlet.spawn(_consumer_thread)
return self.consumer_thread
def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer that calls a method in a proxy object."""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
if fanout:
consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb)
else:
consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb)
self._register_consumer(consumer)
return consumer
def create_worker(self, topic, proxy, pool_name):
"""Create a worker that calls a method in a proxy object."""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
name=pool_name)
self._register_consumer(consumer)
return consumer
def join_consumer_pool(self, callback, pool_name, topic,
exchange_name=None, ack_on_error=True):
"""Register as a member of a group of consumers for a given topic from
the specified exchange.
Exactly one member of a given pool will receive each message.
A message will be delivered to multiple pools, if more than
one is created.
"""
callback_wrapper = rpc_amqp.CallbackWrapper(
conf=self.conf,
callback=callback,
connection_pool=rpc_amqp.get_connection_pool(self.conf,
Connection),
wait_for_consumers=not ack_on_error
)
self.proxy_callbacks.append(callback_wrapper)
consumer = TopicConsumer(conf=self.conf,
session=self.session,
topic=topic,
callback=callback_wrapper,
name=pool_name,
exchange_name=exchange_name)
self._register_consumer(consumer)
return consumer
def create_connection(conf, new=True):
"""Create a connection."""
return rpc_amqp.create_connection(
conf, new,
rpc_amqp.get_connection_pool(conf, Connection))
def multicall(conf, context, topic, msg, timeout=None):
"""Make a call that returns multiple times."""
return rpc_amqp.multicall(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def call(conf, context, topic, msg, timeout=None):
"""Sends a message on a topic and wait for a response."""
return rpc_amqp.call(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def cast(conf, context, topic, msg):
"""Sends a message on a topic without waiting for a response."""
return rpc_amqp.cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast(conf, context, topic, msg):
"""Sends a message on a fanout exchange without waiting for a response."""
return rpc_amqp.fanout_cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a topic to a specific server."""
return rpc_amqp.cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a fanout exchange to a specific server."""
return rpc_amqp.fanout_cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def notify(conf, context, topic, msg, envelope):
"""Sends a notification event on a topic."""
return rpc_amqp.notify(conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection),
envelope)
def cleanup():
return rpc_amqp.cleanup(Connection.pool)
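def _example_usage(conf):
    """Usage sketch (illustrative only, not part of the public API).

    Wires a topic consumer to a handler and casts a message to it. The
    topic name and payload are made up for the example; ``conf`` is
    assumed to be an oslo.config CONF object with the qpid_* options
    registered (which happens at module import above).
    """
    def handle_msg(msg):
        # Just log whatever arrives on the topic.
        LOG.info(_("received: %r"), msg)
    conn = create_connection(conf)
    conn.declare_topic_consumer('demo_topic', handle_msg)
    conn.consume_in_thread()
    cast(conf, {}, 'demo_topic', {'method': 'ping', 'args': {}})
    conn.close()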
|
|
import numpy as np
import pandas as pd
from deap import algorithms, base, creator, tools
import copy
import logging
import random
from precis import abbreviate
from precis import evaluate
from precis import plot
from precis.base import AbbreviatedMeasure
class Generator:
    '''Generates abbreviated measures.
    Args:
        abbreviator (Abbreviator): The Abbreviator used to generate a
            scoring key from the data. If None, uses the top-N absolute
            correlation approach in Yarkoni (2010).
        evaluator (Evaluator): The Evaluator whose loss function the GA
            minimizes. If None, uses the loss function in Yarkoni (2010).
        cross_validate (bool): Whether or not to use split-half
            cross-validation.
        kwargs: Optional arguments to pass on to DEAP.
    '''
def __init__(self, abbreviator=None, evaluator=None, cross_validate=False,
**kwargs):
self.cross_val = cross_validate
if abbreviator is None:
abbreviator = abbreviate.TopNAbbreviator()
self.abbreviator = abbreviator
if evaluator is None:
evaluator = evaluate.YarkoniEvaluator()
self.evaluator = evaluator
# Deap settings
self.zero_to_one_ratio = kwargs.get('zero_to_one_ratio', 0.5)
self.indpb = kwargs.get('indpb', 0.05)
self.tourn_size = kwargs.get('tourn_size', 3)
self.pop_size = kwargs.get('pop_size', 200)
self.cxpb = kwargs.get('cxpb', 0.8)
self.mutpb = kwargs.get('mutpb', 0.2)
# Reset stats and logging
self.reset()
def _random_boolean(self, zero_to_one_ratio):
return random.random() < zero_to_one_ratio
def run(self, measure, n_gens=100, seed=None, resume=False, **kwargs):
''' Main abbreviated measure generation function.
Args:
measure (Measure): A Measure instance to abbreviate
n_gens (int): Number of generations to run GA for
seed (int): Optional integer to use as random seed
resume (bool): If True, AND the measure passed is the same as the
one already stored, AND the Generator has been run before, then
pick up where we left off--i.e., start with the last population
produced instead of initializing a new one.
kwargs: Additional keywords to pass on to the evaluation method of
the current LossFunction class.
        Returns: None. The best individuals are stored on the Generator;
            call abbreviate() to retrieve the final AbbreviatedMeasure.
'''
# Set random seed for both native Python and Numpy, to be safe
random.seed(seed)
np.random.seed(seed)
# Set up the GA
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMin)
toolbox = base.Toolbox()
toolbox.register(
"attr_bool", self._random_boolean, self.zero_to_one_ratio)
toolbox.register("individual", tools.initRepeat, creator.Individual,
toolbox.attr_bool, measure.n_X)
toolbox.register(
"population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", self.evaluate)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutFlipBit, indpb=self.indpb)
toolbox.register(
"select", tools.selTournament, tournsize=self.tourn_size)
self.measure = measure
self.evaluation_keywords = kwargs
# Cross-validation
if self.cross_val:
            inds = list(range(self.measure.n_subjects))
random.shuffle(inds)
self.train_subs = [x for i, x in enumerate(inds) if i % 2 != 0]
self.test_subs = [x for x in inds if x not in self.train_subs]
self.test_measure = copy.deepcopy(self.measure)
self.measure.select_subjects(self.train_subs)
self.test_measure.select_subjects(self.test_subs)
# Initialize population or pick up where we left off.
if resume and self.measure == measure and hasattr(self, 'pop'):
pop = self.pop
else:
self.reset()
pop = toolbox.population(n=self.pop_size)
self._evolve(
measure, pop, toolbox, n_gens, cxpb=self.cxpb, mutpb=self.mutpb)
def abbreviate(self, trim=False, stats=True, keep_original_labels=True):
final_items = self.best_individuals[-1]
# If cross-validation was used, activate the hold-out subjects
measure = self.test_measure if self.cross_val else self.measure
self.best = AbbreviatedMeasure(
measure, final_items, abbreviator=self.abbreviator,
evaluator=self.evaluator, trim=trim, stats=stats,
keep_original_labels=keep_original_labels)
return self.best
def _evolve(self, measure, population, toolbox, ngen, cxpb, mutpb,
verbose=True):
''' Main evolution algorithm. A tweaked version of the eaSimple
algorithm included in the DEAP package that adds per-generation logging
of the best individual's properties and drops all the
Statistics/HallOfFame stuff (since we're handling that ourselves). See
DEAP documentation of algorithms.eaSimple() for all arguments.
'''
# if verbose:
# column_names = ["gen", "evals"]
# if stats is not None:
# column_names += stats.functions.keys()
# logger = tools.Logbook(column_names)
# logger.logHeader()
# logger.logGeneration(evals=len(population), gen=0, stats=stats)
# Begin the generational process
for gen in range(0, ngen):
# Select the next generation individuals
offspring = toolbox.select(population, k=len(population))
            # Vary the pool of individuals
offspring = algorithms.varAnd(offspring, toolbox, cxpb, mutpb)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# Replace the current population by the offspring
offspring = sorted(
offspring, key=lambda x: x.fitness, reverse=True)
population[:] = offspring
# Save best individual as an AbbreviatedMeasure
self.best_individuals.append(population[0])
best_abb = AbbreviatedMeasure(
self.measure, population[0], abbreviator=self.abbreviator,
evaluator=self.evaluator, stats=True)
self.best_measures.append(best_abb)
r_squared = np.round(best_abb.r_squared.mean(), 2)
n_items = np.sum(population[0])
# Update the statistics with the new population
if self.stats is not None:
record = self.stats.compile(population)
self.logbook.record(
gen=gen, r_squared=r_squared, n_items=n_items, **record)
# Save last population in case we want to resume
self.pop = population
# if verbose:
# logger.logGeneration(evals=len(invalid_ind), gen=gen, stats=stats)
def evaluate(self, individual):
m = self.abbreviator.abbreviate_apply(
self.measure.dataset, select=individual)
loss = self.evaluator.evaluate(m, **self.evaluation_keywords)
return (loss, )
# def save(self):
# ''' Save results of abbreviation. '''
# pass
def reset(self):
''' Reset the Generator, removing all history, logging, and stats. '''
self.logbook = tools.Logbook()
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", np.mean)
stats.register("std", np.std)
stats.register("min", min)
self.stats = stats
# Store best individual in each generation, and associated measure
self.best_individuals = []
self.best_measures = []
def plot_history(self, **kwargs):
''' Convenience wrapper for history() in plot module. '''
return plot.history(self, **kwargs)
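# --- Usage sketch (illustrative) ---
# Minimal end-to-end flow, assuming `measure` is a precis Measure built
# elsewhere (e.g., from a DataFrame of item responses); the generation
# count and seed are arbitrary.
#
#   gen = Generator(cross_validate=True)
#   gen.run(measure, n_gens=50, seed=42)
#   abbreviated = gen.abbreviate(trim=True)
#   gen.plot_history()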
|
|
#!/usr/bin/python
"""
PDU
"""
import re
import socket
import struct
try:
import netifaces
except ImportError:
netifaces = None
from .settings import settings
from .debugging import ModuleLogger, bacpypes_debugging, btox, xtob
from .comm import PCI as _PCI, PDUData
# pack/unpack constants
_short_mask = 0xFFFFL
_long_mask = 0xFFFFFFFFL
# some debugging
_debug = 0
_log = ModuleLogger(globals())
#
# Address
#
_field_address = r"((?:\d+)|(?:0x(?:[0-9A-Fa-f][0-9A-Fa-f])+))"
_ip_address_port = r"(\d+\.\d+\.\d+\.\d+)(?::(\d+))?"
_ip_address_mask_port = r"(\d+\.\d+\.\d+\.\d+)(?:/(\d+))?(?::(\d+))?"
_net_ip_address_port = r"(\d+):" + _ip_address_port
_at_route = "(?:[@](?:" + _field_address + "|" + _ip_address_port + "))?"
field_address_re = re.compile("^" + _field_address + "$")
ip_address_port_re = re.compile("^" + _ip_address_port + "$")
ip_address_mask_port_re = re.compile("^" + _ip_address_mask_port + "$")
net_ip_address_port_re = re.compile("^" + _net_ip_address_port + "$")
net_ip_address_mask_port_re = re.compile(r"^(\d+):" + _ip_address_mask_port + "$")
ethernet_re = re.compile(r'^([0-9A-Fa-f][0-9A-Fa-f][:]){5}([0-9A-Fa-f][0-9A-Fa-f])$' )
interface_re = re.compile(r'^(?:([\w]+))(?::(\d+))?$')
net_broadcast_route_re = re.compile("^([0-9]+):[*]" + _at_route + "$")
net_station_route_re = re.compile("^([0-9]+):" + _field_address + _at_route + "$")
net_ip_address_route_re = re.compile("^([0-9]+):" + _ip_address_port + _at_route + "$")
combined_pattern = re.compile("^(?:(?:([0-9]+)|([*])):)?(?:([*])|" + _field_address + "|" + _ip_address_mask_port + ")" + _at_route + "$")
@bacpypes_debugging
class Address:
nullAddr = 0
localBroadcastAddr = 1
localStationAddr = 2
remoteBroadcastAddr = 3
remoteStationAddr = 4
globalBroadcastAddr = 5
def __init__(self, *args):
if _debug: Address._debug("__init__ %r", args)
self.addrType = Address.nullAddr
self.addrNet = None
self.addrAddr = None
self.addrLen = None
self.addrRoute = None
if len(args) == 1:
self.decode_address(args[0])
elif len(args) == 2:
self.decode_address(args[1])
if self.addrType == Address.localStationAddr:
self.addrType = Address.remoteStationAddr
self.addrNet = args[0]
elif self.addrType == Address.localBroadcastAddr:
self.addrType = Address.remoteBroadcastAddr
self.addrNet = args[0]
else:
raise ValueError("unrecognized address ctor form")
def decode_address(self, addr):
"""Initialize the address from a string. Lots of different forms are supported."""
if _debug: Address._debug("decode_address %r (%s)", addr, type(addr))
# start out assuming this is a local station and didn't get routed
self.addrType = Address.localStationAddr
self.addrNet = None
self.addrAddr = None
self.addrLen = None
self.addrRoute = None
if addr == "*":
if _debug: Address._debug(" - localBroadcast")
self.addrType = Address.localBroadcastAddr
elif addr == "*:*":
if _debug: Address._debug(" - globalBroadcast")
self.addrType = Address.globalBroadcastAddr
elif isinstance(addr, int):
if _debug: Address._debug(" - int")
if (addr < 0) or (addr >= 256):
raise ValueError("address out of range")
self.addrAddr = struct.pack('B', addr)
self.addrLen = 1
elif isinstance(addr, basestring):
if _debug: Address._debug(" - str")
m = combined_pattern.match(addr)
if m:
if _debug: Address._debug(" - combined pattern")
(net, global_broadcast,
local_broadcast,
local_addr,
local_ip_addr, local_ip_net, local_ip_port,
route_addr, route_ip_addr, route_ip_port
) = m.groups()
if global_broadcast and local_broadcast:
if _debug: Address._debug(" - global broadcast")
self.addrType = Address.globalBroadcastAddr
elif net and local_broadcast:
if _debug: Address._debug(" - remote broadcast")
net_addr = int(net)
if (net_addr >= 65535):
raise ValueError("network out of range")
self.addrType = Address.remoteBroadcastAddr
self.addrNet = net_addr
elif local_broadcast:
if _debug: Address._debug(" - local broadcast")
self.addrType = Address.localBroadcastAddr
elif net:
if _debug: Address._debug(" - remote station")
net_addr = int(net)
if (net_addr >= 65535):
raise ValueError("network out of range")
self.addrType = Address.remoteStationAddr
self.addrNet = net_addr
if local_addr:
if _debug: Address._debug(" - simple address")
if local_addr.startswith("0x"):
self.addrAddr = xtob(local_addr[2:])
self.addrLen = len(self.addrAddr)
else:
local_addr = int(local_addr)
if local_addr >= 256:
raise ValueError("address out of range")
self.addrAddr = struct.pack('B', local_addr)
self.addrLen = 1
if local_ip_addr:
if _debug: Address._debug(" - ip address")
if not local_ip_port:
local_ip_port = '47808'
if not local_ip_net:
local_ip_net = '32'
self.addrPort = int(local_ip_port)
self.addrTuple = (local_ip_addr, self.addrPort)
if _debug: Address._debug(" - addrTuple: %r", self.addrTuple)
addrstr = socket.inet_aton(local_ip_addr)
self.addrIP = struct.unpack('!L', addrstr)[0]
self.addrMask = (_long_mask << (32 - int(local_ip_net))) & _long_mask
self.addrHost = (self.addrIP & ~self.addrMask)
self.addrSubnet = (self.addrIP & self.addrMask)
bcast = (self.addrSubnet | ~self.addrMask)
self.addrBroadcastTuple = (socket.inet_ntoa(struct.pack('!L', bcast & _long_mask)), self.addrPort)
if _debug: Address._debug(" - addrBroadcastTuple: %r", self.addrBroadcastTuple)
self.addrAddr = addrstr + struct.pack('!H', self.addrPort & _short_mask)
self.addrLen = 6
if (not settings.route_aware) and (route_addr or route_ip_addr):
Address._warning("route provided but not route aware: %r", addr)
if route_addr:
if route_addr.startswith("0x"):
self.addrRoute = Address(xtob(route_addr[2:]))
else:
self.addrRoute = Address(int(route_addr))
if _debug: Address._debug(" - addrRoute: %r", self.addrRoute)
elif route_ip_addr:
if not route_ip_port:
route_ip_port = '47808'
self.addrRoute = Address((route_ip_addr, int(route_ip_port)))
if _debug: Address._debug(" - addrRoute: %r", self.addrRoute)
return
if ethernet_re.match(addr):
if _debug: Address._debug(" - ethernet")
self.addrAddr = xtob(addr, ':')
self.addrLen = len(self.addrAddr)
return
if re.match(r"^\d+$", addr):
if _debug: Address._debug(" - int")
addr = int(addr)
if (addr > 255):
raise ValueError("address out of range")
self.addrAddr = struct.pack('B', addr)
self.addrLen = 1
return
if re.match(r"^\d+:[*]$", addr):
if _debug: Address._debug(" - remote broadcast")
addr = int(addr[:-2])
if (addr >= 65535):
raise ValueError("network out of range")
self.addrType = Address.remoteBroadcastAddr
self.addrNet = addr
self.addrAddr = None
self.addrLen = None
return
if re.match(r"^\d+:\d+$",addr):
if _debug: Address._debug(" - remote station")
net, addr = addr.split(':')
net = int(net)
addr = int(addr)
if (net >= 65535):
raise ValueError("network out of range")
if (addr > 255):
raise ValueError("address out of range")
self.addrType = Address.remoteStationAddr
self.addrNet = net
self.addrAddr = struct.pack('B', addr)
self.addrLen = 1
return
if re.match(r"^0x([0-9A-Fa-f][0-9A-Fa-f])+$",addr):
if _debug: Address._debug(" - modern hex string")
self.addrAddr = xtob(addr[2:])
self.addrLen = len(self.addrAddr)
return
if re.match(r"^X'([0-9A-Fa-f][0-9A-Fa-f])+'$",addr):
if _debug: Address._debug(" - old school hex string")
self.addrAddr = xtob(addr[2:-1])
self.addrLen = len(self.addrAddr)
return
if re.match(r"^\d+:0x([0-9A-Fa-f][0-9A-Fa-f])+$",addr):
if _debug: Address._debug(" - remote station with modern hex string")
net, addr = addr.split(':')
net = int(net)
if (net >= 65535):
raise ValueError("network out of range")
self.addrType = Address.remoteStationAddr
self.addrNet = net
self.addrAddr = xtob(addr[2:])
self.addrLen = len(self.addrAddr)
return
if re.match(r"^\d+:X'([0-9A-Fa-f][0-9A-Fa-f])+'$",addr):
if _debug: Address._debug(" - remote station with old school hex string")
net, addr = addr.split(':')
net = int(net)
if (net >= 65535):
raise ValueError("network out of range")
self.addrType = Address.remoteStationAddr
self.addrNet = net
self.addrAddr = xtob(addr[2:-1])
self.addrLen = len(self.addrAddr)
return
if netifaces and interface_re.match(addr):
if _debug: Address._debug(" - interface name with optional port")
interface, port = interface_re.match(addr).groups()
if port is not None:
self.addrPort = int(port)
else:
self.addrPort = 47808
interfaces = netifaces.interfaces()
if interface not in interfaces:
raise ValueError("not an interface: %s" % (interface,))
if _debug: Address._debug(" - interfaces: %r", interfaces)
ifaddresses = netifaces.ifaddresses(interface)
if netifaces.AF_INET not in ifaddresses:
raise ValueError("interface does not support IPv4: %s" % (interface,))
ipv4addresses = ifaddresses[netifaces.AF_INET]
if len(ipv4addresses) > 1:
raise ValueError("interface supports multiple IPv4 addresses: %s" % (interface,))
ifaddress = ipv4addresses[0]
if _debug: Address._debug(" - ifaddress: %r", ifaddress)
addr = ifaddress['addr']
self.addrTuple = (addr, self.addrPort)
if _debug: Address._debug(" - addrTuple: %r", self.addrTuple)
addrstr = socket.inet_aton(addr)
self.addrIP = struct.unpack('!L', addrstr)[0]
if 'netmask' in ifaddress:
maskstr = socket.inet_aton(ifaddress['netmask'])
self.addrMask = struct.unpack('!L', maskstr)[0]
else:
self.addrMask = _long_mask
self.addrHost = (self.addrIP & ~self.addrMask)
self.addrSubnet = (self.addrIP & self.addrMask)
if 'broadcast' in ifaddress:
self.addrBroadcastTuple = (ifaddress['broadcast'], self.addrPort)
else:
self.addrBroadcastTuple = None
if _debug: Address._debug(" - addrBroadcastTuple: %r", self.addrBroadcastTuple)
self.addrAddr = addrstr + struct.pack('!H', self.addrPort & _short_mask)
self.addrLen = 6
return
raise ValueError("unrecognized format")
elif isinstance(addr, tuple):
addr, port = addr
self.addrPort = int(port)
if isinstance(addr, basestring):
if not addr:
                    # When ('', n) is passed it is the local host address,
                    # but that could be more than one on a multihomed
                    # machine; the empty string means "any".
addrstr = b'\0\0\0\0'
else:
addrstr = socket.inet_aton(addr)
self.addrTuple = (addr, self.addrPort)
elif isinstance(addr, (int, long)):
addrstr = struct.pack('!L', addr & _long_mask)
self.addrTuple = (socket.inet_ntoa(addrstr), self.addrPort)
else:
raise TypeError("tuple must be (string, port) or (long, port)")
if _debug: Address._debug(" - addrstr: %r", addrstr)
self.addrIP = struct.unpack('!L', addrstr)[0]
self.addrMask = _long_mask
self.addrHost = None
self.addrSubnet = None
self.addrBroadcastTuple = self.addrTuple
self.addrAddr = addrstr + struct.pack('!H', self.addrPort & _short_mask)
self.addrLen = 6
else:
raise TypeError("integer, string or tuple required")
def __str__(self):
if self.addrType == Address.nullAddr:
rslt = 'Null'
elif self.addrType == Address.localBroadcastAddr:
rslt = '*'
elif self.addrType == Address.localStationAddr:
rslt = ''
if self.addrLen == 1:
rslt += str(ord(self.addrAddr))
else:
port = struct.unpack('!H', self.addrAddr[-2:])[0]
if (len(self.addrAddr) == 6) and (port >= 47808) and (port <= 47823):
rslt += '.'.join(["%d" % ord(x) for x in self.addrAddr[0:4]])
if port != 47808:
rslt += ':' + str(port)
else:
rslt += '0x' + btox(self.addrAddr)
elif self.addrType == Address.remoteBroadcastAddr:
rslt = '%d:*' % (self.addrNet,)
elif self.addrType == Address.remoteStationAddr:
rslt = '%d:' % (self.addrNet,)
if self.addrLen == 1:
rslt += str(ord(self.addrAddr[0]))
else:
port = struct.unpack('!H', self.addrAddr[-2:])[0]
if (len(self.addrAddr) == 6) and (port >= 47808) and (port <= 47823):
rslt += '.'.join(["%d" % ord(x) for x in self.addrAddr[0:4]])
if port != 47808:
rslt += ':' + str(port)
else:
rslt += '0x' + btox(self.addrAddr)
elif self.addrType == Address.globalBroadcastAddr:
rslt = "*:*"
else:
raise TypeError("unknown address type %d" % self.addrType)
if self.addrRoute:
rslt += "@" + str(self.addrRoute)
return rslt
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.__str__())
def _tuple(self):
if (not settings.route_aware) or (self.addrRoute is None):
return (self.addrType, self.addrNet, self.addrAddr, None)
else:
return (self.addrType, self.addrNet, self.addrAddr, self.addrRoute._tuple())
def __hash__(self):
return hash(self._tuple())
def __eq__(self, arg):
        # try to coerce it into an address
if not isinstance(arg, Address):
arg = Address(arg)
# basic components must match
rslt = (self.addrType == arg.addrType)
rslt = rslt and (self.addrNet == arg.addrNet)
rslt = rslt and (self.addrAddr == arg.addrAddr)
# if both have routes they must match
if rslt and self.addrRoute and arg.addrRoute:
rslt = rslt and (self.addrRoute == arg.addrRoute)
return rslt
def __ne__(self, arg):
return not self.__eq__(arg)
def __lt__(self, arg):
return self._tuple() < arg._tuple()
def dict_contents(self, use_dict=None, as_class=None):
"""Return the contents of an object as a dict."""
if _debug: _log.debug("dict_contents use_dict=%r as_class=%r", use_dict, as_class)
# exception to the rule of returning a dict
return str(self)
#
# pack_ip_addr, unpack_ip_addr
#
def pack_ip_addr(addr):
"""Given an IP address tuple like ('1.2.3.4', 47808) return the six-octet string
useful for a BACnet address."""
addr, port = addr
return socket.inet_aton(addr) + struct.pack('!H', port & _short_mask)
def unpack_ip_addr(addr):
"""Given a six-octet BACnet address, return an IP address tuple."""
return (socket.inet_ntoa(addr[0:4]), struct.unpack('!H', addr[4:6])[0])
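# Example round trip (47808 == 0xBAC0, the standard BACnet/IP port):
#   pack_ip_addr(('1.2.3.4', 47808))  -> '\x01\x02\x03\x04\xba\xc0'
#   unpack_ip_addr('\x01\x02\x03\x04\xba\xc0') -> ('1.2.3.4', 47808)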
#
# LocalStation
#
class LocalStation(Address):
def __init__(self, addr, route=None):
self.addrType = Address.localStationAddr
self.addrNet = None
self.addrRoute = route
if isinstance(addr, int):
if (addr < 0) or (addr >= 256):
raise ValueError("address out of range")
self.addrAddr = struct.pack('B', addr)
self.addrLen = 1
elif isinstance(addr, (bytes, bytearray)):
if _debug: Address._debug(" - bytes or bytearray")
self.addrAddr = bytes(addr)
self.addrLen = len(addr)
else:
raise TypeError("integer, bytes or bytearray required")
#
# RemoteStation
#
class RemoteStation(Address):
def __init__(self, net, addr, route=None):
if not isinstance(net, int):
raise TypeError("integer network required")
if (net < 0) or (net >= 65535):
raise ValueError("network out of range")
self.addrType = Address.remoteStationAddr
self.addrNet = net
self.addrRoute = route
if isinstance(addr, int):
if (addr < 0) or (addr >= 256):
raise ValueError("address out of range")
self.addrAddr = struct.pack('B', addr)
self.addrLen = 1
elif isinstance(addr, (bytes, bytearray)):
if _debug: Address._debug(" - bytes or bytearray")
self.addrAddr = bytes(addr)
self.addrLen = len(addr)
else:
raise TypeError("integer, bytes or bytearray required")
#
# LocalBroadcast
#
class LocalBroadcast(Address):
def __init__(self, route=None):
self.addrType = Address.localBroadcastAddr
self.addrNet = None
self.addrAddr = None
self.addrLen = None
self.addrRoute = route
#
# RemoteBroadcast
#
class RemoteBroadcast(Address):
def __init__(self, net, route=None):
if not isinstance(net, int):
raise TypeError("integer network required")
if (net < 0) or (net >= 65535):
raise ValueError("network out of range")
self.addrType = Address.remoteBroadcastAddr
self.addrNet = net
self.addrAddr = None
self.addrLen = None
self.addrRoute = route
#
# GlobalBroadcast
#
class GlobalBroadcast(Address):
def __init__(self, route=None):
self.addrType = Address.globalBroadcastAddr
self.addrNet = None
self.addrAddr = None
self.addrLen = None
self.addrRoute = route
#
# PCI
#
@bacpypes_debugging
class PCI(_PCI):
_debug_contents = ('pduExpectingReply', 'pduNetworkPriority')
def __init__(self, *args, **kwargs):
if _debug: PCI._debug("__init__ %r %r", args, kwargs)
# split out the keyword arguments that belong to this class
my_kwargs = {}
other_kwargs = {}
for element in ('expectingReply', 'networkPriority'):
if element in kwargs:
my_kwargs[element] = kwargs[element]
for kw in kwargs:
if kw not in my_kwargs:
other_kwargs[kw] = kwargs[kw]
if _debug: PCI._debug(" - my_kwargs: %r", my_kwargs)
if _debug: PCI._debug(" - other_kwargs: %r", other_kwargs)
# call some superclass, if there is one
super(PCI, self).__init__(*args, **other_kwargs)
# set the attribute/property values for the ones provided
self.pduExpectingReply = my_kwargs.get('expectingReply', 0) # see 6.2.2 (1 or 0)
self.pduNetworkPriority = my_kwargs.get('networkPriority', 0) # see 6.2.2 (0..3)
def update(self, pci):
"""Copy the PCI fields."""
_PCI.update(self, pci)
# now do the BACnet PCI fields
self.pduExpectingReply = pci.pduExpectingReply
self.pduNetworkPriority = pci.pduNetworkPriority
def pci_contents(self, use_dict=None, as_class=dict):
"""Return the contents of an object as a dict."""
if _debug: PCI._debug("pci_contents use_dict=%r as_class=%r", use_dict, as_class)
# make/extend the dictionary of content
if use_dict is None:
use_dict = as_class()
# call the parent class
_PCI.pci_contents(self, use_dict=use_dict, as_class=as_class)
# save the values
use_dict.__setitem__('expectingReply', self.pduExpectingReply)
use_dict.__setitem__('networkPriority', self.pduNetworkPriority)
# return what we built/updated
return use_dict
def dict_contents(self, use_dict=None, as_class=dict):
"""Return the contents of an object as a dict."""
if _debug: PCI._debug("dict_contents use_dict=%r as_class=%r", use_dict, as_class)
return self.pci_contents(use_dict=use_dict, as_class=as_class)
#
# PDU
#
@bacpypes_debugging
class PDU(PCI, PDUData):
def __init__(self, *args, **kwargs):
if _debug: PDU._debug("__init__ %r %r", args, kwargs)
super(PDU, self).__init__(*args, **kwargs)
def __str__(self):
return '<%s %s -> %s : %s>' % (self.__class__.__name__, self.pduSource, self.pduDestination, btox(self.pduData,'.'))
def dict_contents(self, use_dict=None, as_class=dict):
"""Return the contents of an object as a dict."""
if _debug: PDUData._debug("dict_contents use_dict=%r as_class=%r", use_dict, as_class)
# make/extend the dictionary of content
if use_dict is None:
use_dict = as_class()
# call into the two base classes
self.pci_contents(use_dict=use_dict, as_class=as_class)
self.pdudata_contents(use_dict=use_dict, as_class=as_class)
# return what we built/updated
return use_dict
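# Usage sketch (illustrative; assumes the _PCI base class from .comm accepts
# 'source' and 'destination' keywords for pduSource/pduDestination, as the
# __str__ above implies):
#
#   pdu = PDU(xtob('deadbeef'),
#             source=Address(1), destination=LocalBroadcast(),
#             expectingReply=1)
#   str(pdu)    # '<PDU 1 -> * : de.ad.be.ef>'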
|
|
import os
import uuid
from datetime import date, datetime, time
from django.db import models
from django.db.models.query import QuerySet
from django.db.models import sql, manager
from django.contrib.auth.models import User
### For basic tests and benchmarks
class Category(models.Model):
title = models.CharField(max_length=128)
def __unicode__(self):
return self.title
class Post(models.Model):
title = models.CharField(max_length=128)
category = models.ForeignKey(Category, on_delete=models.CASCADE, related_name='posts')
visible = models.BooleanField(default=True)
def __unicode__(self):
return self.title
class Extra(models.Model):
post = models.OneToOneField(Post, on_delete=models.CASCADE)
tag = models.IntegerField(db_column='custom_column_name', unique=True)
to_tag = models.ForeignKey('self', on_delete=models.CASCADE, to_field='tag', null=True)
def __unicode__(self):
return 'Extra(post_id=%s, tag=%s)' % (self.post_id, self.tag)
### Specific and custom fields
class CustomValue(object):
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
def __eq__(self, other):
return isinstance(other, CustomValue) and self.value == other.value
class CustomField(models.Field):
def db_type(self, connection):
return 'text'
def to_python(self, value):
if isinstance(value, CustomValue):
return value
return CustomValue(value)
    def from_db_value(self, value, expression, conn):
return self.to_python(value)
def get_prep_value(self, value):
return value.value
class CustomWhere(sql.where.WhereNode):
pass
class CustomQuery(sql.Query):
pass
class CustomManager(models.Manager):
def get_query_set(self):
q = CustomQuery(self.model, CustomWhere)
return QuerySet(self.model, q)
get_queryset = get_query_set
class IntegerArrayField(models.Field):
def db_type(self, connection):
return 'text'
def to_python(self, value):
if value in (None, ''):
return None
if isinstance(value, list):
return value
return [int(v) for v in value.split(',')]
    def from_db_value(self, value, expression, conn):
return self.to_python(value)
def get_prep_value(self, value):
return ','.join(map(str, value))
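# Illustrative round trip for IntegerArrayField: [1, 2, 3] is stored in the
# text column as '1,2,3' by get_prep_value() and parsed back to [1, 2, 3]
# by to_python()/from_db_value().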
def custom_value_default():
return CustomValue('default')
class Weird(models.Model):
date_field = models.DateField(default=date(2000, 1, 1))
datetime_field = models.DateTimeField(default=datetime(2000, 1, 1, 10, 10))
time_field = models.TimeField(default=time(10, 10))
list_field = IntegerArrayField(default=list, blank=True)
custom_field = CustomField(default=custom_value_default)
binary_field = models.BinaryField()
objects = models.Manager()
customs = CustomManager()
# TODO: check other new fields:
# - PostgreSQL ones: HStoreField, RangeFields, unaccent
# - Other: DurationField
if os.environ.get('CACHEOPS_DB') in {'postgresql', 'postgis'}:
from django.contrib.postgres.fields import ArrayField
try:
from django.db.models import JSONField
except ImportError:
try:
from django.contrib.postgres.fields import JSONField # Used before Django 3.1
except ImportError:
JSONField = None
class TaggedPost(models.Model):
name = models.CharField(max_length=200)
tags = ArrayField(models.IntegerField())
if JSONField:
meta = JSONField()
# 16
class Profile(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
tag = models.IntegerField()
# Proxy model
class Video(models.Model):
title = models.CharField(max_length=128)
class VideoProxy(Video):
class Meta:
proxy = True
class NonCachedVideoProxy(Video):
class Meta:
proxy = True
class NonCachedMedia(models.Model):
title = models.CharField(max_length=128)
class MediaProxy(NonCachedMedia):
class Meta:
proxy = True
# Multi-table inheritance
class Media(models.Model):
name = models.CharField(max_length=128)
class Movie(Media):
year = models.IntegerField()
# M2M models
class Label(models.Model):
text = models.CharField(max_length=127, blank=True, default='')
class Brand(models.Model):
labels = models.ManyToManyField(Label, related_name='brands')
# M2M with explicit through models
class LabelT(models.Model):
text = models.CharField(max_length=127, blank=True, default='')
class BrandT(models.Model):
labels = models.ManyToManyField(LabelT, related_name='brands', through='Labeling')
class Labeling(models.Model):
label = models.ForeignKey(LabelT, on_delete=models.CASCADE)
brand = models.ForeignKey(BrandT, on_delete=models.CASCADE)
tag = models.IntegerField()
class PremiumBrand(Brand):
extra = models.CharField(max_length=127, blank=True, default='')
# local_get
class Local(models.Model):
tag = models.IntegerField(null=True)
# 45
class CacheOnSaveModel(models.Model):
title = models.CharField(max_length=32)
# 47
class DbAgnostic(models.Model):
pass
class DbBinded(models.Model):
pass
# contrib.postgis
if os.environ.get('CACHEOPS_DB') == 'postgis':
from django.contrib.gis.db import models as gis_models
class Geometry(gis_models.Model):
point = gis_models.PointField(geography=True, dim=3, blank=True, null=True, default=None)
# 145
class One(models.Model):
boolean = models.BooleanField(default=False)
def set_boolean_true(sender, instance, created, **kwargs):
if created:
return
dialog = One.objects.cache().get(id=instance.id)
assert dialog.boolean is True
from django.db.models.signals import post_save
post_save.connect(set_boolean_true, sender=One)
# 312
class Device(models.Model):
uid = models.UUIDField(default=uuid.uuid4)
model = models.CharField(max_length=64)
# 333
class CustomQuerySet(QuerySet):
pass
class CustomFromQSManager(manager.BaseManager.from_queryset(CustomQuerySet)):
use_for_related_fields = True
class CustomFromQSModel(models.Model):
boolean = models.BooleanField(default=False)
objects = CustomFromQSManager()
# 352
class CombinedField(models.CharField):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.another_field = models.CharField(*args, **kwargs)
def contribute_to_class(self, cls, name, **kwargs):
super().contribute_to_class(cls, name, private_only=True)
self.another_field.contribute_to_class(cls, name, **kwargs)
class CombinedFieldModel(models.Model):
text = CombinedField(max_length=8, default='example')
# 353
class Foo(models.Model):
pass
class Bar(models.Model):
foo = models.OneToOneField(
to="Foo",
on_delete=models.SET_NULL,
related_name='bar',
blank=True,
null=True
)
# 385
class Client(models.Model):
def __init__(self, *args, **kwargs):
        # curry() was copied from Django 2.1.5; it no longer exists in the
        # Django 3.1.5 installed by the current requirements.
def curry(_curried_func, *args, **kwargs):
def _curried(*moreargs, **morekwargs):
return _curried_func(*args, *moreargs, **{**kwargs, **morekwargs})
return _curried
super().__init__(*args, **kwargs)
setattr(self, '_get_private_data', curry(sum, [1, 2, 3, 4]))
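        # Illustration: curry(sum, [1, 2, 3, 4]) freezes the arguments, so
        # calling instance._get_private_data() returns sum([1, 2, 3, 4]) == 10.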
name = models.CharField(max_length=255)
|
|
"""
Test featurize.py.
"""
import joblib
import numpy as np
from rdkit_utils import conformers, serial
import shutil
import tempfile
import unittest
from rdkit import Chem
from rdkit.Chem import AllChem
from vs_utils.scripts.featurize import main, parse_args
from vs_utils.utils import read_pickle, write_pickle
class TestFeaturize(unittest.TestCase):
"""
Test featurize.py.
"""
def setUp(self):
"""
Set up for tests. Writes molecules and targets to files.
"""
self.temp_dir = tempfile.mkdtemp()
smiles = ['CC(=O)OC1=CC=CC=C1C(=O)O',
'C[C@@H](C1=CC=C(C=C1)CC(C)C)C(=O)O']
self.names = ['aspirin', 'ibuprofen']
engine = conformers.ConformerGenerator(max_conformers=1)
self.mols = []
self.smiles = [] # use RDKit-generated SMILES
        for smi, name in zip(smiles, self.names):
            mol = Chem.MolFromSmiles(smi)
            mol.SetProp('_Name', name)
            self.mols.append(engine.generate_conformers(mol))
            self.smiles.append(Chem.MolToSmiles(mol, isomericSmiles=True,
                                                canonical=True))
# write mols
_, self.input_filename = tempfile.mkstemp(suffix='.sdf',
dir=self.temp_dir)
writer = serial.MolWriter()
writer.open(self.input_filename)
writer.write(self.mols)
writer.close()
# write targets
self.targets = [0, 1]
_, self.targets_filename = tempfile.mkstemp(suffix='.pkl',
dir=self.temp_dir)
write_pickle(self.targets, self.targets_filename)
def tearDown(self):
"""
Delete temporary files.
"""
shutil.rmtree(self.temp_dir)
def check_output(self, featurize_args, shape, targets=None, names=None,
smiles=None, output_suffix='.pkl'):
"""
Check features shape, targets, and names.
Parameters
----------
featurize_args : list
Featurizer-specific arguments for script.
shape : tuple
Expected shape of features.
targets : list, optional
Expected targets. Defaults to self.targets.
names : list, optional
Expected names. Defaults to self.names.
smiles : list, optional
Expected SMILES. Defaults to self.smiles.
output_suffix : str, optional (default '.pkl')
Suffix for output files.
"""
# generate command-line arguments
_, output_filename = tempfile.mkstemp(suffix=output_suffix,
dir=self.temp_dir)
input_args = [self.input_filename, '-t', self.targets_filename,
output_filename, '--names'] + featurize_args
# run script
args = parse_args(input_args)
main(args.klass, args.input, args.output, target_filename=args.targets,
featurizer_kwargs=vars(args.featurizer_kwargs), names=args.names,
scaffolds=args.scaffolds, chiral_scaffolds=args.chiral_scaffolds)
# read output file
if output_filename.endswith('.joblib'):
data = joblib.load(output_filename)
else:
data = read_pickle(output_filename)
# check values
if targets is None:
targets = self.targets
if names is None:
names = self.names
if smiles is None:
smiles = self.smiles
assert data['features'].shape == shape, data['features'].shape
assert np.array_equal(data['y'], targets), data['y']
assert np.array_equal(data['names'], names), data['names']
assert np.array_equal(data['smiles'], smiles), data['smiles']
# return output in case anything else needs to be checked
return data
def test_pickle(self):
"""
Save features to a pickle.
"""
self.check_output(['circular'], (2, 2048))
def test_compressed_pickle(self):
"""
Save features to a compressed pickle.
"""
self.check_output(['circular'], (2, 2048), output_suffix='.pkl.gz')
def test_joblib(self):
"""
Save features using joblib.dump.
"""
self.check_output(['circular'], (2, 2048), output_suffix='.joblib')
def test_circular(self):
"""
Test circular fingerprints.
"""
self.check_output(['circular', '--size', '512'], (2, 512))
def test_sparse_circular(self):
"""
Test sparse circular fingerprints.
"""
data = self.check_output(['circular', '--sparse'], (2,))
for value in data['features']:
assert isinstance(value, dict)
assert len(value)
def test_coulomb_matrix(self):
"""
Test Coulomb matrices.
"""
self.check_output(['coulomb_matrix', '--max_atoms', '50'],
(2, 1, 1275))
def test_image_features(self):
"""
Test image features.
"""
self.check_output(['image', '--size', '16'], (2, 16, 16, 3))
def test_esp(self):
"""
Test ESP.
"""
self.check_output(['esp', '--size', '20'], (2, 1, 61, 61, 61))
def test_shape_grid(self):
"""
Test ShapeGrid.
"""
self.check_output(['shape', '--size', '40'], (2, 1, 40, 40, 40))
def test_mw(self):
"""
Test calculation of molecular weight.
"""
self.check_output(['mw'], (2, 1))
def test_descriptors(self):
"""
Test calculation of RDKit descriptors.
"""
self.check_output(['descriptors'], (2, 196))
def test_scaffold(self):
"""
Test scaffold featurizer.
"""
self.check_output(['scaffold'], (2,))
def test_scaffolds(self):
"""
Test scaffold generation.
"""
data = self.check_output(['--scaffolds', 'circular'], (2, 2048))
assert Chem.MolFromSmiles(data['scaffolds'][0]).GetNumAtoms() == 6
assert Chem.MolFromSmiles(data['scaffolds'][1]).GetNumAtoms() == 6
def test_chiral_scaffolds(self):
"""
Test chiral scaffold generation.
"""
# romosetron
mol = Chem.MolFromSmiles(
'CN1C=C(C2=CC=CC=C21)C(=O)[C@@H]3CCC4=C(C3)NC=N4')
mol.SetProp('_Name', 'romosetron')
AllChem.Compute2DCoords(mol)
self.mols[1] = mol
self.names[1] = 'romosetron'
self.smiles[1] = Chem.MolToSmiles(mol, isomericSmiles=True)
# write mols
_, self.input_filename = tempfile.mkstemp(suffix='.sdf',
dir=self.temp_dir)
writer = serial.MolWriter()
writer.open(self.input_filename)
writer.write(self.mols)
writer.close()
# run script w/o chiral scaffolds
data = self.check_output(['--scaffolds', 'circular'], (2, 2048))
achiral_scaffold = data['scaffolds'][1]
# run script w/ chiral scaffolds
data = self.check_output(['--scaffolds', '--chiral-scaffolds',
'circular'], (2, 2048))
chiral_scaffold = data['scaffolds'][1]
assert achiral_scaffold != chiral_scaffold
def test_collate_mols1(self):
"""
Test collate_mols where molecules are pruned.
"""
# write targets
targets = {'names': ['ibuprofen'], 'y': [0]}
write_pickle(targets, self.targets_filename)
# run script
self.check_output(['circular'], (1, 2048), targets=targets['y'],
names=targets['names'], smiles=[self.smiles[1]])
def test_collate_mols2(self):
"""
Test collate_mols where targets are pruned.
"""
# write targets
targets = {'names': ['aspirin', 'ibuprofen'], 'y': [0, 1]}
write_pickle(targets, self.targets_filename)
# write mols
writer = serial.MolWriter()
writer.open(self.input_filename)
writer.write([self.mols[0]])
writer.close()
# run script
self.check_output(['circular'], (1, 2048), targets=[0],
names=['aspirin'], smiles=[self.smiles[0]])
def test_collate_mols3(self):
"""
Test collate_mols where targets are in a different order than
molecules.
"""
# write targets
targets = {'names': ['ibuprofen', 'aspirin'], 'y': [1, 0]}
write_pickle(targets, self.targets_filename)
# run script
self.check_output(['circular'], (2, 2048))
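# A hedged sketch (not a test) of the programmatic entry points exercised by
# check_output above; it assumes the script's defaults tolerate running
# without a targets file.
def _featurize_demo(input_sdf, output_pkl):
    args = parse_args([input_sdf, output_pkl, 'circular', '--size', '512'])
    main(args.klass, args.input, args.output, target_filename=args.targets,
         featurizer_kwargs=vars(args.featurizer_kwargs), names=args.names,
         scaffolds=args.scaffolds, chiral_scaffolds=args.chiral_scaffolds)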
|
|
"""Tests for the main tournament class."""
import axelrod
import logging
from multiprocessing import Queue, cpu_count
import unittest
import random
import tempfile
import csv
from hypothesis import given, example, settings
from hypothesis.strategies import integers, lists, sampled_from, random_module, floats
try:
# Python 3
from unittest.mock import MagicMock
except ImportError:
# Python 2
from mock import MagicMock
test_strategies = [axelrod.Cooperator,
axelrod.TitForTat,
axelrod.Defector,
axelrod.Grudger,
axelrod.GoByMajority]
test_repetitions = 5
test_turns = 100
test_prob_end = .5
class TestTournament(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.game = axelrod.Game()
cls.players = [s() for s in test_strategies]
cls.test_name = 'test'
cls.test_repetitions = test_repetitions
cls.test_turns = test_turns
cls.expected_payoff = [
[600, 600, 0, 600, 600],
[600, 600, 199, 600, 600],
[1000, 204, 200, 204, 204],
[600, 600, 199, 600, 600],
[600, 600, 199, 600, 600]]
cls.expected_cooperation = [
[200, 200, 200, 200, 200],
[200, 200, 1, 200, 200],
[0, 0, 0, 0, 0],
[200, 200, 1, 200, 200],
[200, 200, 1, 200, 200]]
def test_init(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=self.test_turns,
processes=4,
noise=0.2)
self.assertEqual(len(tournament.players), len(test_strategies))
self.assertIsInstance(
tournament.players[0].match_attributes['game'], axelrod.Game
)
self.assertEqual(tournament.game.score(('C', 'C')), (3, 3))
self.assertEqual(tournament.turns, self.test_turns)
self.assertEqual(tournament.repetitions, 10)
self.assertEqual(tournament.name, 'test')
self.assertEqual(tournament._processes, 4)
self.assertFalse(tournament.prebuilt_cache)
self.assertTrue(tournament._with_morality)
self.assertIsInstance(tournament._logger, logging.Logger)
self.assertEqual(tournament.deterministic_cache, {})
self.assertEqual(tournament.noise, 0.2)
self.assertEqual(tournament._parallel_repetitions, 10)
anonymous_tournament = axelrod.Tournament(players=self.players)
self.assertEqual(anonymous_tournament.name, 'axelrod')
def test_serial_play(self):
# Test that we get an instance of ResultSet
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions)
results = tournament.play()
self.assertIsInstance(results, axelrod.ResultSet)
# Test that _run_serial_repetitions is called with empty matches list
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions)
tournament._run_serial_repetitions = MagicMock(
name='_run_serial_repetitions')
tournament._run_parallel_repetitions = MagicMock(
name='_run_parallel_repetitions')
tournament.play()
tournament._run_serial_repetitions.assert_called_once_with([])
self.assertFalse(tournament._run_parallel_repetitions.called)
@given(s=lists(sampled_from(axelrod.strategies),
                   min_size=2,  # Errors are raised if fewer than 2 strategies are sampled
max_size=5, unique=True),
turns=integers(min_value=2, max_value=50),
repetitions=integers(min_value=2, max_value=4),
rm=random_module())
@settings(max_examples=50, timeout=0)
@example(s=test_strategies, turns=test_turns, repetitions=test_repetitions,
rm=random.seed(0))
# These two examples are to make sure #465 is fixed.
# As explained there: https://github.com/Axelrod-Python/Axelrod/issues/465,
# these two examples were identified by hypothesis.
@example(s=[axelrod.BackStabber, axelrod.MindReader], turns=2, repetitions=1,
rm=random.seed(0))
@example(s=[axelrod.ThueMorse, axelrod.MindReader], turns=2, repetitions=1,
rm=random.seed(0))
def test_property_serial_play(self, s, turns, repetitions, rm):
"""Test serial play using hypothesis"""
# Test that we get an instance of ResultSet
players = [strat() for strat in s]
tournament = axelrod.Tournament(
name=self.test_name,
players=players,
game=self.game,
turns=turns,
repetitions=repetitions)
results = tournament.play()
self.assertIsInstance(results, axelrod.ResultSet)
self.assertEqual(results.nplayers, len(players))
self.assertEqual(results.players, players)
def test_parallel_play(self):
# Test that we get an instance of ResultSet
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions,
processes=2)
results = tournament.play()
self.assertIsInstance(results, axelrod.ResultSet)
# The following relates to #516
players = [axelrod.Cooperator(), axelrod.Defector(),
axelrod.BackStabber(), axelrod.PSOGambler(),
axelrod.ThueMorse(), axelrod.DoubleCrosser()]
tournament = axelrod.Tournament(
name=self.test_name,
players=players,
game=self.game,
turns=20,
repetitions=self.test_repetitions,
processes=2)
scores = tournament.play().scores
self.assertEqual(len(scores), len(players))
def test_build_cache_required(self):
# Noisy, no prebuilt cache, empty deterministic cache
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
processes=4,
noise=0.2,
prebuilt_cache=False)
self.assertFalse(tournament._build_cache_required())
# Noisy, with prebuilt cache, empty deterministic cache
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
processes=4,
noise=0.2,
prebuilt_cache=True)
self.assertFalse(tournament._build_cache_required())
# Not noisy, with prebuilt cache, deterministic cache has content
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
processes=4,
prebuilt_cache=True)
tournament.deterministic_cache = {'test': 100}
self.assertFalse(tournament._build_cache_required())
# Not noisy, no prebuilt cache, deterministic cache has content
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
processes=4,
prebuilt_cache=False)
tournament.deterministic_cache = {'test': 100}
self.assertTrue(tournament._build_cache_required())
# Not noisy, with prebuilt cache, empty deterministic cache
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
processes=4,
prebuilt_cache=True)
self.assertTrue(tournament._build_cache_required())
# Not noisy, no prebuilt cache, empty deterministic cache
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
processes=4,
prebuilt_cache=False)
self.assertTrue(tournament._build_cache_required())
def test_build_cache(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions,
processes=2)
tournament._run_single_repetition = MagicMock(
name='_run_single_repetition')
tournament._build_cache([])
tournament._run_single_repetition.assert_called_once_with([])
self.assertEqual(
tournament._parallel_repetitions, self.test_repetitions - 1)
def test_run_single_repetition(self):
interactions = []
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions)
tournament._run_single_repetition(interactions)
self.assertEqual(len(tournament.interactions), 1)
self.assertEqual(len(tournament.interactions[0]), 15)
def test_run_serial_repetitions(self):
interactions = []
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions)
tournament._run_serial_repetitions(interactions)
self.assertEqual(len(tournament.interactions), self.test_repetitions)
def test_run_parallel_repetitions(self):
interactions = []
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions,
processes=2)
tournament._run_parallel_repetitions(interactions)
self.assertEqual(len(interactions), self.test_repetitions)
for r in interactions:
self.assertEqual(len(r.values()), 15)
def test_n_workers(self):
max_processes = cpu_count()
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions,
processes=1)
self.assertEqual(tournament._n_workers(), max_processes)
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions,
processes=max_processes + 2)
self.assertEqual(tournament._n_workers(), max_processes)
@unittest.skipIf(
cpu_count() < 2,
"not supported on single processor machines")
def test_2_workers(self):
# This is a separate test with a skip condition because we
# cannot guarantee that the tests will always run on a machine
# with more than one processor
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions,
processes=2)
self.assertEqual(tournament._n_workers(), 2)
def test_start_workers(self):
workers = 2
work_queue = Queue()
done_queue = Queue()
for repetition in range(self.test_repetitions):
work_queue.put(repetition)
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions)
tournament._start_workers(workers, work_queue, done_queue)
stops = 0
while stops < workers:
payoffs = done_queue.get()
if payoffs == 'STOP':
stops += 1
self.assertEqual(stops, workers)
def test_process_done_queue(self):
workers = 2
done_queue = Queue()
matches = []
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions)
for r in range(self.test_repetitions):
done_queue.put({})
for w in range(workers):
done_queue.put('STOP')
tournament._process_done_queue(workers, done_queue, matches)
self.assertEqual(len(matches), self.test_repetitions)
def test_worker(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions)
work_queue = Queue()
for repetition in range(self.test_repetitions):
work_queue.put(repetition)
work_queue.put('STOP')
done_queue = Queue()
tournament._worker(work_queue, done_queue)
for r in range(self.test_repetitions):
new_matches = done_queue.get()
self.assertEqual(len(new_matches), 15)
for index_pair, match in new_matches.items():
self.assertIsInstance(index_pair, tuple)
self.assertIsInstance(match, list)
queue_stop = done_queue.get()
self.assertEqual(queue_stop, 'STOP')
def test_build_result_set(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=200,
repetitions=self.test_repetitions)
results = tournament._build_result_set()
self.assertIsInstance(results, axelrod.ResultSet)
@given(turns=integers(min_value=1, max_value=200))
@example(turns=3)
@example(turns=200)
def test_play_matches(self, turns):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
repetitions=self.test_repetitions)
def make_generator():
"""Return a generator used by this method"""
player_classes = [axelrod.Cooperator, axelrod.TitForTat,
axelrod.Defector, axelrod.Grudger]
for i, player_cls in enumerate(player_classes):
for j, opponent_cls in enumerate(player_classes):
if j >= i: # These matches correspond to a round robin
players = (player_cls(), opponent_cls())
match = axelrod.Match(players, turns=turns)
yield ((i, j), match)
matches_generator = make_generator()
interactions = tournament._play_matches(matches_generator)
self.assertEqual(len(interactions), 10)
for index_pair, inter in interactions.items():
self.assertEqual(len(inter), turns)
self.assertEqual(len(index_pair), 2)
for plays in inter:
self.assertIsInstance(plays, tuple)
self.assertEqual(len(plays), 2)
        # The matches generator should now be exhausted
        self.assertEqual(len(list(matches_generator)), 0)
def test_play_and_write_to_csv(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=2,
repetitions=2)
tmp_file = tempfile.NamedTemporaryFile(mode='w', delete=False)
tournament.play(filename=tmp_file.name)
with open(tmp_file.name, 'r') as f:
written_data = [[int(r[0]), int(r[1])] + r[2:] for r in csv.reader(f)]
expected_data = [[0, 1, 'Cooperator', 'Tit For Tat', 'CCCC', 'CCCC'],
[1, 2, 'Tit For Tat', 'Defector', 'CDDD', 'CDDD'],
[0, 0, 'Cooperator', 'Cooperator', 'CCCC', 'CCCC'],
[3, 3, 'Grudger', 'Grudger', 'CCCC', 'CCCC'],
[2, 2, 'Defector', 'Defector', 'DDDD', 'DDDD'],
[4, 4, 'Soft Go By Majority', 'Soft Go By Majority',
'CCCC', 'CCCC'],
[1, 4, 'Tit For Tat', 'Soft Go By Majority',
'CCCC', 'CCCC'],
[1, 1, 'Tit For Tat', 'Tit For Tat', 'CCCC', 'CCCC'],
[1, 3, 'Tit For Tat', 'Grudger', 'CCCC', 'CCCC'],
[2, 3, 'Defector', 'Grudger', 'DCDD', 'DCDD'],
[0, 4, 'Cooperator', 'Soft Go By Majority',
'CCCC', 'CCCC'],
[2, 4, 'Defector', 'Soft Go By Majority',
'DCDD', 'DCDD'],
[0, 3, 'Cooperator', 'Grudger', 'CCCC', 'CCCC'],
[3, 4, 'Grudger', 'Soft Go By Majority',
'CCCC', 'CCCC'],
[0, 2, 'Cooperator', 'Defector', 'CDCD', 'CDCD']]
self.assertEqual(sorted(written_data), sorted(expected_data))
def test_write_to_csv(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=2,
repetitions=2)
tournament.play()
tmp_file = tempfile.NamedTemporaryFile(mode='w', delete=False)
tournament._write_to_csv(tmp_file.name)
with open(tmp_file.name, 'r') as f:
written_data = [[int(r[0]), int(r[1])] + r[2:] for r in csv.reader(f)]
expected_data = [[0, 1, 'Cooperator', 'Tit For Tat', 'CCCC', 'CCCC'],
[1, 2, 'Tit For Tat', 'Defector', 'CDDD', 'CDDD'],
[0, 0, 'Cooperator', 'Cooperator', 'CCCC', 'CCCC'],
[3, 3, 'Grudger', 'Grudger', 'CCCC', 'CCCC'],
[2, 2, 'Defector', 'Defector', 'DDDD', 'DDDD'],
[4, 4, 'Soft Go By Majority', 'Soft Go By Majority',
'CCCC', 'CCCC'],
[1, 4, 'Tit For Tat', 'Soft Go By Majority',
'CCCC', 'CCCC'],
[1, 1, 'Tit For Tat', 'Tit For Tat', 'CCCC', 'CCCC'],
[1, 3, 'Tit For Tat', 'Grudger', 'CCCC', 'CCCC'],
[2, 3, 'Defector', 'Grudger', 'DCDD', 'DCDD'],
[0, 4, 'Cooperator', 'Soft Go By Majority',
'CCCC', 'CCCC'],
[2, 4, 'Defector', 'Soft Go By Majority',
'DCDD', 'DCDD'],
[0, 3, 'Cooperator', 'Grudger', 'CCCC', 'CCCC'],
[3, 4, 'Grudger', 'Soft Go By Majority',
'CCCC', 'CCCC'],
[0, 2, 'Cooperator', 'Defector', 'CDCD', 'CDCD']]
self.assertEqual(sorted(written_data), sorted(expected_data))
def test_data_for_csv(self):
tournament = axelrod.Tournament(
name=self.test_name,
players=self.players,
game=self.game,
turns=2,
repetitions=2)
tournament.play()
expected_data = [[0, 1, 'Cooperator', 'Tit For Tat', 'CCCC', 'CCCC'],
[1, 2, 'Tit For Tat', 'Defector', 'CDDD', 'CDDD'],
[0, 0, 'Cooperator', 'Cooperator', 'CCCC', 'CCCC'],
[3, 3, 'Grudger', 'Grudger', 'CCCC', 'CCCC'],
[2, 2, 'Defector', 'Defector', 'DDDD', 'DDDD'],
[4, 4, 'Soft Go By Majority', 'Soft Go By Majority',
'CCCC', 'CCCC'],
[1, 4, 'Tit For Tat', 'Soft Go By Majority',
'CCCC', 'CCCC'],
[1, 1, 'Tit For Tat', 'Tit For Tat', 'CCCC', 'CCCC'],
[1, 3, 'Tit For Tat', 'Grudger', 'CCCC', 'CCCC'],
[2, 3, 'Defector', 'Grudger', 'DCDD', 'DCDD'],
[0, 4, 'Cooperator', 'Soft Go By Majority',
'CCCC', 'CCCC'],
[2, 4, 'Defector', 'Soft Go By Majority',
'DCDD', 'DCDD'],
[0, 3, 'Cooperator', 'Grudger', 'CCCC', 'CCCC'],
[3, 4, 'Grudger', 'Soft Go By Majority',
'CCCC', 'CCCC'],
[0, 2, 'Cooperator', 'Defector', 'CDCD', 'CDCD']]
generator_data = tournament._data_for_csv()
for row, expected_row in zip(sorted(generator_data), sorted(expected_data)):
self.assertEqual(row, expected_row)
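# A hedged sketch (not one of the unit tests above): the public API these
# tests exercise reduces to building a Tournament and calling play().
def _tournament_demo():
    players = [axelrod.Cooperator(), axelrod.Defector()]
    tournament = axelrod.Tournament(players=players, turns=10, repetitions=2)
    results = tournament.play()
    assert isinstance(results, axelrod.ResultSet)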
class TestProbEndTournament(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.game = axelrod.Game()
cls.players = [s() for s in test_strategies]
cls.test_name = 'test'
cls.test_repetitions = test_repetitions
cls.test_prob_end = test_prob_end
def test_init(self):
tournament = axelrod.ProbEndTournament(
name=self.test_name,
players=self.players,
game=self.game,
prob_end=self.test_prob_end,
noise=0.2)
self.assertEqual(tournament.match_generator.prob_end, tournament.prob_end)
self.assertEqual(len(tournament.players), len(test_strategies))
self.assertEqual(tournament.game.score(('C', 'C')), (3, 3))
self.assertEqual(tournament.turns, float("inf"))
self.assertEqual(tournament.repetitions, 10)
self.assertEqual(tournament.name, 'test')
self.assertEqual(tournament._processes, None)
self.assertFalse(tournament.prebuilt_cache)
self.assertTrue(tournament._with_morality)
self.assertIsInstance(tournament._logger, logging.Logger)
self.assertEqual(tournament.deterministic_cache, {})
self.assertEqual(tournament.noise, 0.2)
self.assertEqual(tournament._parallel_repetitions, 10)
anonymous_tournament = axelrod.Tournament(players=self.players)
self.assertEqual(anonymous_tournament.name, 'axelrod')
@given(s=lists(sampled_from(axelrod.strategies),
                   min_size=2,  # Errors are raised if fewer than 2 strategies are sampled
max_size=5, unique=True),
prob_end=floats(min_value=.1, max_value=.9),
repetitions=integers(min_value=2, max_value=4),
rm=random_module())
@settings(max_examples=50, timeout=0)
@example(s=test_strategies, prob_end=test_prob_end,
repetitions=test_repetitions,
rm=random.seed(0))
def test_build_cache_never_required(self, s, prob_end, repetitions, rm):
"""
As the matches have a sampled length a cache is never required.
"""
players = [strat() for strat in s]
tournament = axelrod.ProbEndTournament(
name=self.test_name,
players=players,
game=self.game,
prob_end=prob_end,
repetitions=repetitions)
self.assertFalse(tournament._build_cache_required())
@given(s=lists(sampled_from(axelrod.strategies),
                   min_size=2,  # Errors are raised if fewer than 2 strategies are sampled
max_size=5, unique=True),
prob_end=floats(min_value=.1, max_value=.9),
repetitions=integers(min_value=2, max_value=4),
rm=random_module())
@settings(max_examples=50, timeout=0)
@example(s=test_strategies, prob_end=.2, repetitions=test_repetitions,
rm=random.seed(0))
# These two examples are to make sure #465 is fixed.
# As explained there: https://github.com/Axelrod-Python/Axelrod/issues/465,
# these two examples were identified by hypothesis.
@example(s=[axelrod.BackStabber, axelrod.MindReader], prob_end=.2, repetitions=1,
rm=random.seed(0))
@example(s=[axelrod.ThueMorse, axelrod.MindReader], prob_end=.2, repetitions=1,
rm=random.seed(0))
def test_property_serial_play(self, s, prob_end, repetitions, rm):
"""Test serial play using hypothesis"""
# Test that we get an instance of ResultSet
players = [strat() for strat in s]
tournament = axelrod.ProbEndTournament(
name=self.test_name,
players=players,
game=self.game,
prob_end=prob_end,
repetitions=repetitions)
results = tournament.play()
self.assertIsInstance(results, axelrod.ResultSet)
self.assertEqual(results.nplayers, len(players))
self.assertEqual(results.players, players)
self.assertEqual(len(results.interactions), repetitions)
|
|
import sys, os, binascii, shutil, io
from . import __version_verifier_modules__
from . import ffiplatform
if sys.version_info >= (3, 3):
import importlib.machinery
def _extension_suffixes():
return importlib.machinery.EXTENSION_SUFFIXES[:]
else:
import imp
def _extension_suffixes():
return [suffix for suffix, _, type in imp.get_suffixes()
if type == imp.C_EXTENSION]
if sys.version_info >= (3,):
NativeIO = io.StringIO
else:
class NativeIO(io.BytesIO):
def write(self, s):
if isinstance(s, unicode):
s = s.encode('ascii')
super(NativeIO, self).write(s)
class Verifier(object):
def __init__(self, ffi, preamble, tmpdir=None, modulename=None,
ext_package=None, tag='', force_generic_engine=False,
source_extension='.c', flags=None, relative_to=None, **kwds):
self.ffi = ffi
self.preamble = preamble
if not modulename:
flattened_kwds = ffiplatform.flatten(kwds)
vengine_class = _locate_engine_class(ffi, force_generic_engine)
self._vengine = vengine_class(self)
self._vengine.patch_extension_kwds(kwds)
self.flags = flags
self.kwds = self.make_relative_to(kwds, relative_to)
#
if modulename:
if tag:
raise TypeError("can't specify both 'modulename' and 'tag'")
else:
key = '\x00'.join([sys.version[:3], __version_verifier_modules__,
preamble, flattened_kwds] +
ffi._cdefsources)
if sys.version_info >= (3,):
key = key.encode('utf-8')
k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff)
k1 = k1.lstrip('0x').rstrip('L')
k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff)
k2 = k2.lstrip('0').rstrip('L')
modulename = '_cffi_%s_%s%s%s' % (tag, self._vengine._class_key,
k1, k2)
suffix = _get_so_suffixes()[0]
self.tmpdir = tmpdir or _caller_dir_pycache()
self.sourcefilename = os.path.join(self.tmpdir, modulename + source_extension)
self.modulefilename = os.path.join(self.tmpdir, modulename + suffix)
self.ext_package = ext_package
self._has_source = False
self._has_module = False
def write_source(self, file=None):
"""Write the C source code. It is produced in 'self.sourcefilename',
which can be tweaked beforehand."""
with self.ffi._lock:
if self._has_source and file is None:
raise ffiplatform.VerificationError(
"source code already written")
self._write_source(file)
def compile_module(self):
"""Write the C source code (if not done already) and compile it.
This produces a dynamic link library in 'self.modulefilename'."""
with self.ffi._lock:
if self._has_module:
raise ffiplatform.VerificationError("module already compiled")
if not self._has_source:
self._write_source()
self._compile_module()
def load_library(self):
"""Get a C module from this Verifier instance.
Returns an instance of a FFILibrary class that behaves like the
objects returned by ffi.dlopen(), but that delegates all
operations to the C module. If necessary, the C code is written
and compiled first.
"""
with self.ffi._lock:
if not self._has_module:
self._locate_module()
if not self._has_module:
if not self._has_source:
self._write_source()
self._compile_module()
return self._load_library()
def get_module_name(self):
basename = os.path.basename(self.modulefilename)
# kill both the .so extension and the other .'s, as introduced
# by Python 3: 'basename.cpython-33m.so'
basename = basename.split('.', 1)[0]
# and the _d added in Python 2 debug builds --- but try to be
# conservative and not kill a legitimate _d
if basename.endswith('_d') and hasattr(sys, 'gettotalrefcount'):
basename = basename[:-2]
return basename
def get_extension(self):
if not self._has_source:
with self.ffi._lock:
if not self._has_source:
self._write_source()
sourcename = ffiplatform.maybe_relative_path(self.sourcefilename)
modname = self.get_module_name()
return ffiplatform.get_extension(sourcename, modname, **self.kwds)
def generates_python_module(self):
return self._vengine._gen_python_module
def make_relative_to(self, kwds, relative_to):
if relative_to and os.path.dirname(relative_to):
dirname = os.path.dirname(relative_to)
kwds = kwds.copy()
for key in ffiplatform.LIST_OF_FILE_NAMES:
if key in kwds:
lst = kwds[key]
if not isinstance(lst, (list, tuple)):
raise TypeError("keyword '%s' should be a list or tuple"
% (key,))
lst = [os.path.join(dirname, fn) for fn in lst]
kwds[key] = lst
return kwds
# ----------
def _locate_module(self):
if not os.path.isfile(self.modulefilename):
if self.ext_package:
try:
pkg = __import__(self.ext_package, None, None, ['__doc__'])
except ImportError:
return # cannot import the package itself, give up
# (e.g. it might be called differently before installation)
path = pkg.__path__
else:
path = None
filename = self._vengine.find_module(self.get_module_name(), path,
_get_so_suffixes())
if filename is None:
return
self.modulefilename = filename
self._vengine.collect_types()
self._has_module = True
def _write_source_to(self, file):
self._vengine._f = file
try:
self._vengine.write_source_to_f()
finally:
del self._vengine._f
def _write_source(self, file=None):
if file is not None:
self._write_source_to(file)
else:
            # Write our source file to an in-memory buffer first.
f = NativeIO()
self._write_source_to(f)
source_data = f.getvalue()
# Determine if this matches the current file
if os.path.exists(self.sourcefilename):
with open(self.sourcefilename, "r") as fp:
                    needs_written = fp.read() != source_data
else:
needs_written = True
# Actually write the file out if it doesn't match
if needs_written:
_ensure_dir(self.sourcefilename)
with open(self.sourcefilename, "w") as fp:
fp.write(source_data)
# Set this flag
self._has_source = True
def _compile_module(self):
# compile this C source
tmpdir = os.path.dirname(self.sourcefilename)
outputfilename = ffiplatform.compile(tmpdir, self.get_extension())
try:
same = ffiplatform.samefile(outputfilename, self.modulefilename)
except OSError:
same = False
if not same:
_ensure_dir(self.modulefilename)
shutil.move(outputfilename, self.modulefilename)
self._has_module = True
def _load_library(self):
assert self._has_module
if self.flags is not None:
return self._vengine.load_library(self.flags)
else:
return self._vengine.load_library()
# ____________________________________________________________
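# A hedged usage sketch (illustration only): it assumes a working C compiler
# and mirrors the flow behind ffi.verify().
def _verifier_usage_demo():
    from cffi import FFI
    ffi = FFI()
    ffi.cdef("int add(int, int);")
    v = Verifier(ffi, "int add(int a, int b) { return a + b; }")
    lib = v.load_library()  # writes the C source, compiles it, loads it
    assert lib.add(2, 3) == 5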
_FORCE_GENERIC_ENGINE = False # for tests
def _locate_engine_class(ffi, force_generic_engine):
if _FORCE_GENERIC_ENGINE:
force_generic_engine = True
if not force_generic_engine:
if '__pypy__' in sys.builtin_module_names:
force_generic_engine = True
else:
try:
import _cffi_backend
except ImportError:
_cffi_backend = '?'
if ffi._backend is not _cffi_backend:
force_generic_engine = True
if force_generic_engine:
from . import vengine_gen
return vengine_gen.VGenericEngine
else:
from . import vengine_cpy
return vengine_cpy.VCPythonEngine
# ____________________________________________________________
_TMPDIR = None
def _caller_dir_pycache():
if _TMPDIR:
return _TMPDIR
result = os.environ.get('CFFI_TMPDIR')
if result:
return result
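    # Two frames up: past the caller inside this module (e.g.
    # Verifier.__init__) to the user code that invoked it, so the
    # __pycache__ directory lands next to the user's file.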
filename = sys._getframe(2).f_code.co_filename
return os.path.abspath(os.path.join(os.path.dirname(filename),
'__pycache__'))
def set_tmpdir(dirname):
"""Set the temporary directory to use instead of __pycache__."""
global _TMPDIR
_TMPDIR = dirname
def cleanup_tmpdir(tmpdir=None, keep_so=False):
"""Clean up the temporary directory by removing all files in it
called `_cffi_*.{c,so}` as well as the `build` subdirectory."""
tmpdir = tmpdir or _caller_dir_pycache()
try:
filelist = os.listdir(tmpdir)
except OSError:
return
if keep_so:
suffix = '.c' # only remove .c files
else:
suffix = _get_so_suffixes()[0].lower()
for fn in filelist:
if fn.lower().startswith('_cffi_') and (
fn.lower().endswith(suffix) or fn.lower().endswith('.c')):
try:
os.unlink(os.path.join(tmpdir, fn))
except OSError:
pass
clean_dir = [os.path.join(tmpdir, 'build')]
for dir in clean_dir:
try:
for fn in os.listdir(dir):
fn = os.path.join(dir, fn)
if os.path.isdir(fn):
clean_dir.append(fn)
else:
os.unlink(fn)
except OSError:
pass
def _get_so_suffixes():
suffixes = _extension_suffixes()
if not suffixes:
# bah, no C_EXTENSION available. Occurs on pypy without cpyext
if sys.platform == 'win32':
suffixes = [".pyd"]
else:
suffixes = [".so"]
return suffixes
def _ensure_dir(filename):
try:
os.makedirs(os.path.dirname(filename))
except OSError:
pass
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Serves the stub App Engine APIs (e.g. memcache, datastore) over HTTP.
The Remote API protocol is used for communication.
"""
import logging
import os
import pickle
import shutil
import socket
import sys
import tempfile
import threading
import time
import traceback
import urllib2
import urlparse
import google
import yaml
# Stubs
from google.appengine.api import datastore_file_stub
from google.appengine.api import mail_stub
from google.appengine.api import urlfetch_stub
from google.appengine.api import user_service_stub
from google.appengine.api.app_identity import app_identity_stub
from google.appengine.api.blobstore import blobstore_stub
from google.appengine.api.blobstore import file_blob_storage
from google.appengine.api.capabilities import capability_stub
from google.appengine.api.channel import channel_service_stub
from google.appengine.api.files import file_service_stub
from google.appengine.api.logservice import logservice_stub
from google.appengine.api.search import simple_search_stub
from google.appengine.api.taskqueue import taskqueue_stub
from google.appengine.api.prospective_search import prospective_search_stub
from google.appengine.api.memcache import memcache_stub
from google.appengine.api.modules import modules_stub
from google.appengine.api.remote_socket import _remote_socket_stub
from google.appengine.api.system import system_stub
from google.appengine.api.xmpp import xmpp_service_stub
from google.appengine.datastore import datastore_sqlite_stub
from google.appengine.datastore import datastore_stub_util
from google.appengine.datastore import datastore_v4_pb
from google.appengine.datastore import datastore_v4_stub
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore
from google.appengine.ext.remote_api import remote_api_pb
from google.appengine.ext.remote_api import remote_api_services
from google.appengine.runtime import apiproxy_errors
from google.appengine.tools.devappserver2 import wsgi_server
# TODO: Remove this lock when stubs have been audited for thread
# safety.
GLOBAL_API_LOCK = threading.RLock()
# We don't want to support datastore_v4 everywhere, because users are supposed
# to use the Cloud Datastore API going forward, so we don't want to put these
# entries in remote_api_services.SERVICE_PB_MAP. But for our own implementation
# of the Cloud Datastore API we need those methods to work when an instance
# issues them, specifically the DatastoreApiServlet running as a module inside
# the app we are running. The consequence is that other app code can also
# issue datastore_v4 API requests, but since we don't document these requests
# or export them through any language bindings this is unlikely in practice.
_DATASTORE_V4_METHODS = {
'AllocateIds': (datastore_v4_pb.AllocateIdsRequest,
datastore_v4_pb.AllocateIdsResponse),
'BeginTransaction': (datastore_v4_pb.BeginTransactionRequest,
datastore_v4_pb.BeginTransactionResponse),
'Commit': (datastore_v4_pb.CommitRequest,
datastore_v4_pb.CommitResponse),
'ContinueQuery': (datastore_v4_pb.ContinueQueryRequest,
datastore_v4_pb.ContinueQueryResponse),
'Lookup': (datastore_v4_pb.LookupRequest,
datastore_v4_pb.LookupResponse),
'Rollback': (datastore_v4_pb.RollbackRequest,
datastore_v4_pb.RollbackResponse),
'RunQuery': (datastore_v4_pb.RunQueryRequest,
datastore_v4_pb.RunQueryResponse),
}
def _execute_request(request):
"""Executes an API method call and returns the response object.
Args:
request: A remote_api_pb.Request object representing the API call e.g. a
call to memcache.Get.
Returns:
A ProtocolBuffer.ProtocolMessage representing the API response e.g. a
memcache_service_pb.MemcacheGetResponse.
Raises:
apiproxy_errors.CallNotFoundError: if the requested method doesn't exist.
    apiproxy_errors.ApplicationError: if the API method call fails.
"""
service = request.service_name()
method = request.method()
if request.has_request_id():
request_id = request.request_id()
else:
logging.error('Received a request without request_id: %s', request)
request_id = None
service_methods = (_DATASTORE_V4_METHODS if service == 'datastore_v4'
else remote_api_services.SERVICE_PB_MAP.get(service, {}))
# We do this rather than making a new map that is a superset of
# remote_api_services.SERVICE_PB_MAP because that map is not initialized
# all in one place, so we would have to be careful about where we made
# our new map.
request_class, response_class = service_methods.get(method, (None, None))
if not request_class:
raise apiproxy_errors.CallNotFoundError('%s.%s does not exist' % (service,
method))
request_data = request_class()
request_data.ParseFromString(request.request())
response_data = response_class()
service_stub = apiproxy_stub_map.apiproxy.GetStub(service)
def make_request():
service_stub.MakeSyncCall(service,
method,
request_data,
response_data,
request_id)
  # If the service has not declared itself as threadsafe, acquire the
  # GLOBAL_API_LOCK.
if service_stub.THREADSAFE:
make_request()
else:
with GLOBAL_API_LOCK:
make_request()
return response_data
class APIServer(wsgi_server.WsgiServer):
"""Serves API calls over HTTP."""
def __init__(self, host, port, app_id):
self._app_id = app_id
self._host = host
super(APIServer, self).__init__((host, port), self)
def start(self):
"""Start the API Server."""
super(APIServer, self).start()
logging.info('Starting API server at: http://%s:%d', self._host, self.port)
def quit(self):
cleanup_stubs()
super(APIServer, self).quit()
def _handle_POST(self, environ, start_response):
start_response('200 OK', [('Content-Type', 'application/octet-stream')])
start_time = time.time()
response = remote_api_pb.Response()
try:
request = remote_api_pb.Request()
# NOTE: Exceptions encountered when parsing the PB or handling the request
# will be propagated back to the caller the same way as exceptions raised
# by the actual API call.
if environ.get('HTTP_TRANSFER_ENCODING') == 'chunked':
# CherryPy concatenates all chunks when 'wsgi.input' is read but v3.2.2
# will not return even when all of the data in all chunks has been
# read. See: https://bitbucket.org/cherrypy/cherrypy/issue/1131.
wsgi_input = environ['wsgi.input'].read(2**32)
else:
wsgi_input = environ['wsgi.input'].read(int(environ['CONTENT_LENGTH']))
request.ParseFromString(wsgi_input)
api_response = _execute_request(request).Encode()
response.set_response(api_response)
except Exception, e:
if isinstance(e, apiproxy_errors.ApplicationError):
level = logging.DEBUG
application_error = response.mutable_application_error()
application_error.set_code(e.application_error)
application_error.set_detail(e.error_detail)
else:
# If the runtime instance is not Python, it won't be able to unpickle
# the exception so use level that won't be ignored by default.
level = logging.ERROR
# Even if the runtime is Python, the exception may be unpicklable if
# it requires importing a class blocked by the sandbox so just send
# back the exception representation.
# But due to our use of the remote API, at least some apiproxy errors
# are generated in the Dev App Server main instance and not in the
# language runtime and wrapping them causes different behavior from
# prod so don't wrap them.
if not isinstance(e, apiproxy_errors.Error):
e = RuntimeError(repr(e))
# While not strictly necessary for ApplicationError, do this to limit
# differences with remote_api:handler.py.
response.set_exception(pickle.dumps(e))
logging.log(level, 'Exception while handling %s\n%s', request,
traceback.format_exc())
encoded_response = response.Encode()
logging.debug('Handled %s.%s in %0.4f',
request.service_name(),
request.method(),
time.time() - start_time)
return [encoded_response]
def _handle_GET(self, environ, start_response):
params = urlparse.parse_qs(environ['QUERY_STRING'])
rtok = params.get('rtok', ['0'])[0]
start_response('200 OK', [('Content-Type', 'text/plain')])
return [yaml.dump({'app_id': self._app_id,
'rtok': rtok})]
def __call__(self, environ, start_response):
if environ['REQUEST_METHOD'] == 'GET':
return self._handle_GET(environ, start_response)
elif environ['REQUEST_METHOD'] == 'POST':
return self._handle_POST(environ, start_response)
else:
start_response('405 Method Not Allowed', [])
return []
def setup_stubs(
request_data,
app_id,
application_root,
trusted,
appidentity_email_address,
appidentity_private_key_path,
blobstore_path,
datastore_consistency,
datastore_path,
datastore_require_indexes,
datastore_auto_id_policy,
images_host_prefix,
logs_path,
mail_smtp_host,
mail_smtp_port,
mail_smtp_user,
mail_smtp_password,
mail_enable_sendmail,
mail_show_mail_body,
mail_allow_tls,
matcher_prospective_search_path,
search_index_path,
taskqueue_auto_run_tasks,
taskqueue_default_http_server,
user_login_url,
user_logout_url,
default_gcs_bucket_name):
"""Configures the APIs hosted by this server.
Args:
request_data: An apiproxy_stub.RequestInformation instance used by the
stubs to lookup information about the request associated with an API
call.
app_id: The str application id e.g. "guestbook".
application_root: The path to the directory containing the user's
application e.g. "/home/joe/myapp".
trusted: A bool indicating if privileged APIs should be made available.
appidentity_email_address: Email address associated with a service account
that has a downloadable key. May be None for no local application
identity.
appidentity_private_key_path: Path to private key file associated with
service account (.pem format). Must be set if appidentity_email_address
is set.
blobstore_path: The path to the file that should be used for blobstore
storage.
datastore_consistency: The datastore_stub_util.BaseConsistencyPolicy to
use as the datastore consistency policy.
datastore_path: The path to the file that should be used for datastore
storage.
    datastore_require_indexes: A bool indicating whether the same index
        requirements as production should be enforced, i.e. if True then a
        google.appengine.ext.db.NeedIndexError will be raised if a query is
        executed without the required indexes.
datastore_auto_id_policy: The type of sequence from which the datastore
stub assigns auto IDs, either datastore_stub_util.SEQUENTIAL or
datastore_stub_util.SCATTERED.
images_host_prefix: The URL prefix (protocol://host:port) to prepend to
image urls on calls to images.GetUrlBase.
logs_path: Path to the file to store the logs data in.
mail_smtp_host: The SMTP hostname that should be used when sending e-mails.
If None then the mail_enable_sendmail argument is considered.
mail_smtp_port: The SMTP port number that should be used when sending
e-mails. If this value is None then mail_smtp_host must also be None.
mail_smtp_user: The username to use when authenticating with the
SMTP server. This value may be None if mail_smtp_host is also None or if
the SMTP server does not require authentication.
mail_smtp_password: The password to use when authenticating with the
SMTP server. This value may be None if mail_smtp_host or mail_smtp_user
is also None.
mail_enable_sendmail: A bool indicating if sendmail should be used when
sending e-mails. This argument is ignored if mail_smtp_host is not None.
mail_show_mail_body: A bool indicating whether the body of sent e-mails
should be written to the logs.
mail_allow_tls: A bool indicating whether TLS should be allowed when
communicating with an SMTP server. This argument is ignored if
mail_smtp_host is None.
matcher_prospective_search_path: The path to the file that should be used to
save prospective search subscriptions.
search_index_path: The path to the file that should be used for search index
storage.
    taskqueue_auto_run_tasks: A bool indicating whether taskqueue tasks should
        be run automatically or whether they must be manually triggered.
taskqueue_default_http_server: A str containing the address of the http
server that should be used to execute tasks.
user_login_url: A str containing the url that should be used for user login.
user_logout_url: A str containing the url that should be used for user
logout.
default_gcs_bucket_name: A str, overriding the default bucket behavior.
"""
identity_stub = app_identity_stub.AppIdentityServiceStub.Create(
email_address=appidentity_email_address,
private_key_path=appidentity_private_key_path)
if default_gcs_bucket_name is not None:
identity_stub.SetDefaultGcsBucketName(default_gcs_bucket_name)
apiproxy_stub_map.apiproxy.RegisterStub('app_identity_service', identity_stub)
blob_storage = file_blob_storage.FileBlobStorage(blobstore_path, app_id)
apiproxy_stub_map.apiproxy.RegisterStub(
'blobstore',
blobstore_stub.BlobstoreServiceStub(blob_storage,
request_data=request_data))
apiproxy_stub_map.apiproxy.RegisterStub(
'capability_service',
capability_stub.CapabilityServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'channel',
channel_service_stub.ChannelServiceStub(request_data=request_data))
datastore_stub = datastore_sqlite_stub.DatastoreSqliteStub(
app_id,
datastore_path,
datastore_require_indexes,
trusted,
root_path=application_root,
auto_id_policy=datastore_auto_id_policy)
datastore_stub.SetConsistencyPolicy(datastore_consistency)
apiproxy_stub_map.apiproxy.ReplaceStub(
'datastore_v3', datastore_stub)
apiproxy_stub_map.apiproxy.RegisterStub(
'datastore_v4',
datastore_v4_stub.DatastoreV4Stub(app_id))
apiproxy_stub_map.apiproxy.RegisterStub(
'file',
file_service_stub.FileServiceStub(blob_storage))
try:
from google.appengine.api.images import images_stub
except ImportError:
# We register a stub which throws a NotImplementedError for most RPCs.
from google.appengine.api.images import images_not_implemented_stub
apiproxy_stub_map.apiproxy.RegisterStub(
'images',
images_not_implemented_stub.ImagesNotImplementedServiceStub(
host_prefix=images_host_prefix))
else:
apiproxy_stub_map.apiproxy.RegisterStub(
'images',
images_stub.ImagesServiceStub(host_prefix=images_host_prefix))
apiproxy_stub_map.apiproxy.RegisterStub(
'logservice',
logservice_stub.LogServiceStub(logs_path=logs_path))
apiproxy_stub_map.apiproxy.RegisterStub(
'mail',
mail_stub.MailServiceStub(mail_smtp_host,
mail_smtp_port,
mail_smtp_user,
mail_smtp_password,
enable_sendmail=mail_enable_sendmail,
show_mail_body=mail_show_mail_body,
allow_tls=mail_allow_tls))
apiproxy_stub_map.apiproxy.RegisterStub(
'memcache',
memcache_stub.MemcacheServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'search',
simple_search_stub.SearchServiceStub(index_file=search_index_path))
apiproxy_stub_map.apiproxy.RegisterStub(
'modules',
modules_stub.ModulesServiceStub(request_data))
apiproxy_stub_map.apiproxy.RegisterStub(
'system',
system_stub.SystemServiceStub(request_data=request_data))
apiproxy_stub_map.apiproxy.RegisterStub(
'taskqueue',
taskqueue_stub.TaskQueueServiceStub(
root_path=application_root,
auto_task_running=taskqueue_auto_run_tasks,
default_http_server=taskqueue_default_http_server,
request_data=request_data))
apiproxy_stub_map.apiproxy.GetStub('taskqueue').StartBackgroundExecution()
apiproxy_stub_map.apiproxy.RegisterStub(
'urlfetch',
urlfetch_stub.URLFetchServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'user',
user_service_stub.UserServiceStub(login_url=user_login_url,
logout_url=user_logout_url,
request_data=request_data))
apiproxy_stub_map.apiproxy.RegisterStub(
'xmpp',
xmpp_service_stub.XmppServiceStub())
apiproxy_stub_map.apiproxy.RegisterStub(
'matcher',
prospective_search_stub.ProspectiveSearchStub(
matcher_prospective_search_path,
apiproxy_stub_map.apiproxy.GetStub('taskqueue')))
apiproxy_stub_map.apiproxy.RegisterStub(
'remote_socket',
_remote_socket_stub.RemoteSocketServiceStub())
def maybe_convert_datastore_file_stub_data_to_sqlite(app_id, filename):
if not os.access(filename, os.R_OK | os.W_OK):
return
try:
with open(filename, 'rb') as f:
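      # Every SQLite database file starts with the 16-byte magic header
      # 'SQLite format 3\x00'; if it is present the file is already converted.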
if f.read(16) == 'SQLite format 3\x00':
return
except (IOError, OSError):
return
try:
_convert_datastore_file_stub_data_to_sqlite(app_id, filename)
except:
logging.exception('Failed to convert datastore file stub data to sqlite.')
raise
def _convert_datastore_file_stub_data_to_sqlite(app_id, datastore_path):
logging.info('Converting datastore stub data to sqlite.')
previous_stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
try:
apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
datastore_stub = datastore_file_stub.DatastoreFileStub(
app_id, datastore_path, trusted=True, save_changes=False)
apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', datastore_stub)
entities = _fetch_all_datastore_entities()
sqlite_datastore_stub = datastore_sqlite_stub.DatastoreSqliteStub(
app_id, datastore_path + '.sqlite', trusted=True)
apiproxy_stub_map.apiproxy.ReplaceStub('datastore_v3',
sqlite_datastore_stub)
datastore.Put(entities)
sqlite_datastore_stub.Close()
finally:
apiproxy_stub_map.apiproxy.ReplaceStub('datastore_v3', previous_stub)
shutil.copy(datastore_path, datastore_path + '.filestub')
os.remove(datastore_path)
shutil.move(datastore_path + '.sqlite', datastore_path)
logging.info('Datastore conversion complete. File stub data has been backed '
'up in %s', datastore_path + '.filestub')
def _fetch_all_datastore_entities():
"""Returns all datastore entities from all namespaces as a list."""
all_entities = []
for namespace in datastore.Query('__namespace__').Run():
namespace_name = namespace.key().name()
for kind in datastore.Query('__kind__', namespace=namespace_name).Run():
all_entities.extend(
datastore.Query(kind.key().name(), namespace=namespace_name).Run())
return all_entities
def test_setup_stubs(
request_data=None,
app_id='myapp',
application_root='/tmp/root',
trusted=False,
appidentity_email_address=None,
appidentity_private_key_path=None,
blobstore_path='/dev/null',
datastore_consistency=None,
datastore_path=':memory:',
datastore_require_indexes=False,
datastore_auto_id_policy=datastore_stub_util.SCATTERED,
images_host_prefix='http://localhost:8080',
logs_path=':memory:',
mail_smtp_host='',
mail_smtp_port=25,
mail_smtp_user='',
mail_smtp_password='',
mail_enable_sendmail=False,
mail_show_mail_body=False,
mail_allow_tls=True,
matcher_prospective_search_path='/dev/null',
search_index_path=None,
taskqueue_auto_run_tasks=False,
taskqueue_default_http_server='http://localhost:8080',
user_login_url='/_ah/login?continue=%s',
user_logout_url='/_ah/login?continue=%s',
default_gcs_bucket_name=None):
"""Similar to setup_stubs with reasonable test defaults and recallable."""
# Reset the stub map between requests because a stub map only allows a
# stub to be added once.
apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
if datastore_consistency is None:
datastore_consistency = (
datastore_stub_util.PseudoRandomHRConsistencyPolicy())
setup_stubs(request_data,
app_id,
application_root,
trusted,
appidentity_email_address,
appidentity_private_key_path,
blobstore_path,
datastore_consistency,
datastore_path,
datastore_require_indexes,
datastore_auto_id_policy,
images_host_prefix,
logs_path,
mail_smtp_host,
mail_smtp_port,
mail_smtp_user,
mail_smtp_password,
mail_enable_sendmail,
mail_show_mail_body,
mail_allow_tls,
matcher_prospective_search_path,
search_index_path,
taskqueue_auto_run_tasks,
taskqueue_default_http_server,
user_login_url,
user_logout_url,
default_gcs_bucket_name)
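# A hedged sketch (illustration only, not part of the server): once
# test_setup_stubs() has registered the in-memory stubs, the regular App
# Engine APIs work against them.
def _stub_usage_demo():
  test_setup_stubs()
  from google.appengine.api import memcache
  memcache.set('greeting', 'hello')
  assert memcache.get('greeting') == 'hello'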
def cleanup_stubs():
"""Do any necessary stub cleanup e.g. saving data."""
# Saving datastore
logging.info('Applying all pending transactions and saving the datastore')
datastore_stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
datastore_stub.Write()
logging.info('Saving search indexes')
apiproxy_stub_map.apiproxy.GetStub('search').Write()
apiproxy_stub_map.apiproxy.GetStub('taskqueue').Shutdown()
|
|
# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class Test_ACLEntity(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.storage.acl import _ACLEntity
return _ACLEntity
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor_default_identifier(self):
TYPE = 'type'
entity = self._make_one(TYPE)
self.assertEqual(entity.type, TYPE)
self.assertIsNone(entity.identifier)
self.assertEqual(entity.get_roles(), set())
def test_ctor_w_identifier(self):
TYPE = 'type'
ID = 'id'
entity = self._make_one(TYPE, ID)
self.assertEqual(entity.type, TYPE)
self.assertEqual(entity.identifier, ID)
self.assertEqual(entity.get_roles(), set())
def test___str__no_identifier(self):
TYPE = 'type'
entity = self._make_one(TYPE)
self.assertEqual(str(entity), TYPE)
def test___str__w_identifier(self):
TYPE = 'type'
ID = 'id'
entity = self._make_one(TYPE, ID)
self.assertEqual(str(entity), '%s-%s' % (TYPE, ID))
def test_grant_simple(self):
TYPE = 'type'
ROLE = 'role'
entity = self._make_one(TYPE)
entity.grant(ROLE)
self.assertEqual(entity.get_roles(), set([ROLE]))
def test_grant_duplicate(self):
TYPE = 'type'
ROLE1 = 'role1'
ROLE2 = 'role2'
entity = self._make_one(TYPE)
entity.grant(ROLE1)
entity.grant(ROLE2)
entity.grant(ROLE1)
self.assertEqual(entity.get_roles(), set([ROLE1, ROLE2]))
def test_revoke_miss(self):
TYPE = 'type'
ROLE = 'nonesuch'
entity = self._make_one(TYPE)
entity.revoke(ROLE)
self.assertEqual(entity.get_roles(), set())
def test_revoke_hit(self):
TYPE = 'type'
ROLE1 = 'role1'
ROLE2 = 'role2'
entity = self._make_one(TYPE)
entity.grant(ROLE1)
entity.grant(ROLE2)
entity.revoke(ROLE1)
self.assertEqual(entity.get_roles(), set([ROLE2]))
def test_grant_read(self):
TYPE = 'type'
entity = self._make_one(TYPE)
entity.grant_read()
self.assertEqual(entity.get_roles(), set([entity.READER_ROLE]))
def test_grant_write(self):
TYPE = 'type'
entity = self._make_one(TYPE)
entity.grant_write()
self.assertEqual(entity.get_roles(), set([entity.WRITER_ROLE]))
def test_grant_owner(self):
TYPE = 'type'
entity = self._make_one(TYPE)
entity.grant_owner()
self.assertEqual(entity.get_roles(), set([entity.OWNER_ROLE]))
def test_revoke_read(self):
TYPE = 'type'
entity = self._make_one(TYPE)
entity.grant(entity.READER_ROLE)
entity.revoke_read()
self.assertEqual(entity.get_roles(), set())
def test_revoke_write(self):
TYPE = 'type'
entity = self._make_one(TYPE)
entity.grant(entity.WRITER_ROLE)
entity.revoke_write()
self.assertEqual(entity.get_roles(), set())
def test_revoke_owner(self):
TYPE = 'type'
entity = self._make_one(TYPE)
entity.grant(entity.OWNER_ROLE)
entity.revoke_owner()
self.assertEqual(entity.get_roles(), set())
class Test_ACL(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.storage.acl import ACL
return ACL
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_validate_predefined(self):
ACL = self._get_target_class()
self.assertIsNone(ACL.validate_predefined(None))
self.assertEqual(ACL.validate_predefined('public-read'), 'publicRead')
self.assertEqual(ACL.validate_predefined('publicRead'), 'publicRead')
with self.assertRaises(ValueError):
ACL.validate_predefined('publicread')
def test_ctor(self):
acl = self._make_one()
self.assertEqual(acl.entities, {})
self.assertFalse(acl.loaded)
def test__ensure_loaded(self):
acl = self._make_one()
def _reload():
acl._really_loaded = True
acl.reload = _reload
acl._ensure_loaded()
self.assertTrue(acl._really_loaded)
def test_client_is_abstract(self):
acl = self._make_one()
self.assertRaises(NotImplementedError, lambda: acl.client)
def test_reset(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
acl.entity(TYPE, ID)
acl.reset()
self.assertEqual(acl.entities, {})
self.assertFalse(acl.loaded)
def test___iter___empty_eager(self):
acl = self._make_one()
acl.loaded = True
self.assertEqual(list(acl), [])
def test___iter___empty_lazy(self):
acl = self._make_one()
def _reload():
acl.loaded = True
acl.reload = _reload
self.assertEqual(list(acl), [])
self.assertTrue(acl.loaded)
def test___iter___non_empty_no_roles(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
acl.entity(TYPE, ID)
self.assertEqual(list(acl), [])
def test___iter___non_empty_w_roles(self):
TYPE = 'type'
ID = 'id'
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.entity(TYPE, ID)
entity.grant(ROLE)
self.assertEqual(list(acl),
[{'entity': '%s-%s' % (TYPE, ID), 'role': ROLE}])
def test___iter___non_empty_w_empty_role(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
entity = acl.entity(TYPE, ID)
entity.grant('')
self.assertEqual(list(acl), [])
def test_entity_from_dict_allUsers_eager(self):
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.entity_from_dict({'entity': 'allUsers', 'role': ROLE})
self.assertEqual(entity.type, 'allUsers')
self.assertIsNone(entity.identifier)
self.assertEqual(entity.get_roles(), set([ROLE]))
self.assertEqual(list(acl),
[{'entity': 'allUsers', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
def test_entity_from_dict_allAuthenticatedUsers(self):
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.entity_from_dict({'entity': 'allAuthenticatedUsers',
'role': ROLE})
self.assertEqual(entity.type, 'allAuthenticatedUsers')
self.assertIsNone(entity.identifier)
self.assertEqual(entity.get_roles(), set([ROLE]))
self.assertEqual(list(acl),
[{'entity': 'allAuthenticatedUsers', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
def test_entity_from_dict_string_w_hyphen(self):
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.entity_from_dict({'entity': 'type-id', 'role': ROLE})
self.assertEqual(entity.type, 'type')
self.assertEqual(entity.identifier, 'id')
self.assertEqual(entity.get_roles(), set([ROLE]))
self.assertEqual(list(acl),
[{'entity': 'type-id', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
def test_entity_from_dict_string_wo_hyphen(self):
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
self.assertRaises(ValueError,
acl.entity_from_dict,
{'entity': 'bogus', 'role': ROLE})
self.assertEqual(list(acl.get_entities()), [])
def test_has_entity_miss_str_eager(self):
acl = self._make_one()
acl.loaded = True
self.assertFalse(acl.has_entity('nonesuch'))
def test_has_entity_miss_str_lazy(self):
acl = self._make_one()
def _reload():
acl.loaded = True
acl.reload = _reload
self.assertFalse(acl.has_entity('nonesuch'))
self.assertTrue(acl.loaded)
def test_has_entity_miss_entity(self):
from google.cloud.storage.acl import _ACLEntity
TYPE = 'type'
ID = 'id'
entity = _ACLEntity(TYPE, ID)
acl = self._make_one()
acl.loaded = True
self.assertFalse(acl.has_entity(entity))
def test_has_entity_hit_str(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
acl.entity(TYPE, ID)
self.assertTrue(acl.has_entity('%s-%s' % (TYPE, ID)))
def test_has_entity_hit_entity(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
entity = acl.entity(TYPE, ID)
self.assertTrue(acl.has_entity(entity))
def test_get_entity_miss_str_no_default_eager(self):
acl = self._make_one()
acl.loaded = True
self.assertIsNone(acl.get_entity('nonesuch'))
def test_get_entity_miss_str_no_default_lazy(self):
acl = self._make_one()
def _reload():
acl.loaded = True
acl.reload = _reload
self.assertIsNone(acl.get_entity('nonesuch'))
self.assertTrue(acl.loaded)
def test_get_entity_miss_entity_no_default(self):
from google.cloud.storage.acl import _ACLEntity
TYPE = 'type'
ID = 'id'
entity = _ACLEntity(TYPE, ID)
acl = self._make_one()
acl.loaded = True
self.assertIsNone(acl.get_entity(entity))
def test_get_entity_miss_str_w_default(self):
DEFAULT = object()
acl = self._make_one()
acl.loaded = True
self.assertIs(acl.get_entity('nonesuch', DEFAULT), DEFAULT)
def test_get_entity_miss_entity_w_default(self):
from google.cloud.storage.acl import _ACLEntity
DEFAULT = object()
TYPE = 'type'
ID = 'id'
entity = _ACLEntity(TYPE, ID)
acl = self._make_one()
acl.loaded = True
self.assertIs(acl.get_entity(entity, DEFAULT), DEFAULT)
def test_get_entity_hit_str(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
acl.entity(TYPE, ID)
self.assertTrue(acl.has_entity('%s-%s' % (TYPE, ID)))
def test_get_entity_hit_entity(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
entity = acl.entity(TYPE, ID)
self.assertTrue(acl.has_entity(entity))
def test_add_entity_miss_eager(self):
from google.cloud.storage.acl import _ACLEntity
TYPE = 'type'
ID = 'id'
ROLE = 'role'
entity = _ACLEntity(TYPE, ID)
entity.grant(ROLE)
acl = self._make_one()
acl.loaded = True
acl.add_entity(entity)
self.assertTrue(acl.loaded)
self.assertEqual(list(acl),
[{'entity': 'type-id', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
def test_add_entity_miss_lazy(self):
from google.cloud.storage.acl import _ACLEntity
TYPE = 'type'
ID = 'id'
ROLE = 'role'
entity = _ACLEntity(TYPE, ID)
entity.grant(ROLE)
acl = self._make_one()
def _reload():
acl.loaded = True
acl.reload = _reload
acl.add_entity(entity)
self.assertTrue(acl.loaded)
self.assertEqual(list(acl),
[{'entity': 'type-id', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
self.assertTrue(acl.loaded)
def test_add_entity_hit(self):
from google.cloud.storage.acl import _ACLEntity
TYPE = 'type'
ID = 'id'
ENTITY_VAL = '%s-%s' % (TYPE, ID)
ROLE = 'role'
entity = _ACLEntity(TYPE, ID)
entity.grant(ROLE)
acl = self._make_one()
acl.loaded = True
before = acl.entity(TYPE, ID)
acl.add_entity(entity)
self.assertTrue(acl.loaded)
self.assertIsNot(acl.get_entity(ENTITY_VAL), before)
self.assertIs(acl.get_entity(ENTITY_VAL), entity)
self.assertEqual(list(acl),
[{'entity': 'type-id', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
def test_entity_miss(self):
TYPE = 'type'
ID = 'id'
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.entity(TYPE, ID)
self.assertTrue(acl.loaded)
entity.grant(ROLE)
self.assertEqual(list(acl),
[{'entity': 'type-id', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
def test_entity_hit(self):
TYPE = 'type'
ID = 'id'
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
before = acl.entity(TYPE, ID)
before.grant(ROLE)
entity = acl.entity(TYPE, ID)
self.assertIs(entity, before)
self.assertEqual(list(acl),
[{'entity': 'type-id', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
def test_user(self):
ID = 'id'
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.user(ID)
entity.grant(ROLE)
self.assertEqual(entity.type, 'user')
self.assertEqual(entity.identifier, ID)
self.assertEqual(list(acl),
[{'entity': 'user-%s' % ID, 'role': ROLE}])
def test_group(self):
ID = 'id'
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.group(ID)
entity.grant(ROLE)
self.assertEqual(entity.type, 'group')
self.assertEqual(entity.identifier, ID)
self.assertEqual(list(acl),
[{'entity': 'group-%s' % ID, 'role': ROLE}])
def test_domain(self):
ID = 'id'
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.domain(ID)
entity.grant(ROLE)
self.assertEqual(entity.type, 'domain')
self.assertEqual(entity.identifier, ID)
self.assertEqual(list(acl),
[{'entity': 'domain-%s' % ID, 'role': ROLE}])
def test_all(self):
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.all()
entity.grant(ROLE)
self.assertEqual(entity.type, 'allUsers')
self.assertIsNone(entity.identifier)
self.assertEqual(list(acl),
[{'entity': 'allUsers', 'role': ROLE}])
def test_all_authenticated(self):
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.all_authenticated()
entity.grant(ROLE)
self.assertEqual(entity.type, 'allAuthenticatedUsers')
self.assertIsNone(entity.identifier)
self.assertEqual(list(acl),
[{'entity': 'allAuthenticatedUsers', 'role': ROLE}])
def test_get_entities_empty_eager(self):
acl = self._make_one()
acl.loaded = True
self.assertEqual(acl.get_entities(), [])
def test_get_entities_empty_lazy(self):
acl = self._make_one()
def _reload():
acl.loaded = True
acl.reload = _reload
self.assertEqual(acl.get_entities(), [])
self.assertTrue(acl.loaded)
def test_get_entities_nonempty(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
entity = acl.entity(TYPE, ID)
self.assertEqual(acl.get_entities(), [entity])
def test_reload_missing(self):
# https://github.com/GoogleCloudPlatform/google-cloud-python/issues/652
ROLE = 'role'
connection = _Connection({})
client = _Client(connection)
acl = self._make_one()
acl.reload_path = '/testing/acl'
acl.loaded = True
acl.entity('allUsers', ROLE)
acl.reload(client=client)
self.assertEqual(list(acl), [])
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0], {
'method': 'GET',
'path': '/testing/acl',
'query_params': {},
})
def test_reload_empty_result_clears_local(self):
ROLE = 'role'
connection = _Connection({'items': []})
client = _Client(connection)
acl = self._make_one()
acl.reload_path = '/testing/acl'
acl.loaded = True
acl.entity('allUsers', ROLE)
acl.reload(client=client)
self.assertTrue(acl.loaded)
self.assertEqual(list(acl), [])
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0], {
'method': 'GET',
'path': '/testing/acl',
'query_params': {},
})
def test_reload_nonempty_result_w_user_project(self):
ROLE = 'role'
USER_PROJECT = 'user-project-123'
connection = _Connection(
{'items': [{'entity': 'allUsers', 'role': ROLE}]})
client = _Client(connection)
acl = self._make_one()
acl.reload_path = '/testing/acl'
acl.loaded = True
acl.user_project = USER_PROJECT
acl.reload(client=client)
self.assertTrue(acl.loaded)
self.assertEqual(list(acl), [{'entity': 'allUsers', 'role': ROLE}])
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0], {
'method': 'GET',
'path': '/testing/acl',
'query_params': {'userProject': USER_PROJECT},
})
def test_save_none_set_none_passed(self):
connection = _Connection()
client = _Client(connection)
acl = self._make_one()
acl.save_path = '/testing'
acl.save(client=client)
kw = connection._requested
self.assertEqual(len(kw), 0)
def test_save_existing_missing_none_passed(self):
connection = _Connection({})
client = _Client(connection)
acl = self._make_one()
acl.save_path = '/testing'
acl.loaded = True
acl.save(client=client)
self.assertEqual(list(acl), [])
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/testing')
self.assertEqual(kw[0]['data'], {'acl': []})
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
def test_save_no_acl(self):
ROLE = 'role'
AFTER = [{'entity': 'allUsers', 'role': ROLE}]
connection = _Connection({'acl': AFTER})
client = _Client(connection)
acl = self._make_one()
acl.save_path = '/testing'
acl.loaded = True
acl.entity('allUsers').grant(ROLE)
acl.save(client=client)
self.assertEqual(list(acl), AFTER)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/testing')
self.assertEqual(kw[0], {
'method': 'PATCH',
'path': '/testing',
'query_params': {'projection': 'full'},
'data': {'acl': AFTER},
})
def test_save_w_acl_w_user_project(self):
ROLE1 = 'role1'
ROLE2 = 'role2'
STICKY = {'entity': 'allUsers', 'role': ROLE2}
USER_PROJECT = 'user-project-123'
new_acl = [{'entity': 'allUsers', 'role': ROLE1}]
connection = _Connection({'acl': [STICKY] + new_acl})
client = _Client(connection)
acl = self._make_one()
acl.save_path = '/testing'
acl.loaded = True
acl.user_project = USER_PROJECT
acl.save(new_acl, client=client)
entries = list(acl)
self.assertEqual(len(entries), 2)
self.assertTrue(STICKY in entries)
self.assertTrue(new_acl[0] in entries)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0], {
'method': 'PATCH',
'path': '/testing',
'query_params': {
'projection': 'full',
'userProject': USER_PROJECT,
},
'data': {'acl': new_acl},
})
    def test_save_predefined_invalid(self):
connection = _Connection()
client = _Client(connection)
acl = self._make_one()
acl.save_path = '/testing'
acl.loaded = True
with self.assertRaises(ValueError):
acl.save_predefined('bogus', client=client)
def test_save_predefined_valid(self):
PREDEFINED = 'private'
connection = _Connection({'acl': []})
client = _Client(connection)
acl = self._make_one()
acl.save_path = '/testing'
acl.loaded = True
acl.save_predefined(PREDEFINED, client=client)
entries = list(acl)
self.assertEqual(len(entries), 0)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0], {
'method': 'PATCH',
'path': '/testing',
'query_params': {
'projection': 'full',
'predefinedAcl': PREDEFINED,
},
'data': {'acl': []},
})
def test_save_predefined_w_XML_alias(self):
PREDEFINED_XML = 'project-private'
PREDEFINED_JSON = 'projectPrivate'
connection = _Connection({'acl': []})
client = _Client(connection)
acl = self._make_one()
acl.save_path = '/testing'
acl.loaded = True
acl.save_predefined(PREDEFINED_XML, client=client)
entries = list(acl)
self.assertEqual(len(entries), 0)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0], {
'method': 'PATCH',
'path': '/testing',
'query_params': {
'projection': 'full',
'predefinedAcl': PREDEFINED_JSON,
},
'data': {'acl': []},
})
def test_save_predefined_valid_w_alternate_query_param(self):
# Cover case where subclass overrides _PREDEFINED_QUERY_PARAM
PREDEFINED = 'publicRead'
connection = _Connection({'acl': []})
client = _Client(connection)
acl = self._make_one()
acl.save_path = '/testing'
acl.loaded = True
acl._PREDEFINED_QUERY_PARAM = 'alternate'
acl.save_predefined(PREDEFINED, client=client)
entries = list(acl)
self.assertEqual(len(entries), 0)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0], {
'method': 'PATCH',
'path': '/testing',
'query_params': {
'projection': 'full',
'alternate': PREDEFINED,
},
'data': {'acl': []},
})
def test_clear(self):
ROLE1 = 'role1'
ROLE2 = 'role2'
STICKY = {'entity': 'allUsers', 'role': ROLE2}
connection = _Connection({'acl': [STICKY]})
client = _Client(connection)
acl = self._make_one()
acl.save_path = '/testing'
acl.loaded = True
acl.entity('allUsers', ROLE1)
acl.clear(client=client)
self.assertEqual(list(acl), [STICKY])
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0], {
'method': 'PATCH',
'path': '/testing',
'query_params': {'projection': 'full'},
'data': {'acl': []},
})
class Test_BucketACL(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.storage.acl import BucketACL
return BucketACL
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
NAME = 'name'
bucket = _Bucket(NAME)
acl = self._make_one(bucket)
self.assertEqual(acl.entities, {})
self.assertFalse(acl.loaded)
self.assertIs(acl.bucket, bucket)
self.assertEqual(acl.reload_path, '/b/%s/acl' % NAME)
self.assertEqual(acl.save_path, '/b/%s' % NAME)
def test_user_project(self):
NAME = 'name'
USER_PROJECT = 'user-project-123'
bucket = _Bucket(NAME)
acl = self._make_one(bucket)
self.assertIsNone(acl.user_project)
bucket.user_project = USER_PROJECT
self.assertEqual(acl.user_project, USER_PROJECT)
class Test_DefaultObjectACL(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.storage.acl import DefaultObjectACL
return DefaultObjectACL
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
NAME = 'name'
bucket = _Bucket(NAME)
acl = self._make_one(bucket)
self.assertEqual(acl.entities, {})
self.assertFalse(acl.loaded)
self.assertIs(acl.bucket, bucket)
self.assertEqual(acl.reload_path, '/b/%s/defaultObjectAcl' % NAME)
self.assertEqual(acl.save_path, '/b/%s' % NAME)
class Test_ObjectACL(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.storage.acl import ObjectACL
return ObjectACL
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
NAME = 'name'
BLOB_NAME = 'blob-name'
bucket = _Bucket(NAME)
blob = _Blob(bucket, BLOB_NAME)
acl = self._make_one(blob)
self.assertEqual(acl.entities, {})
self.assertFalse(acl.loaded)
self.assertIs(acl.blob, blob)
self.assertEqual(acl.reload_path, '/b/%s/o/%s/acl' % (NAME, BLOB_NAME))
self.assertEqual(acl.save_path, '/b/%s/o/%s' % (NAME, BLOB_NAME))
def test_user_project(self):
NAME = 'name'
BLOB_NAME = 'blob-name'
USER_PROJECT = 'user-project-123'
bucket = _Bucket(NAME)
blob = _Blob(bucket, BLOB_NAME)
acl = self._make_one(blob)
self.assertIsNone(acl.user_project)
blob.user_project = USER_PROJECT
self.assertEqual(acl.user_project, USER_PROJECT)
class _Blob(object):
user_project = None
def __init__(self, bucket, blob):
self.bucket = bucket
self.blob = blob
@property
def path(self):
return '%s/o/%s' % (self.bucket.path, self.blob)
class _Bucket(object):
user_project = None
def __init__(self, name):
self.name = name
@property
def path(self):
return '/b/%s' % self.name
class _Connection(object):
_delete_ok = False
def __init__(self, *responses):
self._responses = responses
self._requested = []
self._deleted = []
def api_request(self, **kw):
self._requested.append(kw)
response, self._responses = self._responses[0], self._responses[1:]
return response
class _Client(object):
def __init__(self, connection):
self._connection = connection
|
|
#!/usr/bin/env python
import numpy as np
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
def debye_fun(x):
"""
Return debye integral value
- calculation done using interpolation in a lookup table
- interpolation done in log-space where behavior is close to linear
- linear extrapolation is implemented manually
"""
if(np.isscalar(x)):
assert x >= 0, 'x values must be greater than zero.'
else:
#np.absolute(x)
assert all(x >= 0), 'x values must be greater than zero.'
# Lookup table
# interpolate in log space where behavior is nearly linear
debyex = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
1.1, 1.2, 1.3, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6, 2.8,
3.0, 3.2, 3.4, 3.6, 3.8, 4.0, 4.2, 4.4, 4.6, 4.8, 5.0,
5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0])
debyelogf = np.array([ 0.0, -0.03770187, -0.07580279, -0.11429475,
-0.15316866, -0.19241674, -0.2320279 , -0.27199378,
-0.31230405, -0.35294619, -0.39390815, -0.43518026,
-0.47674953, -0.51860413, -0.56072866, -0.64573892,
-0.73167389, -0.81841793, -0.90586032, -0.99388207,
-1.08236598, -1.17119911, -1.26026101, -1.34944183,
-1.43863241, -1.52771969, -1.61660856, -1.70519469,
-1.79338479, -1.88108917, -1.96822938, -2.05471771,
-2.14049175, -2.35134476, -2.55643273, -2.75507892,
-2.94682783, -3.13143746, -3.30880053, -3.47894273,
-3.64199587, -3.79820337, -3.94785746])
# Create interpolation function
logdebfun = interp1d(debyex, debyelogf, kind='cubic', bounds_error=False,
fill_value=np.nan)
logfval = logdebfun(x)
# Check for extrapolated values indicated by NaN
# - replace with linear extrapolation
logfval = np.where(x > debyex[-1], debyelogf[-1] + (x - debyex[-1]) *
(debyelogf[-1]-debyelogf[-2])/(debyex[-1]-debyex[-2]),
logfval)
# Exponentiate to get integral value
return np.exp(logfval)
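#quick check of debye_fun (illustrative): D(0) = 1 exactly, values decay
#monotonically, and the last point exercises the manual linear-extrapolation
#branch beyond the x = 10 end of the lookup table
"""
x_test = np.array([0.0, 1.0, 10.0, 12.0])
print(debye_fun(x_test))
"""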
def MGD_PowerLaw(volume, temperature, p_eos, Natom):
    """Mie-Grueneisen-Debye pressure (GPa) with a power-law gamma(V)."""
if np.isscalar(temperature):
temperature = temperature*np.ones(len(volume))
    assert len(p_eos) == 6, 'EOS parameter array must have length 6'
    assert len(volume) == len(temperature), \
        'temperature must be a scalar or match the length of volume'
    Kb = 13.806488e-24  # J per K (Boltzmann constant)
    P_conv_Fac = 160.217657*6.24e18  # GPa per (J/Ang^3): (GPa per eV/Ang^3) times (eV per J)
    C_DP = 3*Kb*Natom*P_conv_Fac  # Dulong-Petit limit for Cv, in GPa*Ang^3 per K
# Natom = # of atoms in unitcell
# V = volume of unitcell
    #Cold-curve (Vinet) parameters; reference values for 0% and 13% Fe:
#V0_0Fe = 162.12
#K0_0Fe = 262.3
#Kp_0Fe = 4.044
#V0_13Fe = 163.16
#K0_13Fe = 243.8
#Kp_13Fe = 4.160
V0 = p_eos[0]
K0 = p_eos[1]
Kp = p_eos[2]
#Thermal function parameters
#Theta0_0Fe = 1000
#Gamma0_0Fe = 1.675
#q_0Fe = 1.39
#Theta0_13Fe = 1000
#Gamma0_13Fe = 1.400
#q_13Fe = 0.56
Theta0 = p_eos[3]
Gamma0 = p_eos[4]
q = p_eos[5]
RefT = 300.0 #unit is K
x = (volume/V0)**(1./3)
Vinet = 3*K0*(1.-x)*x**(-2)*np.exp(3./2.*(Kp - 1.)*(1. - x))
    gamma = Gamma0*(volume/V0)**q
    theta = Theta0*np.exp(-(gamma - Gamma0)/q)
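    # Power-law Grueneisen closure used above (descriptive note):
    #   gamma(V) = Gamma0 * (V/V0)**q
    #   theta(V) = Theta0 * exp(-(gamma - Gamma0)/q)
    # so on compression gamma decreases and theta rises, as expected for a
    # stiffening lattice.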
#compute the P_thermal(V,300K)
Debye_Int = debye_fun(theta/RefT)
P_th_ref = C_DP*gamma*RefT*Debye_Int/volume
#compute P_th in different temperatures
P_th = (C_DP*gamma*temperature*debye_fun(theta/temperature))/volume
#compute P(V,T)
MGD = Vinet + P_th - P_th_ref
return MGD
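#illustrative check of MGD_PowerLaw, reusing the 13% Fe parameter set quoted
#in the comments above (sequence: V0, K0, Kp, Theta0, Gamma0, q) and the
#Natom=20 unit cell used by the plotting scripts below
"""
p_eos_13Fe = np.array([163.16, 243.8, 4.160, 1000, 1.400, 0.56])
vol = np.array([150.0, 140.0, 130.0])
print(MGD_PowerLaw(vol, 300.0, p_eos_13Fe, 20))   # room-T isotherm, GPa
print(MGD_PowerLaw(vol, 2000.0, p_eos_13Fe, 20))  # hot isotherm, GPa
"""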
def MGD_PowerLawShifted(volume, temperature, p_eos, Natom):
# Natom = # of atoms in unitcell
# V = volume of unitcell
    P_conv_Fac = 160.217657  # GPa per (eV/Ang^3)
    Kb = 8.6173324e-5  # eV per K
    C_DP = 3*Kb*Natom  # Dulong-Petit limit for Cv
#Vinet function parameters
#sequence of the p_eos: V0, K0, Kp, theta0, gamma0, q
V0 = p_eos[0] #V0_Ne = 22.234
K0 = p_eos[1] #K0_Ne = 1.070
Kp = p_eos[2] #Kp_Ne = 8.40
#Thermal function parameters
Theta0 = p_eos[3] #Theta0_Ne = 75.1
Gamma0 = p_eos[4] #Gamma0_Ne = 2.442
q = p_eos[5] #q_Ne = 0.97
#RefT = 0
x = (volume/V0)**(1./3)
Vinet = 3.*K0*(1-x)*x**(-2)*np.exp(3./2.*(Kp - 1.)*(1-x)) #Pcold = Vinet_Ne
gammaV = Gamma0*x**(3*q)+1./2
thetaV = Theta0*x**(-3./2)*np.exp(Gamma0/q*((1-x**(3.*q))))
debye_Int = debye_fun(thetaV/temperature)
P_th = (C_DP*temperature*gammaV/volume*debye_Int)*P_conv_Fac
#compute P(V,T)
MGD = Vinet + P_th
return MGD
"""
#test Dewaele's table
p_eos = np.array([22.234,1.070,8.40,75.1,2.442,0.97])
volume = np.array([13.69743329, 12.31533725, 10.845, 10.305, 7.827])
temperature = np.array([298,298,500,750,900])
print (MGD_PowerLawShifted(volume, temperature,p_eos,4))
"""
#plot Dewaele's table
#sequence of the p_eos: V0, K0, Kp, theta0, gamma0, q
"""
p_eos = np.array([22.234,1.070,8.40,75.1,2.442,0.97])
Nat = 1
Nedat = np.loadtxt(fname='Ne.md', delimiter='|', skiprows=3)
#temp_298 = np.zeros([34])
vol_298 = np.zeros([34])
ob_298 = np.zeros([34])
#temp_500 = np.zeros([5])
vol_500 = np.zeros([5])
ob_500 = np.zeros([5])
#temp_600 = np.zeros([6])
vol_600 = np.zeros([6])
ob_600 = np.zeros([6])
#temp_750 = np.zeros([5])
vol_750 = np.zeros([5])
ob_750 = np.zeros([5])
#temp_900 = np.zeros([6])
vol_900 = np.zeros([6])
ob_900 = np.zeros([6])
i_298 = 0
i_500 = 0
i_600 = 0
i_750 = 0
i_900 = 0
for ind in range(len(Nedat)):
if Nedat[ind,0] == 298:
ob_298[i_298] = Nedat[ind,1]
vol_298[i_298] = Nedat[ind,2]
i_298 = i_298 + 1
if Nedat[ind,0] > 499 and Nedat[ind,0] < 502:
ob_500[i_500] = Nedat[ind,1]
vol_500[i_500] = Nedat[ind,2]
i_500 = i_500 + 1
if Nedat[ind,0] == 600:
ob_600[i_600] = Nedat[ind,1]
vol_600[i_600] = Nedat[ind,2]
i_600 = i_600 + 1
if Nedat[ind,0] == 750:
ob_750[i_750] = Nedat[ind,1]
vol_750[i_750] = Nedat[ind,2]
i_750 = i_750 + 1
if Nedat[ind,0] == 900:
ob_900[i_900] = Nedat[ind,1]
vol_900[i_900] = Nedat[ind,2]
i_900 = i_900 + 1
volume1 = np.linspace(0.2,1.05,200)*p_eos[0]
T = np.array([298, 500, 600, 750, 900])
model_298 = MGD_PowerLawShifted(volume1,T[0]*np.ones(volume1.shape),p_eos,Nat)
model_500 = MGD_PowerLawShifted(volume1,T[1]*np.ones(volume1.shape),p_eos,Nat)
model_600 = MGD_PowerLawShifted(volume1,T[2]*np.ones(volume1.shape),p_eos,Nat)
model_750 = MGD_PowerLawShifted(volume1,T[3]*np.ones(volume1.shape),p_eos,Nat)
model_900 = MGD_PowerLawShifted(volume1,T[4]*np.ones(volume1.shape),p_eos,Nat)
plt.plot(model_298,volume1,'k',label = '298 Model')
plt.plot(model_500,volume1,'c',label = '500 Model')
plt.plot(model_600,volume1,'r',label = '600 Model')
plt.plot(model_750,volume1,'m',label = '750 Model')
plt.plot(model_900,volume1,'y',label = '900 Model')
plt.plot(ob_298,vol_298, 'ko',label = '298')
plt.plot(ob_500,vol_500, 'co',label = '500')
plt.plot(ob_600,vol_600, 'ro',label = '600')
plt.plot(ob_750,vol_750, 'mo',label = '750')
plt.plot(ob_900,vol_900, 'yo',label = '900')
plt.ylabel('Volume[' r'$A^{3}$'']')
plt.xlabel('Pressure [GPa]')
plt.legend()
plt.show()
test298 = MGD_PowerLawShifted(vol_298,T[0]*np.ones(vol_298.shape),p_eos,1)
#print "vol_298",vol_298
#print test298
print test298 - ob_298
#print model_500 - ob_500
"""
#test Dr Wolf's table
#volume = np.array([146.59, 145.81, 144.97, 144.32, 146.35, 131.26,142.52,133.96,125.42,133.86,133.91,133.71,125.42,125.40,124.05])
#temperature = np.array([300,300,300,300,1700,300,1924,2375,2020,1755,1780,1740,2228,2240,2045])
#p_eos_0Fe = np.array([162.12,262.3,4.044,1000,1.675,1.39])
#print (MGD_Vinet(volume, temperature, p_eos1,20))
#plot 13% Fe
"""
p_eos = np.array([163.16,243.8,4.160,1000,1.400,0.56])
Pvdat = np.loadtxt(fname='Fe_13.md', delimiter='|', skiprows=3)
temp_300 = np.zeros([49])
vol_300 = np.zeros([49])
ob_300 = np.zeros([49])
i_300 = 0
for ind in range(len(Pvdat)):
if Pvdat[ind,0] == 300:
temp_300[i_300] = Pvdat[ind,0]
ob_300[i_300] = Pvdat[ind,1]
vol_300[i_300] = Pvdat[ind,2]
i_300 = i_300 + 1
#p_300 = MGD_Vinet(vol_300, temp_300,p_eos_13Fe,20)
plt.plot(ob_300,vol_300, 'ko',label = '300')
volume1 = np.linspace(0.2,1.05,200)*p_eos[0]
temp1 = np.zeros(len(volume1))
model_300 = MGD_PowerLaw(volume1,temp1+300,p_eos,20)
print(model_300)
plt.plot(model_300,volume1,'k',label = '300 Model')
plt.xlim([20,140])
plt.ylim([122,155])
plt.ylabel('Volume[' r'$A^{3}$'']')
plt.xlabel('Pressure [GPa]')
plt.legend()
plt.show()
"""
####color plot Wolf's PVTMgPvTange.txt
"""
Pvdat = np.loadtxt(fname='PVTMgPvTange.txt', skiprows=1)
volume = Pvdat[:,5]
experiment_P = Pvdat[:,1]
p_eos = np.array([162.12,262.3,4.044,1000,1.675,1.39])
volume1 = np.linspace(0.2,1.05,200)*p_eos[0]
T = np.array([300,500,700,900,1700,1900,2100,2300,2500])
model_P_300 = MGD_PowerLaw(volume1,T[0]*np.ones(volume1.shape),p_eos,20)
model_P_500 = MGD_PowerLaw(volume1,T[1]*np.ones(volume1.shape),p_eos,20)
model_P_700 = MGD_PowerLaw(volume1,T[2]*np.ones(volume1.shape),p_eos,20)
model_P_900 = MGD_PowerLaw(volume1,T[3]*np.ones(volume1.shape),p_eos,20)
model_P_1700 = MGD_PowerLaw(volume1,T[4]*np.ones(volume1.shape),p_eos,20)
model_P_1900 = MGD_PowerLaw(volume1,T[5]*np.ones(volume1.shape),p_eos,20)
model_P_2100 = MGD_PowerLaw(volume1,T[6]*np.ones(volume1.shape),p_eos,20)
model_P_2300 = MGD_PowerLaw(volume1,T[7]*np.ones(volume1.shape),p_eos,20)
model_P_2500 = MGD_PowerLaw(volume1,T[8]*np.ones(volume1.shape),p_eos,20)
plt.ylabel('Volume[' r'$A^{3}$'']')
plt.xlabel('Pressure [GPa]')
plt.clf()
cmap = plt.get_cmap('gist_rainbow')
plt.scatter(experiment_P,volume,30,Pvdat[:,3],'o',cmap=cmap,label='Pressure')
plt.colorbar(ticks=range(300,2500,500))
plt.clim([300, 2500])
plt.xlim([20,140])
plt.ylim([122,155])
legend = plt.legend(loc='upper right')
plt.legend()
plt.plot(model_P_300,volume1,c = cmap(30))
plt.plot(model_P_500,volume1,c = cmap(50))
plt.plot(model_P_700,volume1,c = cmap(70))
plt.plot(model_P_900,volume1,c = cmap(90))
plt.plot(model_P_1700,volume1,c = cmap(170))
plt.plot(model_P_1900,volume1,c = cmap(190))
plt.plot(model_P_2100,volume1,c = cmap(210))
plt.plot(model_P_2300,volume1,c = cmap(230))
plt.plot(model_P_2500,volume1,c = cmap(250))
plt.show()
"""
####color plot Wolf's PVTMgFePvWolf.txt
"""
Pvdat = np.loadtxt(fname='PVTMgFePvWolf.txt', skiprows=1)
volume = Pvdat[:,5]
experiment_P = Pvdat[:,1]
p_eos = np.array([163.16,243.8,4.160,1000,1.400,0.56])
volume1 = np.linspace(0.2,1.05,200)*p_eos[0]
T = np.array([300,500,700,900,1700,1900,2100,2300,2500])
model_P_300 = MGD_PowerLaw(volume1,T[0]*np.ones(volume1.shape),p_eos,20)
model_P_500 = MGD_PowerLaw(volume1,T[1]*np.ones(volume1.shape),p_eos,20)
model_P_700 = MGD_PowerLaw(volume1,T[2]*np.ones(volume1.shape),p_eos,20)
model_P_900 = MGD_PowerLaw(volume1,T[3]*np.ones(volume1.shape),p_eos,20)
model_P_1700 = MGD_PowerLaw(volume1,T[4]*np.ones(volume1.shape),p_eos,20)
model_P_1900 = MGD_PowerLaw(volume1,T[5]*np.ones(volume1.shape),p_eos,20)
model_P_2100 = MGD_PowerLaw(volume1,T[6]*np.ones(volume1.shape),p_eos,20)
model_P_2300 = MGD_PowerLaw(volume1,T[7]*np.ones(volume1.shape),p_eos,20)
model_P_2500 = MGD_PowerLaw(volume1,T[8]*np.ones(volume1.shape),p_eos,20)
"""
"""
plt.plot(experiment_P,volume,'ko',label = 'experiment')
plt.plot(model_P_300,volume1,'b',label = '300K')
plt.plot(model_P_500,volume1,'g',label = '500K')
plt.plot(model_P_700,volume1,'r',label = '700K')
plt.plot(model_P_900,volume1,'y',label = '900K')
plt.plot(model_P_1700,volume1,'b',label = '1700K')
plt.plot(model_P_1900,volume1,'g',label = '1900K')
plt.plot(model_P_2100,volume1,'r',label = '2100K')
plt.plot(model_P_2300,volume1,'y',label = '2300K')
plt.plot(model_P_2500,volume1,'b',label = '2500K')
#plt.xlim([20,140])
#plt.ylim([122,155])
"""
"""
plt.ylabel('Volume[' r'$A^{3}$'']')
plt.xlabel('Pressure [GPa]')
plt.clf()
"""
"""
plt.scatter(model_P_300,volume1,30,T[0]*np.ones(volume1.shape),'-',cmap=cmap,label='Pressure')
plt.scatter(model_P_500,volume1,30,T[1]*np.ones(volume1.shape),'-',cmap=cmap,label='Pressure')
plt.scatter(model_P_700,volume1,30,T[2]*np.ones(volume1.shape),'-',cmap=cmap,label='Pressure')
plt.scatter(model_P_900,volume1,30,T[3]*np.ones(volume1.shape),'-',cmap=cmap,label='Pressure')
plt.scatter(model_P_1700,volume1,30,T[4]*np.ones(volume1.shape),'-',cmap=cmap,label='Pressure')
plt.scatter(model_P_1900,volume1,30,T[5]*np.ones(volume1.shape),'-',cmap=cmap,label='Pressure')
plt.scatter(model_P_2100,volume1,30,T[6]*np.ones(volume1.shape),'-',cmap=cmap,label='Pressure')
plt.scatter(model_P_2300,volume1,30,T[7]*np.ones(volume1.shape),'-',cmap=cmap,label='Pressure')
plt.scatter(model_P_2500,volume1,30,T[8]*np.ones(volume1.shape),'-',cmap=cmap,label='Pressure')
"""
###original plotting script
"""
cmap = plt.get_cmap('gist_rainbow')
plt.scatter(experiment_P,volume,30,Pvdat[:,3],'o',cmap=cmap,label='Pressure')
plt.colorbar(ticks=range(300,2500,500))
plt.clim([300, 2500])
plt.xlim([20,140])
plt.ylim([122,155])
legend = plt.legend(loc='upper right')
plt.legend()
plt.plot(model_P_300,volume1,c = cmap(30))
plt.plot(model_P_500,volume1,c = cmap(50))
plt.plot(model_P_700,volume1,c = cmap(70))
plt.plot(model_P_900,volume1,c = cmap(90))
plt.plot(model_P_1700,volume1,c = cmap(170))
plt.plot(model_P_1900,volume1,c = cmap(190))
plt.plot(model_P_2100,volume1,c = cmap(210))
plt.plot(model_P_2300,volume1,c = cmap(230))
plt.plot(model_P_2500,volume1,c = cmap(250))
plt.show()
"""
###test plotting below
"""
cmap = plt.get_cmap('gist_rainbow')
climvals = [300,2500]
plt.colorbar(ticks=range(300,2500,500))
plt.clim(climvals)
plt.xlim([20,140])
plt.ylim([122,155])
legend = plt.legend(loc='upper right')
Tcolbar = np.linspace(climvals[0],climvals[1],cmap.N)
Indcolbar = np.arange(0,cmap.N)
plt.scatter(experiment_P,volume,30,Pvdat[:,3],'o',cmap=cmap,label='Pressure')
# assumes the per-temperature curves are collected into a list, e.g.
# model_P = [model_P_300, model_P_500, ..., model_P_2500]
for ind, Tval in enumerate(T):
    indcmap = int(round(np.interp(Tval,Tcolbar,Indcolbar)))
    plt.plot(model_P[ind],volume1,c = cmap(indcmap))
plt.plot(model_P_500,volume1,c = cmap(50))
plt.plot(model_P_700,volume1,c = cmap(70))
plt.plot(model_P_900,volume1,c = cmap(90))
plt.plot(model_P_1700,volume1,c = cmap(170))
plt.plot(model_P_1900,volume1,c = cmap(190))
plt.plot(model_P_2100,volume1,c = cmap(210))
plt.plot(model_P_2300,volume1,c = cmap(230))
plt.plot(model_P_2500,volume1,c = cmap(250))
plt.show()
"""
|
|
# ==============================================================================
# Copyright 2018-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import os
import sys
import time
import getpass
from platform import system
import numpy as np
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import ops
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.framework import load_library
# This will turn off V1 API related warnings
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import ctypes
__all__ = [
'enable', 'disable', 'is_enabled', 'list_backends',
'set_backend', 'get_backend',
'start_logging_placement', 'stop_logging_placement',
    'is_logging_placement', '__version__', 'cxx11_abi_flag',
'is_grappler_enabled', 'update_config',
'set_disabled_ops', 'get_disabled_ops',
]
ext = 'dylib' if system() == 'Darwin' else 'so'
TF_VERSION = tf.version.VERSION
TF_GIT_VERSION = tf.version.GIT_VERSION
TF_VERSION_NEEDED = "${TensorFlow_VERSION}"
TF_GIT_VERSION_BUILT_WITH = "${TensorFlow_GIT_VERSION}"
# converting version representations to strings if not already
try:
TF_VERSION = str(TF_VERSION, 'ascii')
except TypeError: # will happen for python 2 or if already string
pass
try:
TF_VERSION_NEEDED = str(TF_VERSION_NEEDED, 'ascii')
except TypeError:
pass
try:
if TF_GIT_VERSION.startswith("b'"): # TF version can be a bytes __repr__()
TF_GIT_VERSION = eval(TF_GIT_VERSION)
TF_GIT_VERSION = str(TF_GIT_VERSION, 'ascii')
except TypeError:
pass
try:
if TF_GIT_VERSION_BUILT_WITH.startswith("b'"):
TF_GIT_VERSION_BUILT_WITH = eval(TF_GIT_VERSION_BUILT_WITH)
TF_GIT_VERSION_BUILT_WITH = str(TF_GIT_VERSION_BUILT_WITH, 'ascii')
except TypeError:
pass
# print("TensorFlow version installed: {0} ({1})".format(TF_VERSION,
# TF_GIT_VERSION))
# print("nGraph bridge built with: {0} ({1})".format(TF_VERSION_NEEDED,
# TF_GIT_VERSION_BUILT_WITH))
# We need to revisit this later. We can automate that using cmake configure
# command.
TF_INSTALLED_VER = TF_VERSION.split('.')
TF_NEEDED_VER = TF_VERSION_NEEDED.split('.')
ngraph_classic_loaded = True
ngraph_bridge_lib = None
if (TF_INSTALLED_VER[0] == TF_NEEDED_VER[0]) and \
(TF_INSTALLED_VER[1] == TF_NEEDED_VER[1]) and \
((TF_INSTALLED_VER[2].split('-'))[0] == (TF_NEEDED_VER[2].split('-'))[0]):
libpath = os.path.dirname(__file__)
full_lib_path = os.path.join(libpath, 'libngraph_bridge.' + ext)
_ = load_library.load_op_library(full_lib_path)
ngraph_bridge_lib = ctypes.cdll.LoadLibrary(full_lib_path)
else:
    raise ValueError(
        "Error: installed TensorFlow version {0} does not match the version "
        "the nGraph bridge was built with: {1}".format(TF_VERSION,
                                                       TF_VERSION_NEEDED))
def requested():
return ops.get_default_graph()._attr_scope({
"_ngraph_requested":
attr_value_pb2.AttrValue(b=True)
})
if ngraph_classic_loaded:
ngraph_bridge_lib.is_enabled.restype = ctypes.c_bool
ngraph_bridge_lib.list_backends.argtypes = [ctypes.POINTER(ctypes.c_char_p)]
ngraph_bridge_lib.list_backends.restype = ctypes.c_bool
ngraph_bridge_lib.set_backend.argtypes = [ctypes.c_char_p]
ngraph_bridge_lib.set_backend.restype = ctypes.c_bool
ngraph_bridge_lib.get_backend.argtypes = [ctypes.POINTER(ctypes.c_char_p)]
ngraph_bridge_lib.get_backend.restype = ctypes.c_bool
ngraph_bridge_lib.is_logging_placement.restype = ctypes.c_bool
ngraph_bridge_lib.version.restype = ctypes.c_char_p
ngraph_bridge_lib.ngraph_version.restype = ctypes.c_char_p
ngraph_bridge_lib.cxx11_abi_flag.restype = ctypes.c_int
ngraph_bridge_lib.is_grappler_enabled.restype = ctypes.c_bool
ngraph_bridge_lib.set_disabled_ops.argtypes = [ctypes.c_char_p]
ngraph_bridge_lib.get_disabled_ops.restype = ctypes.c_char_p
def enable():
ngraph_bridge_lib.enable()
def disable():
ngraph_bridge_lib.disable()
def is_enabled():
return ngraph_bridge_lib.is_enabled()
def list_backends():
len_backends = ngraph_bridge_lib.backends_len()
result = (ctypes.c_char_p * len_backends)()
if not ngraph_bridge_lib.list_backends(result):
raise Exception("Expected " + str(len_backends) +
" backends, but got some other number of backends")
list_result = list(result)
# convert bytes to string required for py3 (encode/decode bytes)
backend_list = []
for backend in list_result:
backend_list.append(backend.decode("utf-8"))
return backend_list
def set_backend(backend):
if not ngraph_bridge_lib.set_backend(backend.encode("utf-8")):
raise Exception("Backend " + backend + " unavailable.")
def get_backend():
result = ctypes.c_char_p()
if not ngraph_bridge_lib.get_backend(ctypes.byref(result)):
raise Exception("Cannot get currently set backend")
return result.value.decode("utf-8")
def start_logging_placement():
ngraph_bridge_lib.start_logging_placement()
def stop_logging_placement():
ngraph_bridge_lib.stop_logging_placement()
def is_logging_placement():
return ngraph_bridge_lib.is_logging_placement()
def cxx11_abi_flag():
return ngraph_bridge_lib.cxx11_abi_flag()
def is_grappler_enabled():
return ngraph_bridge_lib.is_grappler_enabled()
def update_config(config, backend_name="CPU", device_id=""):
    # Update the session config only when grappler is enabled; note that
    # backend_name is currently unused and kept for API compatibility.
    if ngraph_bridge_lib.is_grappler_enabled():
opt_name = 'ngraph-optimizer'
# If the config already has ngraph-optimizer, then do not update it
if config.HasField('graph_options'):
if config.graph_options.HasField('rewrite_options'):
custom_opts = config.graph_options.rewrite_options.custom_optimizers
for i in range(len(custom_opts)):
if custom_opts[i].name == opt_name:
return config
rewriter_options = rewriter_config_pb2.RewriterConfig()
        rewriter_options.meta_optimizer_iterations = (
            rewriter_config_pb2.RewriterConfig.ONE)
        rewriter_options.min_graph_nodes = -1
ngraph_optimizer = rewriter_options.custom_optimizers.add()
ngraph_optimizer.name = opt_name
ngraph_optimizer.parameter_map["device_id"].s = device_id.encode()
config.MergeFrom(tf.compat.v1.ConfigProto(graph_options=tf.compat.v1.GraphOptions(rewrite_options=rewriter_options)))
# For reference, if we want to provide configuration support(backend parameters)
# in a python script using the ngraph-optimizer
# rewriter_options = rewriter_config_pb2.RewriterConfig()
# rewriter_options.meta_optimizer_iterations=(rewriter_config_pb2.RewriterConfig.ONE)
# rewriter_options.min_graph_nodes=-1
# ngraph_optimizer = rewriter_options.custom_optimizers.add()
# ngraph_optimizer.name = "ngraph-optimizer"
# ngraph_optimizer.parameter_map["device_id"].s = device_id.encode()
# ngraph_optimizer.parameter_map["max_batch_size"].s = b'64'
# ngraph_optimizer.parameter_map["ice_cores"].s = b'12'
# config.MergeFrom(tf.compat.v1.ConfigProto(graph_options=tf.compat.v1.GraphOptions(rewrite_options=rewriter_options)))
return config
def set_disabled_ops(unsupported_ops):
ngraph_bridge_lib.set_disabled_ops(unsupported_ops.encode("utf-8"))
def get_disabled_ops():
return ngraph_bridge_lib.get_disabled_ops()
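# Usage sketch (illustrative, not part of the module API surface; the
# backend name "CPU" is an assumption -- available names depend on how the
# bridge was built, so query list_backends() first):
#
#   import ngraph_bridge
#   print(ngraph_bridge.list_backends())
#   ngraph_bridge.set_backend('CPU')
#   config = tf.compat.v1.ConfigProto()
#   config = ngraph_bridge.update_config(config)
#   with tf.compat.v1.Session(config=config) as sess:
#       sess.run(...)  # placement decisions can be traced via
#                      # start_logging_placement()/stop_logging_placement()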
__version__ = \
  "nGraph bridge version: " + str(ngraph_bridge_lib.version()) + "\n" + \
  "nGraph version used for this build: " + str(ngraph_bridge_lib.ngraph_version()) + "\n" + \
  "TensorFlow version used for this build: " + TF_GIT_VERSION_BUILT_WITH + "\n" + \
  "CXX11_ABI flag used for this build: " + str(ngraph_bridge_lib.cxx11_abi_flag()) + "\n" + \
  "nGraph bridge built with Grappler: " + str(ngraph_bridge_lib.is_grappler_enabled()) + "\n"
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import functools
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import uuidutils
import six
from senlin.common import consts
from senlin.common import context
from senlin.common import exception
from senlin.common.i18n import _
from senlin.common.i18n import _LE
from senlin.common.i18n import _LI
from senlin.common import messaging as rpc_messaging
from senlin.common import utils
from senlin.db import api as db_api
from senlin.engine.actions import base as action_mod
from senlin.engine import cluster as cluster_mod
from senlin.engine import dispatcher
from senlin.engine import environment
from senlin.engine import event as event_mod
from senlin.engine import health_manager
from senlin.engine import node as node_mod
from senlin.engine import scheduler
from senlin.engine import senlin_lock
from senlin.openstack.common import service
from senlin.policies import base as policy_base
from senlin.profiles import base as profile_base
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def request_context(func):
@functools.wraps(func)
def wrapped(self, ctx, *args, **kwargs):
if ctx is not None and not isinstance(ctx, context.RequestContext):
ctx = context.RequestContext.from_dict(ctx.to_dict())
try:
return func(self, ctx, *args, **kwargs)
except exception.SenlinException:
raise oslo_messaging.rpc.dispatcher.ExpectedException()
return wrapped
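# Illustrative note: the RPC-facing methods below simply stack this
# decorator, e.g.
#
#     @request_context
#     def profile_get(self, context, identity):
#         ...
#
# A context that is not yet a RequestContext is rebuilt from its to_dict()
# form, and SenlinException is converted into an oslo_messaging
# ExpectedException before crossing the RPC boundary.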
class EngineService(service.Service):
'''Lifecycle manager for a running service engine.
- All the methods in here are called from the RPC client.
    - If an RPC call does not have a corresponding method here, an exception
      will be thrown.
- Arguments to these calls are added dynamically and will be treated as
keyword arguments by the RPC client.
'''
def __init__(self, host, topic, manager=None):
super(EngineService, self).__init__()
self.host = host
self.topic = topic
self.dispatcher_topic = consts.ENGINE_DISPATCHER_TOPIC
self.health_mgr_topic = consts.ENGINE_HEALTH_MGR_TOPIC
# The following are initialized here, but assigned in start() which
# happens after the fork when spawning multiple worker processes
self.engine_id = None
self.TG = None
self.target = None
self._rpc_server = None
        # Initialize the global environment
environment.initialize()
def init_tgm(self):
self.TG = scheduler.ThreadGroupManager()
def start(self):
self.engine_id = senlin_lock.BaseLock.generate_engine_id()
self.init_tgm()
# create a dispatcher greenthread for this engine.
self.dispatcher = dispatcher.Dispatcher(self,
self.dispatcher_topic,
consts.RPC_API_VERSION,
self.TG)
LOG.debug("Starting dispatcher for engine %s" % self.engine_id)
self.dispatcher.start()
# create a health manager greenthread for this engine.
self.health_mgr = health_manager.Health_Manager(self,
self.health_mgr_topic,
consts.RPC_API_VERSION,
self.TG)
LOG.debug("Starting health manager for engine %s" % self.engine_id)
self.health_mgr.start()
target = oslo_messaging.Target(version=consts.RPC_API_VERSION,
server=self.host,
topic=self.topic)
self.target = target
self._rpc_server = rpc_messaging.get_rpc_server(target, self)
self._rpc_server.start()
super(EngineService, self).start()
def _stop_rpc_server(self):
# Stop RPC connection to prevent new requests
LOG.debug(_("Attempting to stop engine service..."))
try:
self._rpc_server.stop()
self._rpc_server.wait()
LOG.info(_LI('Engine service stopped successfully'))
except Exception as ex:
LOG.error(_LE('Failed to stop engine service: %s'),
six.text_type(ex))
def stop(self):
self._stop_rpc_server()
# Notify dispatcher to stop all action threads it started.
self.dispatcher.stop()
# Notify health_manager to stop
self.health_mgr.stop()
self.TG.stop()
# Terminate the engine process
LOG.info(_LI("All threads were gone, terminating engine"))
super(EngineService, self).stop()
@request_context
def get_revision(self, context):
return cfg.CONF.revision['senlin_engine_revision']
@request_context
def profile_type_list(self, context):
return environment.global_env().get_profile_types()
@request_context
def profile_type_schema(self, context, type_name):
profile = environment.global_env().get_profile(type_name)
data = dict((name, dict(schema))
for name, schema in profile.spec_schema.items())
return {'spec': data}
@request_context
def profile_find(self, context, identity, show_deleted=False):
'''Find a profile with the given identity (could be name or ID).'''
if uuidutils.is_uuid_like(identity):
profile = db_api.profile_get(context, identity,
show_deleted=show_deleted)
if not profile:
profile = db_api.profile_get_by_name(context, identity)
else:
profile = db_api.profile_get_by_name(context, identity)
if not profile:
profile = db_api.profile_get_by_short_id(context, identity)
if not profile:
raise exception.ProfileNotFound(profile=identity)
return profile
@request_context
def profile_list(self, context, limit=None, marker=None, sort_keys=None,
sort_dir=None, filters=None, show_deleted=False):
if limit is not None:
limit = utils.parse_int_param('limit', limit)
if show_deleted is not None:
show_deleted = utils.parse_bool_param('show_deleted',
show_deleted)
profiles = profile_base.Profile.load_all(context, limit=limit,
marker=marker,
sort_keys=sort_keys,
sort_dir=sort_dir,
filters=filters,
show_deleted=show_deleted)
return [p.to_dict() for p in profiles]
@request_context
def profile_create(self, context, name, type, spec, perm=None, tags=None):
LOG.info(_LI('Creating profile %s: %s'), type, name)
plugin = environment.global_env().get_profile(type)
kwargs = {
'spec': spec,
'permission': perm,
'tags': tags,
}
profile = plugin(context, type, name, **kwargs)
profile.validate()
profile.store(context)
return profile.to_dict()
@request_context
def profile_get(self, context, identity):
db_profile = self.profile_find(context, identity)
profile = profile_base.Profile.load(context, profile=db_profile)
return profile.to_dict()
@request_context
def profile_update(self, context, profile_id, name=None, spec=None,
permission=None, tags=None):
db_profile = self.profile_find(context, profile_id)
if spec is None:
profile = profile_base.Profile.load(context, profile=db_profile)
changed = False
if name is not None and name != profile.name:
profile.name = name
changed = True
if permission is not None and permission != profile.permission:
profile.permission = permission
changed = True
if tags is not None and tags != profile.tags:
profile.tags = tags
changed = True
if changed:
profile.store(context)
return profile.to_dict()
plugin = environment.global_env().get_profile(db_profile.type)
new_spec = copy.deepcopy(db_profile.spec)
new_spec.update(spec)
kwargs = {
'spec': new_spec,
'permission': permission or db_profile.permission,
'tags': tags or db_profile.tags,
}
new_name = name or db_profile.name
profile = plugin(context, db_profile.type, new_name, **kwargs)
profile.validate()
profile.store(context)
return profile.to_dict()
@request_context
def profile_delete(self, context, identity):
db_profile = self.profile_find(context, identity)
LOG.info(_LI('Deleting profile: %s'), identity)
profile_base.Profile.delete(context, db_profile.id)
return None
@request_context
def policy_type_list(self, context):
return environment.global_env().get_policy_types()
@request_context
def policy_type_schema(self, context, type_name):
policy_type = environment.global_env().get_policy(type_name)
data = dict((name, dict(schema))
for name, schema in policy_type.spec_schema.items())
return {'spec': data}
@request_context
def policy_find(self, context, identity, show_deleted=False):
'''Find a policy with the given identity (could be name or ID).'''
if uuidutils.is_uuid_like(identity):
policy = db_api.policy_get(context, identity,
show_deleted=show_deleted)
if not policy:
policy = db_api.policy_get_by_name(context, identity)
else:
policy = db_api.policy_get_by_name(context, identity)
if not policy:
policy = db_api.policy_get_by_short_id(context, identity)
if not policy:
raise exception.PolicyNotFound(policy=identity)
return policy
@request_context
def policy_list(self, context, limit=None, marker=None, sort_keys=None,
sort_dir=None, filters=None, show_deleted=None):
if limit is not None:
limit = utils.parse_int_param('limit', limit)
if show_deleted is not None:
show_deleted = utils.parse_bool_param('show_deleted',
show_deleted)
policies = policy_base.Policy.load_all(context, limit=limit,
marker=marker,
sort_keys=sort_keys,
sort_dir=sort_dir,
filters=filters,
show_deleted=show_deleted)
return [p.to_dict() for p in policies]
@request_context
def policy_create(self, context, name, type, spec, level=None,
cooldown=None):
level = utils.parse_int_param('level', level)
cooldown = utils.parse_int_param('cooldown', cooldown)
plugin = environment.global_env().get_policy(type)
LOG.info(_LI('Creating policy %s:%s'), type, name)
kwargs = {
'spec': spec,
'level': level,
'cooldown': cooldown,
}
policy = plugin(type, name, **kwargs)
policy.validate()
policy.store(context)
return policy.to_dict()
@request_context
def policy_get(self, context, identity):
db_policy = self.policy_find(context, identity)
policy = policy_base.Policy.load(context, policy=db_policy)
return policy.to_dict()
@request_context
def policy_update(self, context, identity, name=None, level=None,
cooldown=None):
db_policy = self.policy_find(context, identity)
policy = policy_base.Policy.load(context, policy=db_policy)
changed = False
if name is not None and name != policy.name:
policy.name = name
changed = True
if level is not None and level != policy.level:
level = utils.parse_int_param('level', level)
policy.level = level
changed = True
if cooldown is not None and cooldown != policy.cooldown:
cooldown = utils.parse_int_param('cooldown', cooldown)
policy.cooldown = cooldown
changed = True
if changed:
policy.store(context)
return policy.to_dict()
@request_context
def policy_delete(self, context, identity):
db_policy = self.policy_find(context, identity)
LOG.info(_LI('Delete policy: %s'), identity)
policy_base.Policy.delete(context, db_policy.id)
return None
@request_context
def cluster_list(self, context, limit=None, marker=None, sort_keys=None,
sort_dir=None, filters=None, tenant_safe=True,
show_deleted=False, show_nested=False):
limit = utils.parse_int_param('limit', limit)
tenant_safe = utils.parse_bool_param('tenant_safe', tenant_safe)
show_deleted = utils.parse_bool_param('show_deleted', show_deleted)
show_nested = utils.parse_bool_param('show_nested', show_nested)
clusters = cluster_mod.Cluster.load_all(context, limit=limit,
marker=marker,
sort_keys=sort_keys,
sort_dir=sort_dir,
filters=filters,
tenant_safe=tenant_safe,
show_deleted=show_deleted,
show_nested=show_nested)
return [cluster.to_dict() for cluster in clusters]
def cluster_find(self, context, identity, show_deleted=False):
'''Find a cluster with the given identity (could be name or ID).'''
if uuidutils.is_uuid_like(identity):
cluster = db_api.cluster_get(context, identity,
show_deleted=show_deleted)
# maybe the name is in uuid format, so if get by id returns None,
# we should get the info by name again
if not cluster:
cluster = db_api.cluster_get_by_name(context, identity)
else:
cluster = db_api.cluster_get_by_name(context, identity)
# maybe it is a short form of UUID
if not cluster:
cluster = db_api.cluster_get_by_short_id(context, identity)
if not cluster:
raise exception.ClusterNotFound(cluster=identity)
return cluster
@request_context
def cluster_get(self, context, identity):
db_cluster = self.cluster_find(context, identity)
cluster = cluster_mod.Cluster.load(context, cluster=db_cluster)
return cluster.to_dict()
@request_context
def cluster_create(self, context, name, size, profile_id, parent=None,
tags=None, timeout=None):
db_profile = self.profile_find(context, profile_id)
size = utils.parse_int_param(consts.CLUSTER_SIZE, size)
if timeout is not None:
timeout = utils.parse_int_param(consts.CLUSTER_TIMEOUT, timeout)
LOG.info(_LI('Creating cluster %s'), name)
ctx = context.to_dict()
kwargs = {
'user': ctx.get('username', ''),
'project': ctx.get('tenant_id', ''),
'parent': parent,
'timeout': timeout,
'tags': tags
}
cluster = cluster_mod.Cluster(name, db_profile.id, size, **kwargs)
cluster.store(context)
# Build an Action for cluster creation
action = action_mod.Action(context, 'CLUSTER_CREATE',
name='cluster_create_%s' % cluster.id[:8],
target=cluster.id,
cause=action_mod.CAUSE_RPC)
action.store(context)
# Notify Dispatchers that a new action has been ready.
dispatcher.notify(context, self.dispatcher.NEW_ACTION,
None, action_id=action.id)
        # Return the cluster dictionary, with the new action's ID carried
        # along as an extra key
result = cluster.to_dict()
result['action'] = action.id
return result
@request_context
def cluster_update(self, context, identity, name=None, profile_id=None,
parent=None, tags=None, timeout=None):
def update_cluster_properties(cluster):
changed = False
            # Check whether fields other than profile_id need to be changed
if name is not None and name != cluster.name:
cluster.name = name
changed = True
if parent is not None:
db_parent = self.cluster_find(context, parent)
if cluster.parent != db_parent.id:
cluster.parent = db_parent.id
changed = True
if tags is not None and tags != cluster.tags:
cluster.tags = tags
changed = True
if timeout is not None and timeout != cluster.timeout:
cluster.timeout = utils.parse_int_param(consts.CLUSTER_TIMEOUT,
timeout)
changed = True
if changed is True:
cluster.store(context)
return cluster.to_dict()
# Get the database representation of the existing cluster
db_cluster = self.cluster_find(context, identity)
cluster = cluster_mod.Cluster.load(context, cluster=db_cluster)
update_cluster_properties(cluster)
if profile_id is None or profile_id == cluster.profile_id:
return cluster.to_dict()
if cluster.status == cluster.ERROR:
msg = _('Updating a cluster when it is in error state')
raise exception.NotSupported(feature=msg)
new_profile = self.profile_find(context, profile_id)
old_profile = self.profile_find(context, cluster.profile_id)
if new_profile.type != old_profile.type:
msg = _('Cannot update a cluster to a different profile type, '
'operation aborted.')
raise exception.ProfileTypeNotMatch(message=msg)
LOG.info(_LI("Updating cluster '%(cluster)s' to profile "
"'%(profile)s'.") % {'cluster': identity,
'profile': profile_id})
action = action_mod.Action(context, 'CLUSTER_UPDATE',
target=cluster.id,
cause=action_mod.CAUSE_RPC,
inputs={'profile_id': new_profile.id})
action.store(context)
# TODO(anyone): Uncomment the following line when update action
# is implemented.
# dispatcher.notify(context, self.dispatcher.NEW_ACTION,
# None, action_id=action.id)
result = cluster.to_dict()
result['action'] = action.id
return result
@request_context
def cluster_add_nodes(self, context, identity, nodes):
db_cluster = self.cluster_find(context, identity)
found = []
not_found = []
bad_nodes = []
owned_nodes = []
for node in nodes:
try:
db_node = self.node_find(context, node)
                # Reject nodes that are not ACTIVE or that already belong
                # to a cluster
if db_node.status != node_mod.Node.ACTIVE:
bad_nodes.append(db_node.id)
elif db_node.cluster_id is not None:
owned_nodes.append(node)
else:
found.append(db_node.id)
except exception.NodeNotFound:
not_found.append(node)
error = None
if len(bad_nodes) > 0:
error = _("Nodes are not ACTIVE: %s") % bad_nodes
elif len(owned_nodes) > 0:
error = _("Nodes %s owned by other cluster, need to delete "
"them from those clusters first.") % owned_nodes
elif len(not_found) > 0:
error = _("Nodes not found: %s") % not_found
elif len(found) == 0:
error = _("No nodes to add: %s") % nodes
if error is not None:
raise exception.SenlinBadRequest(msg=error)
action_name = 'cluster_add_nodes_%s' % db_cluster.id[:8]
action = action_mod.Action(context, 'CLUSTER_ADD_NODES',
name=action_name,
target=db_cluster.id,
cause=action_mod.CAUSE_RPC,
inputs={'nodes': found})
action.store(context)
dispatcher.notify(context, self.dispatcher.NEW_ACTION,
None, action_id=action.id)
return {'action': action.id}
@request_context
def cluster_del_nodes(self, context, identity, nodes):
db_cluster = self.cluster_find(context, identity)
found = []
not_found = []
bad_nodes = []
for node in nodes:
try:
db_node = self.node_find(context, node)
if db_node.cluster_id != db_cluster.id:
bad_nodes.append(db_node.id)
else:
found.append(db_node.id)
except exception.NodeNotFound:
not_found.append(node)
error = None
if len(not_found) > 0:
error = _("Nodes %s not found") % nodes
elif len(bad_nodes) > 0:
error = _("Nodes %s not member of specified cluster") % bad_nodes
elif len(found) == 0:
error = _("No nodes specified") % nodes
if error is not None:
raise exception.SenlinBadRequest(msg=error)
action_name = 'cluster_del_nodes_%s' % db_cluster.id[:8]
action = action_mod.Action(context, 'CLUSTER_DEL_NODES',
name=action_name,
target=db_cluster.id,
cause=action_mod.CAUSE_RPC,
inputs={'nodes': found})
action.store(context)
dispatcher.notify(context, self.dispatcher.NEW_ACTION,
None, action_id=action.id)
return {'action': action.id}
@request_context
def cluster_scale_out(self, context, identity, count=None):
# Validation
db_cluster = self.cluster_find(context, identity)
delta = utils.parse_int_param('count', count, allow_zero=False)
if delta is not None:
LOG.info(_LI('Scaling out cluster %(name)s by %(delta)s nodes'),
{'name': identity, 'delta': delta})
inputs = {'count': delta}
else:
LOG.info(_LI('Scaling out cluster %s'), db_cluster.name)
inputs = {}
action_name = 'cluster_scale_out_%s' % db_cluster.id[:8]
action = action_mod.Action(context, 'CLUSTER_SCALE_OUT',
name=action_name,
target=db_cluster.id,
inputs=inputs,
cause=action_mod.CAUSE_RPC)
action.store(context)
dispatcher.notify(context, self.dispatcher.NEW_ACTION,
None, action_id=action.id)
return {'action': action.id}
@request_context
def cluster_scale_in(self, context, identity, count=None):
db_cluster = self.cluster_find(context, identity)
delta = utils.parse_int_param('count', count, allow_zero=False)
if delta is not None:
LOG.info(_LI('Scaling in cluster %(name)s by %(delta)s nodes'),
{'name': identity, 'delta': delta})
inputs = {'count': delta}
else:
LOG.info(_LI('Scaling in cluster %s'), db_cluster.name)
inputs = {}
action_name = 'cluster_scale_in_%s' % db_cluster.id[:8]
action = action_mod.Action(context, 'CLUSTER_SCALE_IN',
name=action_name,
target=db_cluster.id,
inputs=inputs,
cause=action_mod.CAUSE_RPC)
action.store(context)
dispatcher.notify(context, self.dispatcher.NEW_ACTION,
None, action_id=action.id)
return {'action': action.id}
@request_context
def cluster_delete(self, context, identity):
cluster = self.cluster_find(context, identity)
LOG.info(_LI('Deleting cluster %s'), cluster.name)
action = action_mod.Action(context, 'CLUSTER_DELETE',
name='cluster_delete_%s' % cluster.id[:8],
target=cluster.id,
cause=action_mod.CAUSE_RPC)
action.store(context)
dispatcher.notify(context, self.dispatcher.NEW_ACTION,
None, action_id=action.id)
return {'action': action.id}
def node_find(self, context, identity, show_deleted=False):
        '''Find a node with the given identity (could be name or ID).'''
if uuidutils.is_uuid_like(identity):
node = db_api.node_get(context, identity,
show_deleted=show_deleted)
if not node:
node = db_api.node_get_by_name(context, identity)
else:
node = db_api.node_get_by_name(context, identity)
if not node:
node = db_api.node_get_by_short_id(context, identity)
if node is None:
raise exception.NodeNotFound(node=identity)
return node
@request_context
def node_list(self, context, cluster_id=None, show_deleted=False,
limit=None, marker=None, sort_keys=None, sort_dir=None,
filters=None, tenant_safe=True):
# Maybe the cluster_id is a name or a short ID
if cluster_id is not None:
db_cluster = self.cluster_find(context, cluster_id)
cluster_id = db_cluster.id
nodes = node_mod.Node.load_all(context, cluster_id=cluster_id,
show_deleted=show_deleted,
limit=limit, marker=marker,
sort_keys=sort_keys, sort_dir=sort_dir,
filters=filters,
tenant_safe=tenant_safe)
return [node.to_dict() for node in nodes]
@request_context
def node_create(self, context, name, profile_id, cluster_id=None,
role=None, tags=None):
db_profile = self.profile_find(context, profile_id)
if cluster_id is not None:
db_cluster = self.cluster_find(context, cluster_id)
cluster_id = db_cluster.id
if context.project_id != db_cluster.project:
msg = _('Node and cluster are from different project, '
'operation is disallowed.')
raise exception.ProjectNotMatch(message=msg)
if profile_id != db_cluster.profile_id:
node_profile = self.profile_find(context, profile_id)
cluster_profile = self.profile_find(context,
db_cluster.profile_id)
if node_profile.type != cluster_profile.type:
msg = _('Node and cluster have different profile type, '
'operation aborted.')
raise exception.ProfileTypeNotMatch(message=msg)
LOG.info(_LI('Creating node %s'), name)
# Create a node instance
tags = tags or {}
node = node_mod.Node(name, db_profile.id, cluster_id, context,
role=role, tags=tags)
node.store(context)
action = action_mod.Action(context, 'NODE_CREATE',
name='node_create_%s' % node.id[:8],
target=node.id,
cause=action_mod.CAUSE_RPC)
action.store(context)
dispatcher.notify(context, self.dispatcher.NEW_ACTION,
None, action_id=action.id)
# We return a node dictionary with an additional key (action) carried
result = node.to_dict()
result['action'] = action.id
return result
@request_context
def node_get(self, context, identity):
db_node = self.node_find(context, identity)
node = node_mod.Node.load(context, node=db_node)
return node.to_dict()
@request_context
def node_update(self, context, identity, name=None, profile_id=None,
role=None, tags=None):
db_node = self.node_find(context, identity)
node = node_mod.Node.load(context, node=db_node)
changed = False
if name is not None and name != node.name:
node.name = name
changed = True
if role is not None and role != node.role:
node.role = role
changed = True
if tags is not None and tags != node.tags:
node.tags = tags
changed = True
if changed is True:
node.store(context)
if profile_id is None:
return
# The profile_id could be a name or a short ID, check it
db_profile = self.profile_find(context, profile_id)
profile_id = db_profile.id
# check if profile_type matches
node_profile = self.profile_find(context, node.profile_id)
if node_profile.type != db_profile.type:
            msg = _('Cannot update a node to a different profile type, '
                    'operation aborted.')
raise exception.ProfileTypeNotMatch(message=msg)
LOG.info(_LI('Updating node %s'), identity)
action = action_mod.Action(context, 'NODE_UPDATE',
name='node_update_%s' % node.id[:8],
target=node.id,
cause=action_mod.CAUSE_RPC)
action.store(context)
# TODO(someone): uncomment this when it is implemented
# dispatcher.notify(context, self.dispatcher.NEW_ACTION,
# None, action_id=action.id)
return
@request_context
def node_delete(self, context, identity, force=False):
db_node = self.node_find(context, identity)
LOG.info(_LI('Deleting node %s'), identity)
node = node_mod.Node.load(context, node=db_node)
action = action_mod.Action(context, 'NODE_DELETE',
name='node_delete_%s' % node.id[:8],
target=node.id,
cause=action_mod.CAUSE_RPC)
action.store(context)
dispatcher.notify(context, self.dispatcher.NEW_ACTION,
None, action_id=action.id)
return action.to_dict()
@request_context
def node_join(self, context, identity, cluster_id):
db_node = self.node_find(context, identity)
db_cluster = self.cluster_find(context, cluster_id)
if db_node.project != db_cluster.project:
msg = _('Node and cluster are from different project, operation '
'is not allowed.')
raise exception.ProjectNotMatch(message=msg)
if db_node.profile_id != db_cluster.profile_id:
            node_profile = self.profile_find(context, db_node.profile_id)
            cluster_profile = self.profile_find(context,
                                                db_cluster.profile_id)
if node_profile.type != cluster_profile.type:
msg = _('Node and cluster have different profile type, '
'operation aborted.')
raise exception.ProfileTypeNotMatch(message=msg)
LOG.info(_LI('Joining node %(node)s to cluster %(cluster)s'),
{'node': identity, 'cluster': cluster_id})
action = action_mod.Action(context, 'NODE_JOIN',
name='node_join_%s' % db_node.id[:8],
target=db_node.id,
cause=action_mod.CAUSE_RPC,
inputs={'cluster_id': db_cluster.id})
action.store(context)
dispatcher.notify(context, self.dispatcher.NEW_ACTION,
None, action_id=action.id)
return {'action': action.id}
@request_context
def node_leave(self, context, identity):
db_node = self.node_find(context, identity)
LOG.info(_LI('Node %(node)s leaving cluster'), {'node': identity})
action = action_mod.Action(context, 'NODE_LEAVE',
name='node_leave_%s' % db_node.id[:8],
target=db_node.id,
cause=action_mod.CAUSE_RPC)
action.store(context)
dispatcher.notify(context, self.dispatcher.NEW_ACTION,
None, action_id=action.id)
return {'action': action.id}
@request_context
def cluster_policy_list(self, context, identity, filters=None,
sort_keys=None, sort_dir=None):
db_cluster = self.cluster_find(context, identity)
bindings = db_api.cluster_policy_get_all(context, db_cluster.id,
filters=filters,
sort_keys=sort_keys,
sort_dir=sort_dir)
result = []
for binding in bindings:
result.append({
'id': binding.id,
'cluster_id': binding.cluster_id,
'cluster_name': binding.cluster.name,
'policy_id': binding.policy_id,
'policy_name': binding.policy.name,
'policy_type': binding.policy.type,
'priority': binding.priority,
'level': binding.level,
'cooldown': binding.cooldown,
'enabled': binding.enabled,
})
return result
@request_context
def cluster_policy_get(self, context, identity, policy_id):
db_cluster = self.cluster_find(context, identity)
db_policy = self.policy_find(context, policy_id)
binding = db_api.cluster_policy_get(context, db_cluster.id,
db_policy.id)
return {
'id': binding.id,
'cluster_id': binding.cluster_id,
'cluster_name': binding.cluster.name,
'policy_id': binding.policy_id,
'policy_name': binding.policy.name,
'policy_type': binding.policy.type,
'priority': binding.priority,
'level': binding.level,
'cooldown': binding.cooldown,
'enabled': binding.enabled,
}
@request_context
def cluster_policy_attach(self, context, identity, policy, priority=None,
level=None, cooldown=None, enabled=True):
db_cluster = self.cluster_find(context, identity)
db_policy = self.policy_find(context, policy)
priority = utils.parse_int_param('priority', priority) or 50
level = utils.parse_int_param('level', level) or 50
cooldown = utils.parse_int_param('cooldown', cooldown) or 0
        enabled = utils.parse_bool_param('enabled', enabled)
LOG.info(_LI('Attaching policy %(policy)s to cluster %(cluster)s'),
{'policy': policy, 'cluster': identity})
inputs = {
'policy_id': db_policy.id,
'priority': priority,
'level': level,
'cooldown': cooldown,
'enabled': enabled,
}
action_name = 'cluster_attach_policy_%s' % db_cluster.id[:8]
action = action_mod.Action(context, consts.CLUSTER_ATTACH_POLICY,
name=action_name,
target=db_cluster.id,
inputs=inputs,
cause=action_mod.CAUSE_RPC)
action.store(context)
dispatcher.notify(context, self.dispatcher.NEW_ACTION,
None, action_id=action.id)
return {'action': action.id}
@request_context
def cluster_policy_detach(self, context, identity, policy):
db_cluster = self.cluster_find(context, identity)
db_policy = self.policy_find(context, policy)
LOG.info(_LI('Detaching policy %(policy)s from cluster %(cluster)s'),
{'policy': policy, 'cluster': identity})
action_name = 'cluster_detach_policy_%s' % db_cluster.id[:8]
action = action_mod.Action(context, consts.CLUSTER_DETACH_POLICY,
name=action_name,
target=db_cluster.id,
inputs={'policy_id': db_policy.id},
cause=action_mod.CAUSE_RPC)
action.store(context)
dispatcher.notify(context, self.dispatcher.NEW_ACTION,
None, action_id=action.id)
return {'action': action.id}
@request_context
def cluster_policy_update(self, context, identity, policy, priority=None,
level=None, cooldown=None, enabled=None):
db_cluster = self.cluster_find(context, identity)
db_policy = self.policy_find(context, policy)
inputs = {'policy_id': db_policy.id}
if priority is not None:
inputs['priority'] = utils.parse_int_param('priority', priority)
if level is not None:
inputs['level'] = utils.parse_int_param('level', level)
if cooldown is not None:
inputs['cooldown'] = utils.parse_int_param('cooldown', cooldown)
if enabled is not None:
            inputs['enabled'] = utils.parse_bool_param('enabled', enabled)
LOG.info(_LI('Updating policy %(policy)s on cluster %(cluster)s'),
{'policy': policy, 'cluster': identity})
action_name = 'cluster_update_policy_%s' % db_cluster.id[:8]
action = action_mod.Action(context, consts.CLUSTER_UPDATE_POLICY,
name=action_name,
target=db_cluster.id,
inputs=inputs,
cause=action_mod.CAUSE_RPC)
action.store(context)
dispatcher.notify(context, self.dispatcher.NEW_ACTION,
None, action_id=action.id)
return {'action': action.id}
def action_find(self, context, identity):
'''Find an action with the given identity (could be name or ID).'''
if uuidutils.is_uuid_like(identity):
action = db_api.action_get(context, identity)
if not action:
action = db_api.action_get_by_name(context, identity)
else:
action = db_api.action_get_by_name(context, identity)
if not action:
action = db_api.action_get_by_short_id(context, identity)
if not action:
raise exception.ActionNotFound(action=identity)
return action
@request_context
def action_list(self, context, filters=None, limit=None, marker=None,
sort_keys=None, sort_dir=None, show_deleted=False):
limit = utils.parse_int_param('limit', limit)
show_deleted = utils.parse_bool_param('show_deleted', show_deleted)
all_actions = action_mod.Action.load_all(context, filters=filters,
limit=limit, marker=marker,
sort_keys=sort_keys,
sort_dir=sort_dir,
show_deleted=show_deleted)
results = []
for action in all_actions:
raw = action.to_dict()
del raw['context']
results.append(raw)
return results
@request_context
def action_create(self, context, name, target, action, params):
LOG.info(_LI('Creating action %s'), name)
# Create a node instance
act = action_mod.Action(context, action, target,
name=name, params=params)
act.store(context)
# TODO(Anyone): Uncomment this to notify the dispatcher
# dispatcher.notify(context, self.dispatcher.NEW_ACTION,
# None, action_id=action.id)
return act.to_dict()
@request_context
def action_get(self, context, identity):
db_action = self.action_find(context, identity)
action = action_mod.Action.load(context, action=db_action)
return action.to_dict()
def event_find(self, context, identity, show_deleted=False):
        '''Find an event with the given identity (could be ID or short ID).'''
if uuidutils.is_uuid_like(identity):
event = db_api.event_get(context, identity)
if not event:
event = db_api.event_get_by_short_id(context, identity)
else:
event = db_api.event_get_by_short_id(context, identity)
if not event:
            raise exception.EventNotFound(event=identity)
return event
@request_context
def event_list(self, context, filters=None, limit=None, marker=None,
sort_keys=None, sort_dir=None, tenant_safe=True,
show_deleted=False):
        all_events = event_mod.Event.load_all(context, filters=filters,
                                              limit=limit, marker=marker,
                                              sort_keys=sort_keys,
                                              sort_dir=sort_dir,
                                              tenant_safe=tenant_safe,
                                              show_deleted=show_deleted)
        results = [event.to_dict() for event in all_events]
return results
@request_context
def event_get(self, context, identity):
db_event = self.event_find(context, identity)
event = event_mod.Event.load(context, db_event=db_event)
return event.to_dict()
|
|
"""
Interface to Constrained Optimization By Linear Approximation
Functions
---------
.. autosummary::
:toctree: generated/
fmin_cobyla
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.lib.six import callable
from scipy.optimize import _cobyla
from .optimize import OptimizeResult, _check_unknown_options
__all__ = ['fmin_cobyla']
def fmin_cobyla(func, x0, cons, args=(), consargs=None, rhobeg=1.0,
rhoend=1e-4, iprint=1, maxfun=1000, disp=None, catol=2e-4):
"""
Minimize a function using the Constrained Optimization BY Linear
Approximation (COBYLA) method. This method wraps a FORTRAN
    implementation of the algorithm.
Parameters
----------
func : callable
Function to minimize. In the form func(x, \\*args).
x0 : ndarray
Initial guess.
cons : sequence
Constraint functions; must all be ``>=0`` (a single function
if only 1 constraint). Each function takes the parameters `x`
as its first argument.
args : tuple
Extra arguments to pass to function.
consargs : tuple
Extra arguments to pass to constraint functions (default of None means
use same extra arguments as those passed to func).
Use ``()`` for no extra arguments.
    rhobeg : float
Reasonable initial changes to the variables.
    rhoend : float
Final accuracy in the optimization (not precisely guaranteed). This
is a lower bound on the size of the trust region.
iprint : {0, 1, 2, 3}
Controls the frequency of output; 0 implies no output. Deprecated.
disp : {0, 1, 2, 3}
        Overrides the iprint interface. Preferred.
maxfun : int
Maximum number of function evaluations.
catol : float
Absolute tolerance for constraint violations.
Returns
-------
x : ndarray
The argument that minimises `f`.
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'COBYLA' `method` in particular.
Notes
-----
This algorithm is based on linear approximations to the objective
function and each constraint. We briefly describe the algorithm.
Suppose the function is being minimized over k variables. At the
jth iteration the algorithm has k+1 points v_1, ..., v_(k+1),
    an approximate solution x_j, and a radius RHO_j. The algorithm
    constructs linear (i.e. linear plus a constant) approximations to the
    objective function and constraint functions such that their function
    values agree with those of the true functions on the k+1 points
    v_1, ..., v_(k+1).
This gives a linear program to solve (where the linear approximations
of the constraint functions are constrained to be non-negative).
However the linear approximations are likely only good
approximations near the current simplex, so the linear program is
given the further requirement that the solution, which
will become x_(j+1), must be within RHO_j from x_j. RHO_j only
decreases, never increases. The initial RHO_j is rhobeg and the
final RHO_j is rhoend. In this way COBYLA's iterations behave
like a trust region algorithm.
Additionally, the linear program may be inconsistent, or the
approximation may give poor improvement. For details about
how these issues are resolved, as well as how the points v_i are
updated, refer to the source code or the references below.
References
----------
Powell M.J.D. (1994), "A direct search optimization method that models
the objective and constraint functions by linear interpolation.", in
Advances in Optimization and Numerical Analysis, eds. S. Gomez and
J-P Hennart, Kluwer Academic (Dordrecht), pp. 51-67
Powell M.J.D. (1998), "Direct search algorithms for optimization
calculations", Acta Numerica 7, 287-336
Powell M.J.D. (2007), "A view of algorithms for optimization without
derivatives", Cambridge University Technical Report DAMTP 2007/NA03
Examples
--------
Minimize the objective function f(x,y) = x*y subject
to the constraints x**2 + y**2 < 1 and y > 0::
>>> def objective(x):
... return x[0]*x[1]
...
>>> def constr1(x):
... return 1 - (x[0]**2 + x[1]**2)
...
>>> def constr2(x):
... return x[1]
...
>>> fmin_cobyla(objective, [0.0, 0.1], [constr1, constr2], rhoend=1e-7)
Normal return from subroutine COBYLA
NFVALS = 64 F =-5.000000E-01 MAXCV = 1.998401E-14
X =-7.071069E-01 7.071067E-01
array([-0.70710685, 0.70710671])
The exact solution is (-sqrt(2)/2, sqrt(2)/2).
"""
err = "cons must be a sequence of callable functions or a single"\
" callable function."
try:
len(cons)
except TypeError:
if callable(cons):
cons = [cons]
else:
raise TypeError(err)
else:
for thisfunc in cons:
if not callable(thisfunc):
raise TypeError(err)
if consargs is None:
consargs = args
# build constraints
con = tuple({'type': 'ineq', 'fun': c, 'args': consargs} for c in cons)
# options
if disp is not None:
iprint = disp
opts = {'rhobeg': rhobeg,
'tol': rhoend,
'iprint': iprint,
'disp': iprint != 0,
'maxiter': maxfun,
'catol': catol}
sol = _minimize_cobyla(func, x0, args, constraints=con,
**opts)
if iprint > 0 and not sol['success']:
print("COBYLA failed to find a solution: %s" % (sol.message,))
return sol['x']
def _minimize_cobyla(fun, x0, args=(), constraints=(),
rhobeg=1.0, tol=1e-4, iprint=1, maxiter=1000,
disp=False, catol=2e-4, **unknown_options):
"""
Minimize a scalar function of one or more variables using the
Constrained Optimization BY Linear Approximation (COBYLA) algorithm.
Options for the COBYLA algorithm are:
rhobeg : float
Reasonable initial changes to the variables.
tol : float
Final accuracy in the optimization (not precisely guaranteed).
This is a lower bound on the size of the trust region.
    disp : bool
        Set to True to print convergence messages. If False, `iprint` is
        ignored and set to 0.
maxiter : int
Maximum number of function evaluations.
catol : float
        Absolute tolerance for constraint violations.
This function is called by the `minimize` function with
`method=COBYLA`. It is not supposed to be called directly.
"""
_check_unknown_options(unknown_options)
maxfun = maxiter
rhoend = tol
if not disp:
iprint = 0
# check constraints
if isinstance(constraints, dict):
constraints = (constraints, )
for ic, con in enumerate(constraints):
# check type
try:
ctype = con['type'].lower()
except KeyError:
raise KeyError('Constraint %d has no type defined.' % ic)
except TypeError:
raise TypeError('Constraints must be defined using a '
'dictionary.')
except AttributeError:
raise TypeError("Constraint's type must be a string.")
else:
if ctype != 'ineq':
raise ValueError("Constraints of type '%s' not handled by "
"COBYLA." % con['type'])
# check function
if 'fun' not in con:
raise KeyError('Constraint %d has no function defined.' % ic)
# check extra arguments
if 'args' not in con:
con['args'] = ()
m = len(constraints)
def calcfc(x, con):
f = fun(x, *args)
for k, c in enumerate(constraints):
con[k] = c['fun'](x, *c['args'])
return f
info = np.zeros(4, np.float64)
xopt, info = _cobyla.minimize(calcfc, m=m, x=np.copy(x0), rhobeg=rhobeg,
rhoend=rhoend, iprint=iprint, maxfun=maxfun,
dinfo=info)
if info[3] > catol:
# Check constraint violation
info[0] = 4
return OptimizeResult(x=xopt,
status=int(info[0]),
success=info[0] == 1,
message={1: 'Optimization terminated successfully.',
2: 'Maximum number of function evaluations has '
'been exceeded.',
3: 'Rounding errors are becoming damaging in '
'COBYLA subroutine.',
4: 'Did not converge to a solution satisfying '
'the constraints. See `maxcv` for magnitude '
'of violation.'
}.get(info[0], 'Unknown exit status.'),
nfev=int(info[1]),
fun=info[2],
maxcv=info[3])
if __name__ == '__main__':
from math import sqrt
def fun(x):
return x[0] * x[1]
def cons(x):
return 1 - x[0]**2 - x[1]**2
x = fmin_cobyla(fun, [1., 1.], cons, iprint=3, disp=1)
print('\nTheoretical solution: %e, %e' % (1. / sqrt(2.), -1. / sqrt(2.)))
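    # Companion sketch (illustrative addition, not part of the original
    # demo): the same problem solved through the public `minimize`
    # interface noted in the "See also" section, which dispatches to
    # _minimize_cobyla above.
    from scipy.optimize import minimize
    sol = minimize(fun, [1., 1.], method='COBYLA',
                   constraints={'type': 'ineq', 'fun': cons},
                   options={'tol': 1e-7, 'disp': True})
    print('minimize(method="COBYLA") found: %s' % (sol.x,))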
|
|
# -*- coding: utf-8 -*-
"""
Display number of todos and more for Thunderbird.
Configuration parameters:
cache_timeout: refresh interval for this module (default 60)
format: display format for this module (default '{format_todo}')
format_datetime: specify strftime formatting to use (default {})
format_separator: show separator if more than one (default ' ')
format_todo: display format for todos
(default '\?if=!todo_completed {title}')
profile: specify a profile path, otherwise first available profile
eg '~/.thunderbird/abcd1234.default' (default None)
sort: specify a tuple, eg ('placeholder_name', reverse_boolean)
to sort by; excluding placeholder indexes (default ())
thresholds: specify color thresholds to use (default [])
Format placeholders:
{todo_total} eg 5
{todo_completed} eg 2
{todo_incompleted} eg 3
{format_todo} format for todos
format_todo placeholders:
{index_total} eg 1, 2, 3
{index_completed} eg 1, 2, 3
{index_incompleted} eg 1, 2, 3
{alarm_last_ack} eg None, 1513291952000000
{cal_id} eg 966bd855-5e71-4168-8072-c98f244ed825
{flags} eg 4, 276
{ical_status} eg None, IN-PROCESS, COMPLETED
{id} eg 87e9bfc9-eaad-4aa6-ad5f-adbf6d7a11a5
{last_modified} eg 1513276147000000
{offline_journal} eg None
{priority} eg None, # None=None, 0=None, 1=High, 5=Normal, 9=Low
{privacy} eg None, CONFIDENTIAL
{recurrence_id} eg None
{recurrence_id_tz} eg None, UTC
{time_created} eg 1513276147000000
{title} eg New Task
{todo_complete} eg None
{todo_completed} eg None, 1513281528000000
{todo_completed_tz} eg None, UTC
{todo_due} eg None, 1513292400000000
{todo_due_tz} eg None, America/Chicago
{todo_entry} eg None, 1513292400000000
{todo_entry_tz} eg None, America/Chicago
{todo_stamp} eg 1513276147000000
format_datetime placeholders:
KEY: alarm_last_ack, last_modified, time_created, todo,
todo_completed, todo_entry, todo_stamp
VALUE: % strftime characters to be translated, eg '%b %d' ----> 'Dec 14'
SEE EXAMPLE BELOW: "show incompleted titles with last modified time"
Color thresholds:
xxx: print a color based on the value of `xxx` placeholder
Requires:
thunderbird: standalone mail and news reader
Examples:
```
# show number of incompleted titles
thunderbird_todos {
format = '{todo_incompleted} incompleted todos'
}
# show rainbow number of incompleted titles
thunderbird_todos {
format = '\?color=todo_incompleted {todo_incompleted} todos'
thresholds = [
(1, '#bababa'), (2, '#ffb3ba'), (3, '#ffdfba'), (4, '#ffffba'),
(5, '#baefba'), (6, '#baffc9'), (7, '#bae1ff'), (8, '#bab3ff')
]
}
# show rainbow incompleted titles
thunderbird_todos {
format_todo = '\?if=!todo_completed&color=index_incompleted {title}'
thresholds = [
(1, '#bababa'), (2, '#ffb3ba'), (3, '#ffdfba'), (4, '#ffffba'),
(5, '#baefba'), (6, '#baffc9'), (7, '#bae1ff'), (8, '#bab3ff')
]
}
# show incompleted titles with last modified time
thunderbird_todos {
format_todo = '\?if=!todo_completed {title} {last_modified}'
format_datetime = {
'last_modified': '\?color=degraded last modified %-I:%M%P'
}
}
# show 'No todos'
thunderbird_todos {
format = '{format_todo}|No todos'
}
# show completed titles and incompleted titles
thunderbird_todos {
format_todo = '\?if=todo_completed&color=good {title}|\?color=bad {title}'
}
# make todo blocks
thunderbird_todos {
format = 'TODO {format_todo}'
format_todo = '\?if=todo_completed&color=good \u25b0|\?color=bad \u25b0'
format_separator = ''
}
# display incompleted titles with any priority
thunderbird_todos {
format_todo = '\?if=!todo_completed [\?if=priority>0 {title}]'
}
# colorize titles based on priorities
thunderbird_todos {
format_todo = '\?if=!todo_completed [\?color=priority {title}]'
thresholds = [(0, None), (1, 'red'), (5, None), (9, 'deepskyblue')]
}
# sort todos
thunderbird_todos {
sort = ('last_modified', True) # sort by modified time: recent first
sort = ('priority', True) # sort by priority: high to low
sort = ('title', False) # sort by title: ABC to abc
}
# add your snippets here
thunderbird_todos {
format = '...'
}
```
@author mrt-prodz, lasers
SAMPLE OUTPUT
{'full_text': 'New Task 1, New Task 2'}
"""
from os import path
from sqlite3 import connect
from datetime import datetime
STRING_NO_PROFILE = "missing profile"
STRING_NOT_INSTALLED = "not installed"
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 60
format = "{format_todo}"
format_datetime = {}
format_separator = " "
format_todo = "\?if=!todo_completed {title}"
profile = None
sort = ()
thresholds = []
def post_config_hook(self):
if not self.py3.check_commands("thunderbird"):
raise Exception(STRING_NOT_INSTALLED)
# first profile, please.
if not self.profile:
directory = "~/.thunderbird"
profile_ini = path.expanduser(directory + "/profiles.ini")
profile = []
for line in open(profile_ini):
if line.startswith("Path="):
profile.append(
"{}/{}".format(directory, line.split("Path=")[-1].strip())
)
            if not profile:
raise Exception(STRING_NO_PROFILE)
self.profile = profile[0]
self.profile = path.expanduser(self.profile)
self.path = self.profile + "/calendar-data/local.sqlite"
self.init_datetimes = []
        for word in self.format_datetime:
            if self.py3.format_contains(self.format_todo, word):
                self.init_datetimes.append(word)
self.thresholds_init = {}
for name in ["format", "format_todo"]:
self.thresholds_init[name] = self.py3.get_color_names_list(
getattr(self, name)
)
def _get_thunderbird_todos_data(self):
connection = connect(self.path)
cursor = connection.cursor()
cursor.execute("SELECT * FROM cal_todos")
keys = [desc[0] for desc in cursor.description]
todos_data = cursor.fetchall()
cursor.close()
connection.close()
return [dict(zip(keys, values)) for values in todos_data]
def _organize(self, data):
# sort?
if self.sort:
data = sorted(data, key=lambda k: k[self.sort[0]], reverse=self.sort[1])
# counts and indexes
count = {"todo_total": 0, "todo_completed": 0, "todo_incompleted": 0}
for todo_index, todo in enumerate(data, 1):
count["todo_total"] += 1
todo["index_total"] = todo_index
todo["index_completed"] = todo["index_incompleted"] = None
if todo["todo_completed"]:
count["todo_completed"] += 1
todo["index_completed"] = count["todo_completed"]
else:
count["todo_incompleted"] += 1
todo["index_incompleted"] = count["todo_incompleted"]
return data, count
def _manipulate(self, data, count):
new_data = []
for todo in data:
            # datetimes: these fields hold microsecond epoch timestamps;
            # the last six digits are dropped to get seconds for strftime
for k in self.init_datetimes:
if k in todo:
todo[k] = self.py3.safe_format(
datetime.strftime(
datetime.fromtimestamp(float(str(todo[k])[:-6])),
self.format_datetime[k],
)
)
# thresholds
for x in self.thresholds_init["format_todo"]:
if x in todo:
self.py3.threshold_get_color(todo[x], x)
new_data.append(self.py3.safe_format(self.format_todo, todo))
for x in self.thresholds_init["format"]:
if x in count:
self.py3.threshold_get_color(count[x], x)
format_separator = self.py3.safe_format(self.format_separator)
format_todo = self.py3.composite_join(format_separator, new_data)
return format_todo
def thunderbird_todos(self):
todo_data = self._get_thunderbird_todos_data()
data, count = self._organize(todo_data)
format_todo = self._manipulate(data, count)
return {
"cached_until": self.py3.time_in(self.cache_timeout),
"full_text": self.py3.safe_format(
self.format, dict(format_todo=format_todo, **count)
),
}
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
|
|
# -*- coding: utf-8 -*-
import markupsafe
from django.db import models
from addons.base import exceptions
from addons.base.models import (BaseOAuthNodeSettings, BaseOAuthUserSettings,
BaseStorageAddon)
from addons.bitbucket.api import BitbucketClient
from addons.bitbucket.serializer import BitbucketSerializer
from addons.bitbucket import settings as bitbucket_settings
from addons.bitbucket.exceptions import NotFoundError
from framework.auth import Auth
from osf.models.external import ExternalProvider
from osf.models.files import File, Folder, BaseFileNode
from website import settings
from website.util import web_url_for
hook_domain = bitbucket_settings.HOOK_DOMAIN or settings.DOMAIN
class BitbucketFileNode(BaseFileNode):
_provider = 'bitbucket'
class BitbucketFolder(BitbucketFileNode, Folder):
pass
class BitbucketFile(BitbucketFileNode, File):
version_identifier = 'commitSha'
def touch(self, auth_header, revision=None, commitSha=None, branch=None, **kwargs):
revision = revision or commitSha or branch
return super(BitbucketFile, self).touch(auth_header, revision=revision, **kwargs)
@property
def _hashes(self):
try:
return {'commit': self._history[-1]['extra']['commitSha']}
except (IndexError, KeyError):
return None
class BitbucketProvider(ExternalProvider):
"""Provider to handler Bitbucket OAuth workflow
API Docs::
* https://developer.atlassian.com/bitbucket/api/2/reference/meta/authentication
* https://confluence.atlassian.com/bitbucket/oauth-on-bitbucket-cloud-238027431.html
"""
name = 'Bitbucket'
short_name = 'bitbucket'
client_id = bitbucket_settings.CLIENT_ID
client_secret = bitbucket_settings.CLIENT_SECRET
auth_url_base = bitbucket_settings.OAUTH_AUTHORIZE_URL
callback_url = bitbucket_settings.OAUTH_ACCESS_TOKEN_URL
default_scopes = bitbucket_settings.SCOPE
auto_refresh_url = callback_url
refresh_time = bitbucket_settings.REFRESH_TIME
expiry_time = bitbucket_settings.EXPIRY_TIME
def handle_callback(self, response):
"""View called when the OAuth flow is completed. Adds a new BitbucketUserSettings
record to the user and saves the account info.
"""
client = BitbucketClient(access_token=response['access_token'])
user_info = client.user()
return {
'provider_id': user_info['uuid'],
'profile_url': user_info['links']['html']['href'],
'display_name': user_info['username']
}
def fetch_access_token(self, force_refresh=False):
self.refresh_oauth_key(force=force_refresh)
return self.account.oauth_key
class UserSettings(BaseOAuthUserSettings):
"""Stores user-specific bitbucket information
Quirks::
* Bitbucket does not support remote revocation of access tokens.
"""
oauth_provider = BitbucketProvider
serializer = BitbucketSerializer
# Required for importing username from social profile configuration page
# Assumes oldest connected account is primary.
@property
def public_id(self):
bitbucket_accounts = self.owner.external_accounts.filter(provider=self.oauth_provider.short_name)
if bitbucket_accounts:
return bitbucket_accounts[0].display_name
return None
class NodeSettings(BaseOAuthNodeSettings, BaseStorageAddon):
oauth_provider = BitbucketProvider
serializer = BitbucketSerializer
user = models.TextField(blank=True, null=True)
repo = models.TextField(blank=True, null=True)
hook_id = models.TextField(blank=True, null=True)
user_settings = models.ForeignKey(UserSettings, null=True, blank=True, on_delete=models.CASCADE)
_api = None
@property
def api(self):
"""Authenticated ExternalProvider instance"""
if self._api is None:
self._api = BitbucketProvider(self.external_account)
return self._api
@property
def folder_id(self):
return self.repo or None
@property
def folder_name(self):
if self.complete:
return '{}/{}'.format(self.user, self.repo)
return None
@property
def folder_path(self):
return self.repo or None
@property
def complete(self):
return self.has_auth and self.repo is not None and self.user is not None
def authorize(self, user_settings, save=False):
self.user_settings = user_settings
self.owner.add_log(
action='bitbucket_node_authorized',
params={
'project': self.owner.parent_id,
'node': self.owner._id,
},
auth=Auth(user_settings.owner),
)
if save:
self.save()
def clear_settings(self):
self.user = None
self.repo = None
self.hook_id = None
def deauthorize(self, auth=None, log=True):
# self.delete_hook(save=False)
self.clear_settings()
if log:
self.owner.add_log(
action='bitbucket_node_deauthorized',
params={
'project': self.owner.parent_id,
'node': self.owner._id,
},
auth=auth,
)
self.clear_auth()
def delete(self, save=False):
super(NodeSettings, self).delete(save=False)
self.deauthorize(log=False)
if save:
self.save()
@property
def repo_url(self):
if self.user and self.repo:
return 'https://bitbucket.org/{0}/{1}/'.format(
self.user, self.repo
)
@property
def short_url(self):
if self.user and self.repo:
return '/'.join([self.user, self.repo])
@property
def is_private(self):
repo = self.fetch_repo()
if repo:
return repo['is_private']
return None
def fetch_repo(self):
connection = BitbucketClient(access_token=self.api.fetch_access_token())
return connection.repo(user=self.user, repo=self.repo)
def fetch_access_token(self):
return self.api.fetch_access_token()
# TODO: Delete me and replace with serialize_settings / Knockout
def to_json(self, user):
ret = super(NodeSettings, self).to_json(user)
user_settings = user.get_addon('bitbucket')
ret.update({
'user_has_auth': user_settings and user_settings.has_auth,
'is_registration': self.owner.is_registration,
})
if self.user_settings and self.user_settings.has_auth:
connection = BitbucketClient(access_token=self.api.fetch_access_token())
valid_credentials = True
try:
mine = connection.repos()
repo_names = [
repo['full_name'].replace('/', ' / ')
for repo in mine
]
except Exception:
repo_names = []
valid_credentials = False
owner = self.user_settings.owner
if owner == user:
ret.update({'repo_names': repo_names})
ret.update({
'node_has_auth': True,
'bitbucket_user': self.user or '',
'bitbucket_repo': self.repo or '',
'bitbucket_repo_full_name': '{0} / {1}'.format(self.user, self.repo) if (self.user and self.repo) else '',
'auth_osf_name': owner.fullname,
'auth_osf_url': owner.url,
'auth_osf_id': owner._id,
'bitbucket_user_name': self.external_account.display_name,
'bitbucket_user_url': self.external_account.profile_url,
'is_owner': owner == user,
'valid_credentials': valid_credentials,
'addons_url': web_url_for('user_addons'),
'files_url': self.owner.web_url_for('collect_file_trees')
})
return ret
def serialize_waterbutler_credentials(self):
if not self.complete or not self.repo:
raise exceptions.AddonError('Addon is not authorized')
return {'token': self.api.fetch_access_token()}
def serialize_waterbutler_settings(self):
if not self.complete:
raise exceptions.AddonError('Repo is not configured')
return {
'owner': self.user,
'repo': self.repo,
}
def create_waterbutler_log(self, auth, action, metadata):
path = metadata['path']
url = self.owner.web_url_for('addon_view_or_download_file', path=path, provider='bitbucket')
sha, urls = None, {}
try:
sha = metadata['extra']['commitSha']
urls = {
'view': '{0}?commitSha={1}'.format(url, sha),
'download': '{0}?action=download&commitSha={1}'.format(url, sha)
}
except KeyError:
pass
self.owner.add_log(
'bitbucket_{0}'.format(action),
auth=auth,
params={
'project': self.owner.parent_id,
'node': self.owner._id,
'path': path,
'urls': urls,
'bitbucket': {
'user': self.user,
'repo': self.repo,
'commitSha': sha,
},
},
)
#############
# Callbacks #
#############
def before_page_load(self, node, user):
"""
:param Node node:
:param User user:
:return str: Alert message
"""
messages = []
# Quit if not contributor
if not node.is_contributor_or_group_member(user):
return messages
# Quit if not configured
if self.user is None or self.repo is None:
return messages
# Quit if no user authorization
if self.user_settings is None:
return messages
repo_data = self.fetch_repo()
if repo_data:
node_permissions = 'public' if node.is_public else 'private'
repo_permissions = 'private' if repo_data['is_private'] else 'public'
if repo_permissions != node_permissions:
message = (
'Warning: This OSF {category} is {node_perm}, but the Bitbucket '
'repo {user} / {repo} is {repo_perm}.'.format(
category=markupsafe.escape(node.project_or_component),
node_perm=markupsafe.escape(node_permissions),
repo_perm=markupsafe.escape(repo_permissions),
user=markupsafe.escape(self.user),
repo=markupsafe.escape(self.repo),
)
)
if repo_permissions == 'private':
message += (
' Users can view the contents of this private Bitbucket '
'repository through this public project.'
)
else:
message += (
' The files in this Bitbucket repo can be viewed on Bitbucket '
'<u><a href="https://bitbucket.org/{user}/{repo}/">here</a></u>.'
).format(
user=self.user,
repo=self.repo,
)
messages.append(message)
else:
message = (
'Warning: the Bitbucket repo {user} / {repo} connected to this OSF {category} has been deleted.'.format(
category=markupsafe.escape(node.project_or_component),
user=markupsafe.escape(self.user),
repo=markupsafe.escape(self.repo),
)
)
messages.append(message)
return messages
def before_remove_contributor_message(self, node, removed):
"""
:param Node node:
:param User removed:
:return str: Alert message
"""
try:
message = (super(NodeSettings, self).before_remove_contributor_message(node, removed) +
'You can download the contents of this repository before removing '
'this contributor <u><a href="{url}">here</a></u>.'.format(
url=node.api_url + 'bitbucket/tarball/'
))
except TypeError:
# super call returned None due to lack of user auth
return None
else:
return message
# backwards compatibility -- TODO: is this necessary?
before_remove_contributor = before_remove_contributor_message
def after_remove_contributor(self, node, removed, auth=None):
"""
:param Node node:
:param User removed:
:return str: Alert message
"""
if self.user_settings and self.user_settings.owner == removed:
# Delete OAuth tokens
self.user_settings = None
self.save()
message = (
u'Because the Bitbucket add-on for {category} "{title}" was authenticated '
u'by {user}, authentication information has been deleted.'
).format(
category=markupsafe.escape(node.category_display),
title=markupsafe.escape(node.title),
user=markupsafe.escape(removed.fullname)
)
if not auth or auth.user != removed:
url = node.web_url_for('node_setting')
message += (
u' You can re-authenticate on the <u><a href="{url}">Settings</a></u> page.'
).format(url=url)
return message
def after_fork(self, node, fork, user, save=True):
"""Hook to run after forking a project. If the forking user is not
the same as the original authorizing user, the Bitbucket
credentials will *not* be copied over.
:param Node node: Original node
:param Node fork: Forked node
:param User user: User creating fork
:param bool save: Save settings after callback
:return tuple: Tuple of cloned settings and alert message
"""
clone = super(NodeSettings, self).after_fork(
node, fork, user, save=False
)
# Copy authentication if authenticated by forking user
if self.user_settings and self.user_settings.owner == user:
clone.user_settings = self.user_settings
if save:
clone.save()
return clone
def before_make_public(self, node):
try:
is_private = self.is_private
except NotFoundError:
return None
if is_private:
return (
'This {cat} is connected to a private Bitbucket repository. Users '
'(other than contributors) will not be able to see the '
'contents of this repo unless it is made public on Bitbucket.'
).format(
cat=node.project_or_component,
)
def after_delete(self, user):
self.deauthorize(Auth(user=user), log=True)
|
|
from contextlib import contextmanager
import tracemalloc
import numpy as np
import pytest
from pandas._libs import hashtable as ht
import pandas._testing as tm
@contextmanager
def activated_tracemalloc():
tracemalloc.start()
try:
yield
finally:
tracemalloc.stop()
def get_allocated_khash_memory():
snapshot = tracemalloc.take_snapshot()
snapshot = snapshot.filter_traces(
(tracemalloc.DomainFilter(True, ht.get_hashtable_trace_domain()),)
)
return sum(map(lambda x: x.size, snapshot.traces))
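def _khash_memory_demo():
    # Illustrative sketch (hypothetical helper, not part of the original
    # test module): shows how the two utilities above combine. Allocations
    # made in the khash trace domain by a small Int64HashTable are visible
    # to tracemalloc, and disappear once the table is freed.
    with activated_tracemalloc():
        table = ht.Int64HashTable()
        table.map_locations(np.arange(10).astype(np.int64))
        assert get_allocated_khash_memory() == table.sizeof()
        del table
        assert get_allocated_khash_memory() == 0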
@pytest.mark.parametrize(
"table_type, dtype",
[
(ht.PyObjectHashTable, np.object_),
(ht.Complex128HashTable, np.complex128),
(ht.Int64HashTable, np.int64),
(ht.UInt64HashTable, np.uint64),
(ht.Float64HashTable, np.float64),
(ht.Complex64HashTable, np.complex64),
(ht.Int32HashTable, np.int32),
(ht.UInt32HashTable, np.uint32),
(ht.Float32HashTable, np.float32),
(ht.Int16HashTable, np.int16),
(ht.UInt16HashTable, np.uint16),
(ht.Int8HashTable, np.int8),
(ht.UInt8HashTable, np.uint8),
],
)
class TestHashTable:
def test_get_set_contains_len(self, table_type, dtype):
index = 5
table = table_type(55)
assert len(table) == 0
assert index not in table
table.set_item(index, 42)
assert len(table) == 1
assert index in table
assert table.get_item(index) == 42
table.set_item(index + 1, 41)
assert index in table
assert index + 1 in table
assert len(table) == 2
assert table.get_item(index) == 42
assert table.get_item(index + 1) == 41
table.set_item(index, 21)
assert index in table
assert index + 1 in table
assert len(table) == 2
assert table.get_item(index) == 21
assert table.get_item(index + 1) == 41
assert index + 2 not in table
with pytest.raises(KeyError, match=str(index + 2)):
table.get_item(index + 2)
def test_map(self, table_type, dtype, writable):
# PyObjectHashTable has no map-method
if table_type != ht.PyObjectHashTable:
N = 77
table = table_type()
keys = np.arange(N).astype(dtype)
vals = np.arange(N).astype(np.int64) + N
keys.flags.writeable = writable
vals.flags.writeable = writable
table.map(keys, vals)
for i in range(N):
assert table.get_item(keys[i]) == i + N
def test_map_locations(self, table_type, dtype, writable):
N = 8
table = table_type()
keys = (np.arange(N) + N).astype(dtype)
keys.flags.writeable = writable
table.map_locations(keys)
for i in range(N):
assert table.get_item(keys[i]) == i
def test_lookup(self, table_type, dtype, writable):
N = 3
table = table_type()
keys = (np.arange(N) + N).astype(dtype)
keys.flags.writeable = writable
table.map_locations(keys)
result = table.lookup(keys)
expected = np.arange(N)
tm.assert_numpy_array_equal(result.astype(np.int64), expected.astype(np.int64))
def test_lookup_wrong(self, table_type, dtype):
if dtype in (np.int8, np.uint8):
N = 100
else:
N = 512
table = table_type()
keys = (np.arange(N) + N).astype(dtype)
table.map_locations(keys)
wrong_keys = np.arange(N).astype(dtype)
result = table.lookup(wrong_keys)
assert np.all(result == -1)
def test_unique(self, table_type, dtype, writable):
if dtype in (np.int8, np.uint8):
N = 88
else:
N = 1000
table = table_type()
expected = (np.arange(N) + N).astype(dtype)
keys = np.repeat(expected, 5)
keys.flags.writeable = writable
unique = table.unique(keys)
tm.assert_numpy_array_equal(unique, expected)
def test_tracemalloc_works(self, table_type, dtype):
if dtype in (np.int8, np.uint8):
N = 256
else:
N = 30000
keys = np.arange(N).astype(dtype)
with activated_tracemalloc():
table = table_type()
table.map_locations(keys)
used = get_allocated_khash_memory()
my_size = table.sizeof()
assert used == my_size
del table
assert get_allocated_khash_memory() == 0
def test_tracemalloc_for_empty(self, table_type, dtype):
with activated_tracemalloc():
table = table_type()
used = get_allocated_khash_memory()
my_size = table.sizeof()
assert used == my_size
del table
assert get_allocated_khash_memory() == 0
def test_get_labels_groupby_for_Int64(writable):
table = ht.Int64HashTable()
vals = np.array([1, 2, -1, 2, 1, -1], dtype=np.int64)
vals.flags.writeable = writable
arr, unique = table.get_labels_groupby(vals)
expected_arr = np.array([0, 1, -1, 1, 0, -1], dtype=np.int64)
expected_unique = np.array([1, 2], dtype=np.int64)
tm.assert_numpy_array_equal(arr.astype(np.int64), expected_arr)
tm.assert_numpy_array_equal(unique, expected_unique)
def test_tracemalloc_works_for_StringHashTable():
N = 1000
keys = np.arange(N).astype(np.compat.unicode).astype(np.object_)
with activated_tracemalloc():
table = ht.StringHashTable()
table.map_locations(keys)
used = get_allocated_khash_memory()
my_size = table.sizeof()
assert used == my_size
del table
assert get_allocated_khash_memory() == 0
def test_tracemalloc_for_empty_StringHashTable():
with activated_tracemalloc():
table = ht.StringHashTable()
used = get_allocated_khash_memory()
my_size = table.sizeof()
assert used == my_size
del table
assert get_allocated_khash_memory() == 0
@pytest.mark.parametrize(
"table_type, dtype",
[
(ht.Float64HashTable, np.float64),
(ht.Float32HashTable, np.float32),
(ht.Complex128HashTable, np.complex128),
(ht.Complex64HashTable, np.complex64),
],
)
class TestHashTableWithNans:
def test_get_set_contains_len(self, table_type, dtype):
index = float("nan")
table = table_type()
assert index not in table
table.set_item(index, 42)
assert len(table) == 1
assert index in table
assert table.get_item(index) == 42
table.set_item(index, 41)
assert len(table) == 1
assert index in table
assert table.get_item(index) == 41
def test_map(self, table_type, dtype):
N = 332
table = table_type()
keys = np.full(N, np.nan, dtype=dtype)
vals = (np.arange(N) + N).astype(np.int64)
table.map(keys, vals)
assert len(table) == 1
assert table.get_item(np.nan) == 2 * N - 1
def test_map_locations(self, table_type, dtype):
N = 10
table = table_type()
keys = np.full(N, np.nan, dtype=dtype)
table.map_locations(keys)
assert len(table) == 1
assert table.get_item(np.nan) == N - 1
def test_unique(self, table_type, dtype):
N = 1020
table = table_type()
keys = np.full(N, np.nan, dtype=dtype)
unique = table.unique(keys)
assert np.all(np.isnan(unique)) and len(unique) == 1
def get_ht_function(fun_name, type_suffix):
return getattr(ht, fun_name + "_" + type_suffix)
@pytest.mark.parametrize(
"dtype, type_suffix",
[
(np.object_, "object"),
(np.complex128, "complex128"),
(np.int64, "int64"),
(np.uint64, "uint64"),
(np.float64, "float64"),
(np.complex64, "complex64"),
(np.int32, "int32"),
(np.uint32, "uint32"),
(np.float32, "float32"),
(np.int16, "int16"),
(np.uint16, "uint16"),
(np.int8, "int8"),
(np.uint8, "uint8"),
],
)
class TestHelpFunctions:
def test_value_count(self, dtype, type_suffix, writable):
N = 43
value_count = get_ht_function("value_count", type_suffix)
expected = (np.arange(N) + N).astype(dtype)
values = np.repeat(expected, 5)
values.flags.writeable = writable
keys, counts = value_count(values, False)
tm.assert_numpy_array_equal(np.sort(keys), expected)
assert np.all(counts == 5)
def test_duplicated_first(self, dtype, type_suffix, writable):
N = 100
duplicated = get_ht_function("duplicated", type_suffix)
values = np.repeat(np.arange(N).astype(dtype), 5)
values.flags.writeable = writable
result = duplicated(values)
expected = np.ones_like(values, dtype=np.bool_)
expected[::5] = False
tm.assert_numpy_array_equal(result, expected)
def test_ismember_yes(self, dtype, type_suffix, writable):
N = 127
ismember = get_ht_function("ismember", type_suffix)
arr = np.arange(N).astype(dtype)
values = np.arange(N).astype(dtype)
arr.flags.writeable = writable
values.flags.writeable = writable
result = ismember(arr, values)
expected = np.ones_like(values, dtype=np.bool_)
tm.assert_numpy_array_equal(result, expected)
def test_ismember_no(self, dtype, type_suffix):
N = 17
ismember = get_ht_function("ismember", type_suffix)
arr = np.arange(N).astype(dtype)
values = (np.arange(N) + N).astype(dtype)
result = ismember(arr, values)
expected = np.zeros_like(values, dtype=np.bool_)
tm.assert_numpy_array_equal(result, expected)
def test_mode(self, dtype, type_suffix, writable):
if dtype in (np.int8, np.uint8):
N = 53
else:
N = 11111
mode = get_ht_function("mode", type_suffix)
values = np.repeat(np.arange(N).astype(dtype), 5)
values[0] = 42
values.flags.writeable = writable
result = mode(values, False)
assert result == 42
@pytest.mark.parametrize(
"dtype, type_suffix",
[
(np.float64, "float64"),
(np.float32, "float32"),
(np.complex128, "complex128"),
(np.complex64, "complex64"),
],
)
class TestHelpFunctionsWithNans:
def test_value_count(self, dtype, type_suffix):
value_count = get_ht_function("value_count", type_suffix)
values = np.array([np.nan, np.nan, np.nan], dtype=dtype)
keys, counts = value_count(values, True)
assert len(keys) == 0
keys, counts = value_count(values, False)
assert len(keys) == 1 and np.all(np.isnan(keys))
assert counts[0] == 3
def test_duplicated_first(self, dtype, type_suffix):
duplicated = get_ht_function("duplicated", type_suffix)
values = np.array([np.nan, np.nan, np.nan], dtype=dtype)
result = duplicated(values)
expected = np.array([False, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_ismember_yes(self, dtype, type_suffix):
ismember = get_ht_function("ismember", type_suffix)
arr = np.array([np.nan, np.nan, np.nan], dtype=dtype)
values = np.array([np.nan, np.nan], dtype=dtype)
result = ismember(arr, values)
expected = np.array([True, True, True], dtype=np.bool_)
tm.assert_numpy_array_equal(result, expected)
def test_ismember_no(self, dtype, type_suffix):
ismember = get_ht_function("ismember", type_suffix)
arr = np.array([np.nan, np.nan, np.nan], dtype=dtype)
values = np.array([1], dtype=dtype)
result = ismember(arr, values)
expected = np.array([False, False, False], dtype=np.bool_)
tm.assert_numpy_array_equal(result, expected)
def test_mode(self, dtype, type_suffix):
mode = get_ht_function("mode", type_suffix)
values = np.array([42, np.nan, np.nan, np.nan], dtype=dtype)
assert mode(values, True) == 42
assert np.isnan(mode(values, False))
|
|
__file__ = 'OffSystem_v1'
__date__ = '5/29/14'
__author__ = 'ABREZNIC'
import os, arcpy, xlwt, datetime
#date
now = datetime.datetime.now()
curMonth = now.strftime("%m")
curDay = now.strftime("%d")
curYear = now.strftime("%Y")
today = curYear + "_" + curMonth + "_" + curDay
#variables
qcfolder = "C:\\TxDOT\\QC\\OffSystem"
roadways = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.Roadways\\TPP_GIS.APP_TPP_GIS_ADMIN.TXDOT_Roadways"
where = """ RTE_CLASS = '2' OR RTE_CLASS = '3' """
subfiles = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.SUBFILES"
cities = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.City\\TPP_GIS.APP_TPP_GIS_ADMIN.City"
districts = "Database Connections\\Connection to Comanche.sde\\TPP_GIS.APP_TPP_GIS_ADMIN.District\\TPP_GIS.APP_TPP_GIS_ADMIN.District"
workspace = qcfolder + "\\" + today
if not os.path.exists(workspace):
os.makedirs(workspace)
else:
for file in os.listdir(workspace):
thefile = os.path.join(workspace, file)
os.remove(thefile)
#print "Folder already exists for today. Please ether rename or delete the QC folder with today's date."
def overlap():
print "starting " + str(now)
arcpy.Select_analysis(roadways, workspace + "\\FC_Streets.shp", """ RTE_CLASS = '3' """)
arcpy.Erase_analysis(workspace + "\\FC_Streets.shp", cities, workspace + "\\FC_Streets_Errors.shp")
print "fc"
arcpy.Clip_analysis(roadways, cities, workspace + "\\City_Roads.shp")
print "City"
arcpy.Select_analysis(workspace + "\\City_Roads.shp", workspace + "\\County_Roads_Errors.shp", """ RTE_CLASS = '2' """)
print "cr select"
arcpy.Merge_management([workspace + "\\County_Roads_Errors.shp", workspace + "\\FC_Streets_Errors.shp"], workspace + "\\MergedErrors.shp")
print "merge"
arcpy.SpatialJoin_analysis(workspace + "\\MergedErrors.shp", districts, workspace + "\\City_OverlapErrors.shp")
print "SJ"
arcpy.Delete_management(workspace + "\\City_Roads.shp")
arcpy.Delete_management(workspace + "\\FC_Streets.shp")
arcpy.Delete_management(workspace + "\\County_Roads_Errors.shp")
arcpy.Delete_management(workspace + "\\FC_Streets_Errors.shp")
arcpy.Delete_management(workspace + "\\MergedErrors.shp")
print "end " + str(now)
errors = []
cursor = arcpy.UpdateCursor(workspace + "\\City_OverlapErrors.shp")
for row in cursor:
geom = row.shape
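        # Shape length is in the projection's linear unit; assuming a
        # meter-based projection, 0.000621371 converts meters to miles.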
        miles = geom.length * .000621371
        row.setValue("RTE_LEN", miles)
cursor.updateRow(row)
rowinfo = [row.RTE_ID, row.RTE_LEN, row.DIST_NM, row.DIST_NBR]
errors.append(rowinfo)
del cursor
del row
return errors
def routeopen():
cursor = arcpy.SearchCursor(roadways, where)
errors = []
    for row in cursor:
        id = row.RTE_ID
        if row.RTE_OPEN == 1:
            rte_subfiles = arcpy.SearchCursor(subfiles, "RTE_ID = '" + id + "'")
            for record in rte_subfiles:
                status = record.HIGHWAY_STATUS
                if status != 4:
                    # Build a fresh error record for each offending subfile row.
                    errors.append([id, row.RTE_OPEN, status, "RTE_OPEN = 1 requires HIGHWAY_STATUS = 4"])
        elif row.RTE_OPEN == 0:
            rte_subfiles = arcpy.SearchCursor(subfiles, "RTE_ID = '" + id + "'")
            for record in rte_subfiles:
                status = record.HIGHWAY_STATUS
                if status != 0:
                    errors.append([id, row.RTE_OPEN, status, "RTE_OPEN = 0 requires HIGHWAY_STATUS = 0"])
        else:
            errors.append([id, row.RTE_OPEN, "N/A", "RTE_OPEN must be 1 or 0"])
    del cursor
    return errors
def measurelength():
cursor = arcpy.UpdateCursor(roadways, where)
errors = []
for row in cursor:
errorinfo = []
id = row.RTE_ID
geom = row.shape
ext = geom.extent
Mmin = round(ext.MMin, 3)
Mmax = round(ext.MMax, 3)
Mdiff = abs(Mmax - Mmin)
wholelen = geom.length * .000621371
shp_len = round(wholelen, 3)
rte_len = row.RTE_LEN
testlen = abs(shp_len - Mdiff)
if testlen <= .003 and abs(rte_len - testlen) > .003:
row.setValue("RTE_LEN", wholelen)
cursor.updateRow(row)
elif abs(shp_len - Mdiff) > .003:
errorinfo.append(id)
errorinfo.append(Mdiff)
errorinfo.append(shp_len)
errorinfo.append(rte_len)
errors.append(errorinfo)
elif abs(rte_len - Mdiff) > .003:
errorinfo.append(id)
errorinfo.append(Mdiff)
errorinfo.append(shp_len)
errorinfo.append(rte_len)
errors.append(errorinfo)
elif abs(shp_len - rte_len) > .003:
errorinfo.append(id)
errorinfo.append(Mdiff)
errorinfo.append(shp_len)
errorinfo.append(rte_len)
errors.append(errorinfo)
else:
pass
    del cursor
    return errors
def subfilelength():
dictionary = {}
cursor = arcpy.SearchCursor(roadways, where)
for row in cursor:
id = row.RTE_ID
        length = row.RTE_LEN
geom = row.shape
ext = geom.extent
Mmin = round(ext.MMin, 3)
Mmax = round(ext.MMax, 3)
        if id not in dictionary:
            dictionary[str(id)] = [length, Mmin, Mmax]
else:
currentrecord = dictionary[id]
currentlength = currentrecord[0]
currentmin = currentrecord[1]
currentmax = currentrecord[2]
            newlen = currentlength + length
if Mmin < currentmin:
currentmin = Mmin
if Mmax > currentmax:
currentmax = Mmax
dictionary[str(id)] = [newlen, currentmin, currentmax]
del cursor
del row
errors = []
for i in dictionary.keys():
firstflag = 0
sublength = 0
linevalues = dictionary[i]
linelen = linevalues[0]
linemin = linevalues[1]
linemax = linevalues[2]
cursor = arcpy.SearchCursor(subfiles, "RTE_ID = '" + i + "'", "", "", "BMP A")
for row in cursor:
if firstflag == 0:
bmp1 = row.BMP
firstflag += 1
bmp = row.BMP
emp = row.EMP
sublength += row.LEN_OF_SECTION
dist = row.DISTRICT
if abs((emp-bmp) - row.LEN_OF_SECTION) > .001:
errorinfo = []
errorinfo.append(i)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append("")
errorinfo.append(sublength)
errorinfo.append("")
errorinfo.append("BMP and EMP difference does not equal the LEN_OF_SECTION. OBJECTID: " + row.OBJECTID)
errors.append(errorinfo)
if abs(linelen - sublength) > .003:
errorinfo = []
errorinfo.append(i)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append(linemin)
errorinfo.append(emp)
errorinfo.append(linemax)
errorinfo.append(sublength)
errorinfo.append(linelen)
errorinfo.append("RTE_LEN does not equal SUBFILES total LEN_OF_SECTION")
errors.append(errorinfo)
if abs(linemin - bmp1) > .001:
errorinfo = []
errorinfo.append(i)
errorinfo.append(dist)
errorinfo.append(bmp1)
errorinfo.append(linemin)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Line minimum measure does not equal starting BMP")
errors.append(errorinfo)
if abs(linemax - emp) > .001:
errorinfo = []
errorinfo.append(i)
errorinfo.append(dist)
errorinfo.append("")
errorinfo.append("")
errorinfo.append(emp)
errorinfo.append(linemax)
errorinfo.append("")
errorinfo.append("")
errorinfo.append("Line maximum measure does not equal ending EMP")
errors.append(errorinfo)
return errors
def assemblereport():
book = xlwt.Workbook()
print "Overlap Errors..."
overlapsheet = book.add_sheet("City Boundary Overlap")
line = 0
overlapsheet.write(line, 0, "The following Route IDs are County Roads and FC Streets which cross a City Boundary as found in City_OverlapErrors.shp")
line += 1
overlapsheet.write(line, 0, "RTE_ID")
overlapsheet.write(line, 1, "Overlap Length")
overlapsheet.write(line, 2, "District Name")
overlapsheet.write(line, 3, "District Number")
line += 1
overlaplist = overlap()
for i in overlaplist:
overlapsheet.write(line, 0, i[0])
overlapsheet.write(line, 1, i[1])
overlapsheet.write(line, 2, i[2])
overlapsheet.write(line, 3, i[3])
line += 1
print "Route Open Errors..."
opensheet = book.add_sheet("Route Open")
line = 0
opensheet.write(line, 0, "The following Route IDs contain an error between RTE_OPEN in TxDOT_Roadways and ROADWAY_STATUS in SUBFILES")
line += 1
opensheet.write(line, 0, "RTE_ID")
opensheet.write(line, 1, "RTE_OPEN")
opensheet.write(line, 2, "HIGHWAY_STATUS")
opensheet.write(line, 3, "Description")
line += 1
openlist = routeopen()
for i in openlist:
opensheet.write(line, 0, i[0])
opensheet.write(line, 1, i[1])
opensheet.write(line, 2, i[2])
opensheet.write(line, 3, i[3])
line += 1
print "Geometry and Measure Errors..."
geomsheet = book.add_sheet("Geometry and Measures")
line = 0
geomsheet.write(line, 0, "The following Route IDs contain an error between their measures' length, shape length, and RTE_LEN")
line += 1
geomsheet.write(line, 0, "RTE_ID")
geomsheet.write(line, 1, "Measures' Length")
geomsheet.write(line, 2, "Shape Length")
geomsheet.write(line, 3, "RTE_LEN")
line += 1
geomlist = measurelength()
for i in geomlist:
geomsheet.write(line, 0, i[0])
geomsheet.write(line, 1, i[1])
geomsheet.write(line, 2, i[2])
geomsheet.write(line, 3, i[3])
line += 1
print "Subfile Length Errors..."
subsheet = book.add_sheet("Subfile Lengths")
line = 0
subsheet.write(line, 0, "The following Route IDs contain an error between their line and SUBFILES lengths")
line += 1
subsheet.write(line, 0, "RTE_ID")
subsheet.write(line, 1, "District")
subsheet.write(line, 2, "BMP")
subsheet.write(line, 3, "Min Measure")
subsheet.write(line, 4, "EMP")
subsheet.write(line, 5, "Max Measure")
subsheet.write(line, 6, "Subfile Len")
subsheet.write(line, 7, "RTE_LEN")
subsheet.write(line, 8, "Description")
line += 1
sublist = subfilelength()
for i in sublist:
subsheet.write(line, 0, i[0])
subsheet.write(line, 1, i[1])
subsheet.write(line, 2, i[2])
subsheet.write(line, 3, i[3])
subsheet.write(line, 4, i[4])
subsheet.write(line, 5, i[5])
subsheet.write(line, 6, i[6])
subsheet.write(line, 7, i[7])
subsheet.write(line, 8, i[8])
line += 1
book.save(workspace + "\\ErrorReport_" + today + ".xls")
print "and away we go... " + str(now)
assemblereport()
print "that's all folks!" + str(now)
|
|
from __future__ import unicode_literals
from rbpkg.utils.matches import matches_current_system, matches_version_range
class PackageRules(object):
"""A set of rules for installing and managing packages.
The rules provide rbpkg with the information needed to install or manage
packages, and to handle non-Python dependencies or to replace packages
with other alternatives.
Each rule may match one or more versions by specifying a version range.
Attributes:
channel (rbpkg.repository.package_channel.PackageChannel):
            The channel this version is a part of.
version_range (unicode):
The version range that these rules apply to, or ``*`` to match
all versions.
package_type (unicode):
The type of package. Must be one of
:py:attr:`PACKAGE_TYPE_DEB`, :py:attr:`PACKAGE_TYPE_PYTHON`,
:py:attr:`PACKAGE_TYPE_RPM`, or :py:attr:`PACKAGE_TYPE_SOURCE`.
package_name (unicode):
The name of the package in the package manager.
systems (list of unicode):
A list of systems that these rules apply to. The special value
of ``*`` matches all systems.
Valid entries are "macosx", "windows", or any Linux distribution
matching the result of :py:func:`platform.dist`.
required_dependencies (list of unicode):
A list of package bundle names that this depends on.
recommended_dependencies (list of unicode):
A list of package bundle names that this recommends.
optional_dependencies (list of unicode):
A list of package bundle names that are optional dependencies.
replaces (list of unicode):
A list of package bundle names that this package replaces.
pre_install_commands (list of unicode):
A list of shell commands to perform prior to installation.
install_commands (list of unicode):
A list of shell commands to perform for installation. If not
set, the native package manager for this package type will be
used to install the given package.
post_install_commands (list of unicode):
A list of shell commands to perform after installation.
install_flags (list of unicode):
A list of flags to pass to the native package manager.
uninstall_commands (list of unicode):
A list of shell commands to perform for uninstallation. If not
set, the native package manager for this package type will be
used to uninstall the given package.
"""
#: Python packages (eggs or wheels).
PACKAGE_TYPE_PYTHON = 'python'
#: RPM packages.
PACKAGE_TYPE_RPM = 'rpm'
#: Debian packages.
PACKAGE_TYPE_DEB = 'deb'
#: Source installs.
PACKAGE_TYPE_SOURCE = 'source'
@classmethod
def deserialize(cls, channel, data):
"""Deserialize a payload into a PackageRules.
Args:
channel (rbpkg.repository.package_channel.PackageChannel):
The channel that contains this set of rules.
data (dict):
The JSON dictionary data for the rules definitions.
Returns:
PackageRules:
The resulting package rules.
"""
deps = data.get('dependencies', {})
return PackageRules(
channel,
version_range=data['version_range'],
package_type=data['package_type'],
package_name=data.get('package_name'),
systems=data['systems'],
required_dependencies=deps.get('required'),
recommended_dependencies=deps.get('recommended'),
optional_dependencies=deps.get('optional'),
replaces=data.get('replaces'),
pre_install_commands=data.get('pre_install_commands'),
install_commands=data.get('install_commands'),
post_install_commands=data.get('post_install_commands'),
install_flags=data.get('install_flags'),
uninstall_commands=data.get('uninstall_commands'))
    def __init__(self, channel, version_range=None, package_type=None,
                 package_name=None, systems=None, required_dependencies=None,
                 recommended_dependencies=None, optional_dependencies=None,
                 replaces=None, pre_install_commands=None, install_commands=None,
                 post_install_commands=None, install_flags=None,
                 uninstall_commands=None):
"""Initialize the package rules.
Args:
channel (rbpkg.repository.package_channel.PackageChannel):
The channel that contains this set of rules.
version_range (unicode):
The version range that these rules apply to, or ``*`` to match
all versions.
package_type (unicode):
The type of package. Must be one of
:py:attr:`PACKAGE_TYPE_DEB`, :py:attr:`PACKAGE_TYPE_PYTHON`,
:py:attr:`PACKAGE_TYPE_RPM`, or :py:attr:`PACKAGE_TYPE_SOURCE`.
package_name (unicode):
The name of the package in the package manager.
systems (list of unicode):
A list of systems that these rules apply to. The special value
of ``*`` matches all systems.
Valid entries are "macosx", "windows", or any Linux
distribution matching the result of :py:func:`platform.dist`.
required_dependencies (list of unicode):
A list of package bundle names that this depends on.
recommended_dependencies (list of unicode):
A list of package bundle names that this recommends.
optional_dependencies (list of unicode):
A list of package bundle names that are optional dependencies.
replaces (list of unicode):
A list of package bundle names that this package replaces.
pre_install_commands (list of unicode):
A list of shell commands to perform prior to installation.
install_commands (list of unicode):
A list of shell commands to perform for installation. If not
set, the native package manager for this package type will be
used to install the given package.
post_install_commands (list of unicode):
A list of shell commands to perform after installation.
install_flags (list of unicode):
A list of flags to pass to the native package manager.
uninstall_commands (list of unicode):
A list of shell commands to perform for uninstallation. If not
set, the native package manager for this package type will be
used to uninstall the given package.
"""
self.channel = channel
self.version_range = version_range
self.package_type = package_type
self.package_name = package_name
self.systems = systems or []
self.required_dependencies = required_dependencies or []
self.recommended_dependencies = recommended_dependencies or []
self.optional_dependencies = optional_dependencies or []
self.replaces = replaces or []
self.pre_install_commands = pre_install_commands or []
self.install_commands = install_commands or []
self.post_install_commands = post_install_commands or []
self.install_flags = install_flags or []
self.uninstall_commands = uninstall_commands or []
def matches_version(self, version, require_current_system=True):
"""Return whether these rules match the given version.
By default, this will also check if it matches the current system.
Args:
version (unicode):
The version to restrict rules to.
require_current_system (bool):
If set, only rules valid for the current system will be
returned.
Returns:
bool:
``True`` if this set of rules matches the given criteria.
"""
version_range = self.package_name + self.version_range
return ((self.version_range == '*' or
matches_version_range(version, version_range)) and
(not require_current_system or
matches_current_system(self.systems)))
def serialize(self):
"""Serialize the package rules into a JSON-serializable format.
The resulting output can be embedded into the channel data.
Returns:
dict:
The serialized package rules data.
"""
deps = {}
if self.required_dependencies:
deps['required'] = self.required_dependencies
if self.recommended_dependencies:
deps['recommended'] = self.recommended_dependencies
if self.optional_dependencies:
deps['optional'] = self.optional_dependencies
data = {
'version_range': self.version_range,
'package_type': self.package_type,
'package_name': self.package_name,
'systems': self.systems,
}
if deps:
data['dependencies'] = deps
optional_fields = (
('replaces', self.replaces),
('pre_install_commands', self.pre_install_commands),
('install_commands', self.install_commands),
('post_install_commands', self.post_install_commands),
('install_flags', self.install_flags),
('uninstall_commands', self.uninstall_commands),
)
for field_name, value in optional_fields:
if value:
data[field_name] = value
return data
def __repr__(self):
return (
'<PackageRules(version_range=%s; package_type=%s; '
            'package_name=%s)>'
% (self.version_range, self.package_type, self.package_name)
)
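# A minimal round-trip sketch for PackageRules.deserialize()/serialize().
# The payload keys mirror the deserialize() implementation above; passing
# None for the channel is purely for illustration (a real caller would
# supply a PackageChannel).
if __name__ == '__main__':
    payload = {
        'version_range': '*',
        'package_type': PackageRules.PACKAGE_TYPE_PYTHON,
        'package_name': 'rbpkg-demo',
        'systems': ['*'],
        'dependencies': {
            'required': ['python-six'],
        },
    }
    rules = PackageRules.deserialize(None, payload)
    assert rules.serialize() == payload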
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteConnectionsOperations(object):
"""ExpressRouteConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
express_route_gateway_name, # type: str
connection_name, # type: str
put_express_route_connection_parameters, # type: "_models.ExpressRouteConnection"
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteConnection"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(put_express_route_connection_parameters, 'ExpressRouteConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
express_route_gateway_name, # type: str
connection_name, # type: str
put_express_route_connection_parameters, # type: "_models.ExpressRouteConnection"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ExpressRouteConnection"]
"""Creates a connection between an ExpressRoute gateway and an ExpressRoute circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:param connection_name: The name of the connection subresource.
:type connection_name: str
:param put_express_route_connection_parameters: Parameters required in an
ExpressRouteConnection PUT operation.
:type put_express_route_connection_parameters: ~azure.mgmt.network.v2018_10_01.models.ExpressRouteConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ExpressRouteConnection or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_10_01.models.ExpressRouteConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
express_route_gateway_name=express_route_gateway_name,
connection_name=connection_name,
put_express_route_connection_parameters=put_express_route_connection_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
express_route_gateway_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteConnection"
"""Gets the specified ExpressRouteConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:param connection_name: The name of the ExpressRoute connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_10_01.models.ExpressRouteConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
express_route_gateway_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
express_route_gateway_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a connection to a ExpressRoute circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:param connection_name: The name of the connection subresource.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
express_route_gateway_name=express_route_gateway_name,
connection_name=connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections/{connectionName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
express_route_gateway_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExpressRouteConnectionList"
"""Lists ExpressRouteConnections.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param express_route_gateway_name: The name of the ExpressRoute gateway.
:type express_route_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteConnectionList, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_10_01.models.ExpressRouteConnectionList
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteConnectionList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'expressRouteGatewayName': self._serialize.url("express_route_gateway_name", express_route_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteConnectionList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}/expressRouteConnections'} # type: ignore
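# Example (sketch): this operations class is reached through a management
# client rather than instantiated directly. The resource names below are
# placeholders.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.network import NetworkManagementClient
#
#     client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     poller = client.express_route_connections.begin_delete(
#         "my-resource-group", "my-er-gateway", "my-connection")
#     poller.result()  # blocks until the long-running operation completes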
|
|
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""2D conv layers that are expected to be used with sequence inputs."""
import math
from absl import flags
import REDACTED.transformer_lingvo.lingvo.compat as tf
from REDACTED.transformer_lingvo.lingvo.core import base_layer
from REDACTED.transformer_lingvo.lingvo.core import builder
from REDACTED.transformer_lingvo.lingvo.core import builder_layers
from REDACTED.transformer_lingvo.lingvo.core import conv_layers_with_time_padding
from REDACTED.transformer_lingvo.lingvo.core import py_utils
FLAGS = flags.FLAGS
Conv2DLayerWithPadding = conv_layers_with_time_padding.Conv2DLayerWithPadding
CausalConv2DLayerWithPadding = conv_layers_with_time_padding.CausalConv2DLayerWithPadding
DepthwiseConv2DLayer = conv_layers_with_time_padding.DepthwiseConv2DLayer
CausalDepthwiseConv2DLayer = conv_layers_with_time_padding.CausalDepthwiseConv2DLayer
ConvBatchNormLayer = conv_layers_with_time_padding.ConvBatchNormLayer
ActivationLayer = conv_layers_with_time_padding.ActivationLayer
PaddingLayer = conv_layers_with_time_padding.PaddingLayer
NormalizedDepthwiseConv2DLayer = conv_layers_with_time_padding.NormalizedDepthwiseConv2DLayer
CausalNormalizedDepthwiseConv2DLayer = conv_layers_with_time_padding.CausalNormalizedDepthwiseConv2DLayer
GlobalPoolingLayer = conv_layers_with_time_padding.GlobalPoolingLayer
class BiasLayer(builder_layers.BiasLayer):
def FProp(self, theta, inputs, paddings):
bias_added = super(BiasLayer, self).FProp(theta, inputs)
return bias_added, paddings
class CausalPoolingLayer(base_layer.BaseLayer):
"""Pooling layer with causal dependency on the time axis."""
@classmethod
def Params(cls):
p = super(CausalPoolingLayer, cls).Params()
p.Define('pooling_type', 'AVG', 'Pooling type: MAX|AVG')
p.Define(
        'left_context', None, 'Number of frames to the left in the pooling '
        'window (including the current frame).')
return p
def FProp(self, theta, inputs, paddings):
"""Applies causal pooling to inputs.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
inputs: The inputs tensor. It is expected to be of shape [batch, time,
frequency, channel]. The time dimension corresponds to the height
dimension as in images and the frequency dimension corresponds to the
width dimension as in images.
paddings: The paddings tensor. It is expected to be of shape [batch,
time].
Returns:
outputs, out_paddings pair.
- outputs: has the same shape as inputs.
      - out_paddings: has the same shape as paddings.
"""
p = self.params
if p.left_context is None:
raise ValueError('left_context must be set.')
window_size = p.left_context
left_pad_size = window_size - 1
large_negative = p.dtype.max * tf.constant(-0.7, dtype=p.dtype)
# For max pooling, use a large negative padding value such that the max
# element is almost always from a non-padding position.
pad_value = 0 if p.pooling_type == 'AVG' else large_negative
inputs = tf.pad(
inputs, [[0, 0], [left_pad_size, 0], [0, 0], [0, 0]],
constant_values=pad_value)
out_feature = tf.nn.pool(
inputs,
window_shape=(window_size, 1),
pooling_type=p.pooling_type,
padding='VALID')
if p.pooling_type == 'AVG':
# Count the fraction of non-padding elements inside each pooling window.
in_mask = tf.pad(1.0 - paddings, [[0, 0], [left_pad_size, 0]])
non_padding_ratio = tf.nn.pool(
in_mask[:, :, tf.newaxis],
window_shape=(window_size,),
pooling_type='AVG',
padding='VALID')
# Divide by non-padding ratios to eliminate the effect of padded zeros.
out_feature *= tf.math.reciprocal_no_nan(non_padding_ratio[...,
tf.newaxis])
out_feature *= 1.0 - paddings[..., tf.newaxis, tf.newaxis]
return out_feature, paddings
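# Example (sketch): a causal average pool over the current frame and the
# three preceding frames. Params()/Instantiate() follow the usual lingvo
# layer-construction pattern; the layer name is arbitrary.
#
#     p = CausalPoolingLayer.Params().Set(
#         name='causal_pool', pooling_type='AVG', left_context=4)
#     pool = p.Instantiate()
#     out, out_paddings = pool.FProp(pool.theta, inputs, paddings)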
class Builder(builder.Base):
"""Builder patterns for commonly used conv layers."""
@classmethod
def Params(cls):
p = super(Builder, cls).Params()
p.Define('use_bn', True, 'Add additional bn layers to conv layers or not.')
p.Define('weight_norm', False, 'Add weight norm for kernel weights or not.')
return p
def _Bias(self, name, dims):
"""Bias layer. The bias is added to the last dimension of the input."""
return BiasLayer.Params().Set(name=name, dims=dims)
def _BN(self, name, dims, decay=0.999):
return ConvBatchNormLayer.Params().Set(name=name, dim=dims, decay=decay)
def _BiasOrBN(self, name, dims):
if self.params.use_bn:
return self._BN(name, dims)
else:
return self._Bias(name, dims)
def _MaybeBN(self, name, dims):
if self.params.use_bn:
return self._BN(name, dims)
else:
return self._Id(name)
def _Activation(self, name, activation):
return ActivationLayer.Params().Set(name=name, activation=activation)
def _Padding(self, name):
return PaddingLayer.Params().Set(name=name)
def _RawConv2D(self, name, in_dim, out_dim, filter_shape, stride, dilation,
is_causal):
if is_causal:
conv_cls = CausalConv2DLayerWithPadding
else:
conv_cls = Conv2DLayerWithPadding
return conv_cls.Params().Set(
name=name,
filter_shape=filter_shape + [in_dim, out_dim],
filter_stride=stride,
dilation_rate=dilation,
weight_norm=self.params.weight_norm)
def _RawDepthwiseConv2D(self, name, in_dim, depth_multiplier, filter_shape,
stride, dilation, is_causal):
if is_causal:
conv_cls = CausalDepthwiseConv2DLayer
else:
conv_cls = DepthwiseConv2DLayer
return conv_cls.Params().Set(
name=name,
filter_shape=filter_shape + [in_dim, depth_multiplier],
filter_stride=stride,
dilation_rate=dilation,
weight_norm=self.params.weight_norm)
def _GlobalPooling(self, name, pooling_type):
return GlobalPoolingLayer.Params().Set(name=name, pooling_type=pooling_type)
def Conv2D(self,
name,
in_dim,
out_dim,
filter_shape,
stride=None,
dilation=None,
activation='RELU',
conv_last=False,
is_causal=False):
if stride is None:
stride = [1, 1]
if dilation is None:
dilation = [1, 1]
if conv_last:
layers_in_sequence = [
self._MaybeBN('bn', in_dim),
self._Activation('act', activation),
self._RawConv2D('conv_2d', in_dim, out_dim, filter_shape, stride,
dilation, is_causal),
self._Bias('bias', out_dim),
self._Padding('pad')
]
else:
layers_in_sequence = [
self._RawConv2D('conv_2d', in_dim, out_dim, filter_shape, stride,
dilation, is_causal),
self._BiasOrBN('bn_or_bias', out_dim),
self._Activation('act', activation),
self._Padding('pad')
]
return self._Seq(name, *layers_in_sequence)
def DepthwiseConv2D(self,
name,
in_dim,
depth_multiplier,
filter_shape,
stride=None,
dilation=None,
activation='RELU',
conv_last=False,
is_causal=False):
if stride is None:
stride = [1, 1]
if dilation is None:
dilation = [1, 1]
if conv_last:
layers_in_sequence = [
self._MaybeBN('bn', in_dim),
self._Activation('act', activation),
self._RawDepthwiseConv2D('conv_2d', in_dim, depth_multiplier,
filter_shape, stride, dilation, is_causal),
self._Bias('bias', in_dim * depth_multiplier),
self._Padding('pad')
]
else:
layers_in_sequence = [
self._RawDepthwiseConv2D('conv_2d', in_dim, depth_multiplier,
filter_shape, stride, dilation, is_causal),
self._BiasOrBN('bn_or_bias', in_dim * depth_multiplier),
self._Activation('act', activation),
self._Padding('pad')
]
return self._Seq(name, *layers_in_sequence)
def SeparableConv2D(self,
name,
in_dim,
out_dim,
depth_multiplier,
filter_shape,
stride=None,
dilation=None,
activation='RELU',
conv_last=False,
is_causal=False):
if stride is None:
stride = [1, 1]
if dilation is None:
dilation = [1, 1]
if conv_last:
layers_in_sequence = [
self._MaybeBN('bn', in_dim),
self._Activation('act', activation),
self._RawDepthwiseConv2D('conv_2d', in_dim, depth_multiplier,
filter_shape, stride, dilation, is_causal),
# No need to add a padding layer here as subsequent conv layer always
# properly zeros out padded nodes.
self._RawConv2D(
'conv_1x1',
in_dim * depth_multiplier,
out_dim,
filter_shape=[1, 1],
stride=[1, 1],
dilation=[1, 1],
is_causal=False),
self._Bias('bias', out_dim),
self._Padding('pad')
]
else:
layers_in_sequence = [
self._RawDepthwiseConv2D('conv_2d', in_dim, depth_multiplier,
filter_shape, stride, dilation, is_causal),
# No need to add a padding layer here as subsequent conv layer always
# properly zeros out padded nodes.
self._RawConv2D(
'conv_1x1',
in_dim * depth_multiplier,
out_dim,
filter_shape=[1, 1],
stride=[1, 1],
dilation=[1, 1],
is_causal=False),
self._BiasOrBN('bn_or_bias', out_dim),
self._Activation('act', activation),
self._Padding('pad')
]
return self._Seq(name, *layers_in_sequence)
def NormalizedDepthwiseConv2D(self,
name,
kernel_size,
num_heads,
in_dim,
dropconnect_prob=0,
deterministic_dropout=False,
is_causal=False):
if is_causal:
conv_cls = CausalNormalizedDepthwiseConv2DLayer
else:
conv_cls = NormalizedDepthwiseConv2DLayer
return conv_cls.Params().Set(
name=name,
filter_shape=[kernel_size, 1, num_heads, 1],
weight_tiling_factor=in_dim // num_heads,
deterministic_dropout=deterministic_dropout,
params_init=py_utils.WeightInit.TruncatedGaussian(
scale=math.sqrt(2.6 / kernel_size)), # Fan-out initialization.
dropconnect_prob=dropconnect_prob)
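# Example (sketch): using the Builder to assemble a causal separable conv
# block. The instantiation pattern is an assumption based on the usual
# lingvo builder usage; dimensions are placeholders.
#
#     b = Builder.Params().Set(name='conv_builder', use_bn=True).Instantiate()
#     conv_p = b.SeparableConv2D(
#         'sep_conv', in_dim=80, out_dim=128, depth_multiplier=1,
#         filter_shape=[3, 3], is_causal=True)
#     conv = conv_p.Instantiate()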
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import json
import warnings
from typing import (
cast,
overload,
Any,
Callable,
Iterable,
List,
Optional,
Tuple,
TYPE_CHECKING,
Union,
)
from py4j.java_gateway import JavaObject # type: ignore[import]
from pyspark import copy_func
from pyspark.context import SparkContext
from pyspark.sql.types import DataType, StructField, StructType, IntegerType, StringType
if TYPE_CHECKING:
from pyspark.sql._typing import ColumnOrName, LiteralType, DecimalLiteral, DateTimeLiteral
from pyspark.sql.window import WindowSpec
__all__ = ["Column"]
def _create_column_from_literal(literal: Union["LiteralType", "DecimalLiteral"]) -> "Column":
sc = SparkContext._active_spark_context # type: ignore[attr-defined]
return sc._jvm.functions.lit(literal)
def _create_column_from_name(name: str) -> "Column":
sc = SparkContext._active_spark_context # type: ignore[attr-defined]
return sc._jvm.functions.col(name)
def _to_java_column(col: "ColumnOrName") -> JavaObject:
if isinstance(col, Column):
jcol = col._jc
elif isinstance(col, str):
jcol = _create_column_from_name(col)
else:
raise TypeError(
"Invalid argument, not a string or column: "
"{0} of type {1}. "
"For column literals, use 'lit', 'array', 'struct' or 'create_map' "
"function.".format(col, type(col))
)
return jcol
def _to_seq(
sc: SparkContext,
cols: Iterable["ColumnOrName"],
converter: Optional[Callable[["ColumnOrName"], JavaObject]] = None,
) -> JavaObject:
"""
Convert a list of Column (or names) into a JVM Seq of Column.
An optional `converter` could be used to convert items in `cols`
into JVM Column objects.
"""
if converter:
cols = [converter(c) for c in cols]
return sc._jvm.PythonUtils.toSeq(cols) # type: ignore[attr-defined]
def _to_list(
sc: SparkContext,
cols: List["ColumnOrName"],
converter: Optional[Callable[["ColumnOrName"], JavaObject]] = None,
) -> JavaObject:
"""
Convert a list of Column (or names) into a JVM (Scala) List of Column.
An optional `converter` could be used to convert items in `cols`
into JVM Column objects.
"""
if converter:
cols = [converter(c) for c in cols]
return sc._jvm.PythonUtils.toList(cols) # type: ignore[attr-defined]
def _unary_op(
name: str,
doc: str = "unary operator",
) -> Callable[["Column"], "Column"]:
"""Create a method for given unary operator"""
def _(self: "Column") -> "Column":
jc = getattr(self._jc, name)()
return Column(jc)
_.__doc__ = doc
return _
def _func_op(name: str, doc: str = "") -> Callable[["Column"], "Column"]:
def _(self: "Column") -> "Column":
sc = SparkContext._active_spark_context # type: ignore[attr-defined]
jc = getattr(sc._jvm.functions, name)(self._jc)
return Column(jc)
_.__doc__ = doc
return _
def _bin_func_op(
name: str,
reverse: bool = False,
doc: str = "binary function",
) -> Callable[["Column", Union["Column", "LiteralType", "DecimalLiteral"]], "Column"]:
def _(self: "Column", other: Union["Column", "LiteralType", "DecimalLiteral"]) -> "Column":
sc = SparkContext._active_spark_context # type: ignore[attr-defined]
fn = getattr(sc._jvm.functions, name)
jc = other._jc if isinstance(other, Column) else _create_column_from_literal(other)
njc = fn(self._jc, jc) if not reverse else fn(jc, self._jc)
return Column(njc)
_.__doc__ = doc
return _
def _bin_op(
name: str,
doc: str = "binary operator",
) -> Callable[
["Column", Union["Column", "LiteralType", "DecimalLiteral", "DateTimeLiteral"]], "Column"
]:
"""Create a method for given binary operator"""
def _(
self: "Column",
other: Union["Column", "LiteralType", "DecimalLiteral", "DateTimeLiteral"],
) -> "Column":
jc = other._jc if isinstance(other, Column) else other
njc = getattr(self._jc, name)(jc)
return Column(njc)
_.__doc__ = doc
return _
def _reverse_op(
name: str,
doc: str = "binary operator",
) -> Callable[["Column", Union["LiteralType", "DecimalLiteral"]], "Column"]:
"""Create a method for binary operator (this object is on right side)"""
def _(self: "Column", other: Union["LiteralType", "DecimalLiteral"]) -> "Column":
jother = _create_column_from_literal(other)
jc = getattr(jother, name)(self._jc)
return Column(jc)
_.__doc__ = doc
return _
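# The factories above generate Column methods by delegating to the wrapped
# JVM Column (or to org.apache.spark.sql.functions for _func_op and
# _bin_func_op). For example, `df.age + 1` goes through _bin_op("plus"),
# which invokes the JVM Column.plus() with the literal 1 and wraps the
# result in a new Python Column.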
class Column(object):
"""
A column in a DataFrame.
:class:`Column` instances can be created by::
# 1. Select a column out of a DataFrame
df.colName
df["colName"]
# 2. Create from an expression
df.colName + 1
1 / df.colName
.. versionadded:: 1.3.0
"""
def __init__(self, jc: JavaObject) -> None:
self._jc = jc
# arithmetic operators
__neg__ = _func_op("negate")
__add__ = cast(
Callable[["Column", Union["Column", "LiteralType", "DecimalLiteral"]], "Column"],
_bin_op("plus"),
)
__sub__ = cast(
Callable[["Column", Union["Column", "LiteralType", "DecimalLiteral"]], "Column"],
_bin_op("minus"),
)
__mul__ = cast(
Callable[["Column", Union["Column", "LiteralType", "DecimalLiteral"]], "Column"],
_bin_op("multiply"),
)
__div__ = cast(
Callable[["Column", Union["Column", "LiteralType", "DecimalLiteral"]], "Column"],
_bin_op("divide"),
)
__truediv__ = cast(
Callable[["Column", Union["Column", "LiteralType", "DecimalLiteral"]], "Column"],
_bin_op("divide"),
)
__mod__ = cast(
Callable[["Column", Union["Column", "LiteralType", "DecimalLiteral"]], "Column"],
_bin_op("mod"),
)
__radd__ = cast(
Callable[["Column", Union["LiteralType", "DecimalLiteral"]], "Column"], _bin_op("plus")
)
__rsub__ = cast(
Callable[["Column", Union["LiteralType", "DecimalLiteral"]], "Column"], _reverse_op("minus")
)
__rmul__ = cast(
Callable[["Column", Union["LiteralType", "DecimalLiteral"]], "Column"], _bin_op("multiply")
)
__rdiv__ = cast(
Callable[["Column", Union["LiteralType", "DecimalLiteral"]], "Column"],
_reverse_op("divide"),
)
__rtruediv__ = cast(
Callable[["Column", Union["LiteralType", "DecimalLiteral"]], "Column"],
_reverse_op("divide"),
)
__rmod__ = cast(
Callable[["Column", Union["LiteralType", "DecimalLiteral"]], "Column"], _reverse_op("mod")
)
__pow__ = _bin_func_op("pow")
__rpow__ = cast(
Callable[["Column", Union["LiteralType", "DecimalLiteral"]], "Column"],
_bin_func_op("pow", reverse=True),
)
# logistic operators
def __eq__( # type: ignore[override]
self,
other: Union["Column", "LiteralType", "DecimalLiteral", "DateTimeLiteral"],
) -> "Column":
"""binary function"""
return _bin_op("equalTo")(self, other)
def __ne__( # type: ignore[override]
self,
other: Any,
) -> "Column":
"""binary function"""
return _bin_op("notEqual")(self, other)
__lt__ = _bin_op("lt")
__le__ = _bin_op("leq")
__ge__ = _bin_op("geq")
__gt__ = _bin_op("gt")
_eqNullSafe_doc = """
Equality test that is safe for null values.
.. versionadded:: 2.3.0
Parameters
----------
other
a value or :class:`Column`
Examples
--------
>>> from pyspark.sql import Row
>>> df1 = spark.createDataFrame([
... Row(id=1, value='foo'),
... Row(id=2, value=None)
... ])
>>> df1.select(
... df1['value'] == 'foo',
... df1['value'].eqNullSafe('foo'),
... df1['value'].eqNullSafe(None)
... ).show()
+-------------+---------------+----------------+
|(value = foo)|(value <=> foo)|(value <=> NULL)|
+-------------+---------------+----------------+
| true| true| false|
| null| false| true|
+-------------+---------------+----------------+
>>> df2 = spark.createDataFrame([
... Row(value = 'bar'),
... Row(value = None)
... ])
>>> df1.join(df2, df1["value"] == df2["value"]).count()
0
>>> df1.join(df2, df1["value"].eqNullSafe(df2["value"])).count()
1
>>> df2 = spark.createDataFrame([
... Row(id=1, value=float('NaN')),
... Row(id=2, value=42.0),
... Row(id=3, value=None)
... ])
>>> df2.select(
... df2['value'].eqNullSafe(None),
... df2['value'].eqNullSafe(float('NaN')),
... df2['value'].eqNullSafe(42.0)
... ).show()
+----------------+---------------+----------------+
|(value <=> NULL)|(value <=> NaN)|(value <=> 42.0)|
+----------------+---------------+----------------+
| false| true| false|
| false| false| true|
| true| false| false|
+----------------+---------------+----------------+
Notes
-----
Unlike Pandas, PySpark doesn't consider NaN values to be NULL. See the
`NaN Semantics <https://spark.apache.org/docs/latest/sql-ref-datatypes.html#nan-semantics>`_
for details.
"""
eqNullSafe = _bin_op("eqNullSafe", _eqNullSafe_doc)
# `and`, `or`, `not` cannot be overloaded in Python,
# so use bitwise operators as boolean operators
__and__ = _bin_op("and")
__or__ = _bin_op("or")
__invert__ = _func_op("not")
__rand__ = _bin_op("and")
__ror__ = _bin_op("or")
# container operators
def __contains__(self, item: Any) -> None:
raise ValueError(
"Cannot apply 'in' operator against a column: please use 'contains' "
"in a string column or 'array_contains' function for an array column."
)
# bitwise operators
_bitwiseOR_doc = """
Compute bitwise OR of this expression with another expression.
Parameters
----------
other
a value or :class:`Column` to calculate bitwise or(|) with
this :class:`Column`.
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(a=170, b=75)])
>>> df.select(df.a.bitwiseOR(df.b)).collect()
[Row((a | b)=235)]
"""
_bitwiseAND_doc = """
Compute bitwise AND of this expression with another expression.
Parameters
----------
other
a value or :class:`Column` to calculate bitwise and(&) with
this :class:`Column`.
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(a=170, b=75)])
>>> df.select(df.a.bitwiseAND(df.b)).collect()
[Row((a & b)=10)]
"""
_bitwiseXOR_doc = """
Compute bitwise XOR of this expression with another expression.
Parameters
----------
other
a value or :class:`Column` to calculate bitwise xor(^) with
this :class:`Column`.
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(a=170, b=75)])
>>> df.select(df.a.bitwiseXOR(df.b)).collect()
[Row((a ^ b)=225)]
"""
bitwiseOR = _bin_op("bitwiseOR", _bitwiseOR_doc)
bitwiseAND = _bin_op("bitwiseAND", _bitwiseAND_doc)
bitwiseXOR = _bin_op("bitwiseXOR", _bitwiseXOR_doc)
def getItem(self, key: Any) -> "Column":
"""
An expression that gets an item at position ``ordinal`` out of a list,
or gets an item by key out of a dict.
.. versionadded:: 1.3.0
Examples
--------
>>> df = spark.createDataFrame([([1, 2], {"key": "value"})], ["l", "d"])
>>> df.select(df.l.getItem(0), df.d.getItem("key")).show()
+----+------+
|l[0]|d[key]|
+----+------+
| 1| value|
+----+------+
"""
if isinstance(key, Column):
warnings.warn(
"A column as 'key' in getItem is deprecated as of Spark 3.0, and will not "
"be supported in the future release. Use `column[key]` or `column.key` syntax "
"instead.",
FutureWarning,
)
return self[key]
def getField(self, name: Any) -> "Column":
"""
An expression that gets a field by name in a :class:`StructType`.
.. versionadded:: 1.3.0
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(r=Row(a=1, b="b"))])
>>> df.select(df.r.getField("b")).show()
+---+
|r.b|
+---+
| b|
+---+
>>> df.select(df.r.a).show()
+---+
|r.a|
+---+
| 1|
+---+
"""
if isinstance(name, Column):
warnings.warn(
"A column as 'name' in getField is deprecated as of Spark 3.0, and will not "
"be supported in the future release. Use `column[name]` or `column.name` syntax "
"instead.",
FutureWarning,
)
return self[name]
def withField(self, fieldName: str, col: "Column") -> "Column":
"""
An expression that adds/replaces a field in :class:`StructType` by name.
.. versionadded:: 3.1.0
Examples
--------
>>> from pyspark.sql import Row
>>> from pyspark.sql.functions import lit
>>> df = spark.createDataFrame([Row(a=Row(b=1, c=2))])
>>> df.withColumn('a', df['a'].withField('b', lit(3))).select('a.b').show()
+---+
| b|
+---+
| 3|
+---+
>>> df.withColumn('a', df['a'].withField('d', lit(4))).select('a.d').show()
+---+
| d|
+---+
| 4|
+---+
"""
if not isinstance(fieldName, str):
raise TypeError("fieldName should be a string")
if not isinstance(col, Column):
raise TypeError("col should be a Column")
return Column(self._jc.withField(fieldName, col._jc))
def dropFields(self, *fieldNames: str) -> "Column":
"""
An expression that drops fields in :class:`StructType` by name.
This is a no-op if schema doesn't contain field name(s).
.. versionadded:: 3.1.0
Examples
--------
>>> from pyspark.sql import Row
>>> from pyspark.sql.functions import col, lit
>>> df = spark.createDataFrame([
... Row(a=Row(b=1, c=2, d=3, e=Row(f=4, g=5, h=6)))])
>>> df.withColumn('a', df['a'].dropFields('b')).show()
+-----------------+
| a|
+-----------------+
|{2, 3, {4, 5, 6}}|
+-----------------+
>>> df.withColumn('a', df['a'].dropFields('b', 'c')).show()
+--------------+
| a|
+--------------+
|{3, {4, 5, 6}}|
+--------------+
This method supports dropping multiple nested fields directly e.g.
>>> df.withColumn("a", col("a").dropFields("e.g", "e.h")).show()
+--------------+
| a|
+--------------+
|{1, 2, 3, {4}}|
+--------------+
However, if you are going to add/replace multiple nested fields,
it is preferred to extract out the nested struct before
adding/replacing multiple fields e.g.
>>> df.select(col("a").withField(
... "e", col("a.e").dropFields("g", "h")).alias("a")
... ).show()
+--------------+
| a|
+--------------+
|{1, 2, 3, {4}}|
+--------------+
"""
sc = SparkContext._active_spark_context # type: ignore[attr-defined]
jc = self._jc.dropFields(_to_seq(sc, fieldNames))
return Column(jc)
def __getattr__(self, item: Any) -> "Column":
if item.startswith("__"):
raise AttributeError(item)
return self[item]
def __getitem__(self, k: Any) -> "Column":
if isinstance(k, slice):
if k.step is not None:
raise ValueError("slice with step is not supported.")
return self.substr(k.start, k.stop)
else:
return _bin_op("apply")(self, k)
def __iter__(self) -> None:
raise TypeError("Column is not iterable")
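# Illustrative sketch (not in the original source): together, __getattr__,
# __getitem__, and substr make attribute access, item access, and slicing
# all build Column expressions, e.g.:
#
#     df.name[1:3]    # equivalent to df.name.substr(1, 3)
#     df.d["key"]     # equivalent to df.d.getItem("key")
#     df.r.a          # equivalent to df.r.getField("a")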
# string methods
_contains_doc = """
Contains the other element. Returns a boolean :class:`Column` based on a string match.
Parameters
----------
    other
        the string to search for. A value as a literal or a :class:`Column`.
Examples
--------
>>> df.filter(df.name.contains('o')).collect()
[Row(age=5, name='Bob')]
"""
_rlike_doc = """
SQL RLIKE expression (LIKE with Regex). Returns a boolean :class:`Column` based on a regex
match.
Parameters
----------
other : str
an extended regex expression
Examples
--------
>>> df.filter(df.name.rlike('ice$')).collect()
[Row(age=2, name='Alice')]
"""
_like_doc = """
SQL like expression. Returns a boolean :class:`Column` based on a SQL LIKE match.
Parameters
----------
other : str
a SQL LIKE pattern
See Also
--------
pyspark.sql.Column.rlike
Examples
--------
>>> df.filter(df.name.like('Al%')).collect()
[Row(age=2, name='Alice')]
"""
_ilike_doc = """
SQL ILIKE expression (case insensitive LIKE). Returns a boolean :class:`Column`
based on a case insensitive match.
.. versionadded:: 3.3.0
Parameters
----------
other : str
a SQL LIKE pattern
See Also
--------
pyspark.sql.Column.rlike
Examples
--------
>>> df.filter(df.name.ilike('%Ice')).collect()
[Row(age=2, name='Alice')]
"""
_startswith_doc = """
String starts with. Returns a boolean :class:`Column` based on a string match.
Parameters
----------
other : :class:`Column` or str
string at start of line (do not use a regex `^`)
Examples
--------
>>> df.filter(df.name.startswith('Al')).collect()
[Row(age=2, name='Alice')]
>>> df.filter(df.name.startswith('^Al')).collect()
[]
"""
_endswith_doc = """
String ends with. Returns a boolean :class:`Column` based on a string match.
Parameters
----------
other : :class:`Column` or str
string at end of line (do not use a regex `$`)
Examples
--------
>>> df.filter(df.name.endswith('ice')).collect()
[Row(age=2, name='Alice')]
>>> df.filter(df.name.endswith('ice$')).collect()
[]
"""
contains = _bin_op("contains", _contains_doc)
rlike = _bin_op("rlike", _rlike_doc)
like = _bin_op("like", _like_doc)
ilike = _bin_op("ilike", _ilike_doc)
startswith = _bin_op("startsWith", _startswith_doc)
endswith = _bin_op("endsWith", _endswith_doc)
@overload
def substr(self, startPos: int, length: int) -> "Column":
...
@overload
def substr(self, startPos: "Column", length: "Column") -> "Column":
...
def substr(self, startPos: Union[int, "Column"], length: Union[int, "Column"]) -> "Column":
"""
Return a :class:`Column` which is a substring of the column.
.. versionadded:: 1.3.0
Parameters
----------
startPos : :class:`Column` or int
start position
length : :class:`Column` or int
length of the substring
Examples
--------
>>> df.select(df.name.substr(1, 3).alias("col")).collect()
[Row(col='Ali'), Row(col='Bob')]
"""
if type(startPos) != type(length):
raise TypeError(
"startPos and length must be the same type. "
"Got {startPos_t} and {length_t}, respectively.".format(
startPos_t=type(startPos),
length_t=type(length),
)
)
if isinstance(startPos, int):
jc = self._jc.substr(startPos, length)
elif isinstance(startPos, Column):
jc = self._jc.substr(cast("Column", startPos)._jc, cast("Column", length)._jc)
else:
raise TypeError("Unexpected type: %s" % type(startPos))
return Column(jc)
def isin(self, *cols: Any) -> "Column":
"""
A boolean expression that is evaluated to true if the value of this
expression is contained by the evaluated values of the arguments.
.. versionadded:: 1.5.0
Examples
--------
>>> df[df.name.isin("Bob", "Mike")].collect()
[Row(age=5, name='Bob')]
>>> df[df.age.isin([1, 2, 3])].collect()
[Row(age=2, name='Alice')]
"""
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cast(Tuple, cols[0])
cols = cast(
Tuple,
[c._jc if isinstance(c, Column) else _create_column_from_literal(c) for c in cols],
)
sc = SparkContext._active_spark_context # type: ignore[attr-defined]
jc = getattr(self._jc, "isin")(_to_seq(sc, cols))
return Column(jc)
# order
_asc_doc = """
Returns a sort expression based on ascending order of the column.
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([('Tom', 80), ('Alice', None)], ["name", "height"])
>>> df.select(df.name).orderBy(df.name.asc()).collect()
[Row(name='Alice'), Row(name='Tom')]
"""
_asc_nulls_first_doc = """
    Returns a sort expression based on ascending order of the column, and null values
    appear before non-null values.
.. versionadded:: 2.4.0
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([('Tom', 80), (None, 60), ('Alice', None)], ["name", "height"])
>>> df.select(df.name).orderBy(df.name.asc_nulls_first()).collect()
[Row(name=None), Row(name='Alice'), Row(name='Tom')]
"""
_asc_nulls_last_doc = """
Returns a sort expression based on ascending order of the column, and null values
appear after non-null values.
.. versionadded:: 2.4.0
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([('Tom', 80), (None, 60), ('Alice', None)], ["name", "height"])
>>> df.select(df.name).orderBy(df.name.asc_nulls_last()).collect()
[Row(name='Alice'), Row(name='Tom'), Row(name=None)]
"""
_desc_doc = """
Returns a sort expression based on the descending order of the column.
.. versionadded:: 2.4.0
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([('Tom', 80), ('Alice', None)], ["name", "height"])
>>> df.select(df.name).orderBy(df.name.desc()).collect()
[Row(name='Tom'), Row(name='Alice')]
"""
_desc_nulls_first_doc = """
Returns a sort expression based on the descending order of the column, and null values
appear before non-null values.
.. versionadded:: 2.4.0
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([('Tom', 80), (None, 60), ('Alice', None)], ["name", "height"])
>>> df.select(df.name).orderBy(df.name.desc_nulls_first()).collect()
[Row(name=None), Row(name='Tom'), Row(name='Alice')]
"""
_desc_nulls_last_doc = """
Returns a sort expression based on the descending order of the column, and null values
appear after non-null values.
.. versionadded:: 2.4.0
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([('Tom', 80), (None, 60), ('Alice', None)], ["name", "height"])
>>> df.select(df.name).orderBy(df.name.desc_nulls_last()).collect()
[Row(name='Tom'), Row(name='Alice'), Row(name=None)]
"""
asc = _unary_op("asc", _asc_doc)
asc_nulls_first = _unary_op("asc_nulls_first", _asc_nulls_first_doc)
asc_nulls_last = _unary_op("asc_nulls_last", _asc_nulls_last_doc)
desc = _unary_op("desc", _desc_doc)
desc_nulls_first = _unary_op("desc_nulls_first", _desc_nulls_first_doc)
desc_nulls_last = _unary_op("desc_nulls_last", _desc_nulls_last_doc)
_isNull_doc = """
True if the current expression is null.
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(name='Tom', height=80), Row(name='Alice', height=None)])
>>> df.filter(df.height.isNull()).collect()
[Row(name='Alice', height=None)]
"""
_isNotNull_doc = """
True if the current expression is NOT null.
Examples
--------
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(name='Tom', height=80), Row(name='Alice', height=None)])
>>> df.filter(df.height.isNotNull()).collect()
[Row(name='Tom', height=80)]
"""
isNull = _unary_op("isNull", _isNull_doc)
isNotNull = _unary_op("isNotNull", _isNotNull_doc)
def alias(self, *alias: str, **kwargs: Any) -> "Column":
"""
Returns this column aliased with a new name or names (in the case of expressions that
return more than one column, such as explode).
.. versionadded:: 1.3.0
Parameters
----------
alias : str
desired column names (collects all positional arguments passed)
Other Parameters
----------------
metadata: dict
a dict of information to be stored in ``metadata`` attribute of the
corresponding :class:`StructField <pyspark.sql.types.StructField>` (optional, keyword
only argument)
.. versionchanged:: 2.2.0
Added optional ``metadata`` argument.
Examples
--------
>>> df.select(df.age.alias("age2")).collect()
[Row(age2=2), Row(age2=5)]
>>> df.select(df.age.alias("age3", metadata={'max': 99})).schema['age3'].metadata['max']
99
"""
metadata = kwargs.pop("metadata", None)
    assert not kwargs, "Unexpected kwargs were passed: %s" % kwargs
sc = SparkContext._active_spark_context # type: ignore[attr-defined]
if len(alias) == 1:
if metadata:
jmeta = sc._jvm.org.apache.spark.sql.types.Metadata.fromJson(json.dumps(metadata))
return Column(getattr(self._jc, "as")(alias[0], jmeta))
else:
return Column(getattr(self._jc, "as")(alias[0]))
else:
if metadata:
raise ValueError("metadata can only be provided for a single column")
return Column(getattr(self._jc, "as")(_to_seq(sc, list(alias))))
name = copy_func(alias, sinceversion=2.0, doc=":func:`name` is an alias for :func:`alias`.")
def cast(self, dataType: Union[DataType, str]) -> "Column":
"""
Casts the column into type ``dataType``.
.. versionadded:: 1.3.0
Examples
--------
>>> df.select(df.age.cast("string").alias('ages')).collect()
[Row(ages='2'), Row(ages='5')]
>>> df.select(df.age.cast(StringType()).alias('ages')).collect()
[Row(ages='2'), Row(ages='5')]
"""
if isinstance(dataType, str):
jc = self._jc.cast(dataType)
elif isinstance(dataType, DataType):
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
jdt = spark._jsparkSession.parseDataType(dataType.json())
jc = self._jc.cast(jdt)
else:
raise TypeError("unexpected type: %s" % type(dataType))
return Column(jc)
astype = copy_func(cast, sinceversion=1.4, doc=":func:`astype` is an alias for :func:`cast`.")
def between(
self,
lowerBound: Union["Column", "LiteralType", "DateTimeLiteral", "DecimalLiteral"],
upperBound: Union["Column", "LiteralType", "DateTimeLiteral", "DecimalLiteral"],
) -> "Column":
"""
True if the current column is between the lower bound and upper bound, inclusive.
.. versionadded:: 1.3.0
Examples
--------
>>> df.select(df.name, df.age.between(2, 4)).show()
+-----+---------------------------+
| name|((age >= 2) AND (age <= 4))|
+-----+---------------------------+
|Alice| true|
| Bob| false|
+-----+---------------------------+
"""
return (self >= lowerBound) & (self <= upperBound)
def when(self, condition: "Column", value: Any) -> "Column":
"""
Evaluates a list of conditions and returns one of multiple possible result expressions.
If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
.. versionadded:: 1.4.0
Parameters
----------
condition : :class:`Column`
a boolean :class:`Column` expression.
value
a literal value, or a :class:`Column` expression.
Examples
--------
>>> from pyspark.sql import functions as F
>>> df.select(df.name, F.when(df.age > 4, 1).when(df.age < 3, -1).otherwise(0)).show()
+-----+------------------------------------------------------------+
| name|CASE WHEN (age > 4) THEN 1 WHEN (age < 3) THEN -1 ELSE 0 END|
+-----+------------------------------------------------------------+
|Alice| -1|
| Bob| 1|
+-----+------------------------------------------------------------+
See Also
--------
pyspark.sql.functions.when
"""
if not isinstance(condition, Column):
raise TypeError("condition should be a Column")
v = value._jc if isinstance(value, Column) else value
jc = self._jc.when(condition._jc, v)
return Column(jc)
def otherwise(self, value: Any) -> "Column":
"""
Evaluates a list of conditions and returns one of multiple possible result expressions.
If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
.. versionadded:: 1.4.0
Parameters
----------
value
a literal value, or a :class:`Column` expression.
Examples
--------
>>> from pyspark.sql import functions as F
>>> df.select(df.name, F.when(df.age > 3, 1).otherwise(0)).show()
+-----+-------------------------------------+
| name|CASE WHEN (age > 3) THEN 1 ELSE 0 END|
+-----+-------------------------------------+
|Alice| 0|
| Bob| 1|
+-----+-------------------------------------+
See Also
--------
pyspark.sql.functions.when
"""
v = value._jc if isinstance(value, Column) else value
jc = self._jc.otherwise(v)
return Column(jc)
def over(self, window: "WindowSpec") -> "Column":
"""
Define a windowing column.
.. versionadded:: 1.4.0
Parameters
----------
window : :class:`WindowSpec`
Returns
-------
:class:`Column`
Examples
--------
>>> from pyspark.sql import Window
>>> window = Window.partitionBy("name").orderBy("age") \
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
>>> from pyspark.sql.functions import rank, min
>>> from pyspark.sql.functions import desc
>>> df.withColumn("rank", rank().over(window)) \
.withColumn("min", min('age').over(window)).sort(desc("age")).show()
+---+-----+----+---+
|age| name|rank|min|
+---+-----+----+---+
| 5| Bob| 1| 5|
| 2|Alice| 1| 2|
+---+-----+----+---+
"""
from pyspark.sql.window import WindowSpec
if not isinstance(window, WindowSpec):
raise TypeError("window should be WindowSpec")
jc = self._jc.over(window._jspec)
return Column(jc)
def __nonzero__(self) -> None:
raise ValueError(
"Cannot convert column into bool: please use '&' for 'and', '|' for 'or', "
"'~' for 'not' when building DataFrame boolean expressions."
)
__bool__ = __nonzero__
def __repr__(self) -> str:
return "Column<'%s'>" % self._jc.toString()
def _test() -> None:
import doctest
from pyspark.sql import SparkSession
import pyspark.sql.column
globs = pyspark.sql.column.__dict__.copy()
spark = SparkSession.builder.master("local[4]").appName("sql.column tests").getOrCreate()
sc = spark.sparkContext
globs["spark"] = spark
globs["df"] = sc.parallelize([(2, "Alice"), (5, "Bob")]).toDF(
StructType([StructField("age", IntegerType()), StructField("name", StringType())])
)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.column,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
|
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import migrate
from migrate import exceptions
from migrate.versioning import api as versioning_api
from oslo_db.sqlalchemy import migration
import six
import sqlalchemy
from keystone.common import sql
import keystone.conf
from keystone import exception
from keystone.i18n import _
CONF = keystone.conf.CONF
USE_TRIGGERS = True
LEGACY_REPO = 'migrate_repo'
EXPAND_REPO = 'expand_repo'
DATA_MIGRATION_REPO = 'data_migration_repo'
CONTRACT_REPO = 'contract_repo'
class Repository(object):
def __init__(self, engine, repo_name):
self.repo_name = repo_name
self.repo_path = find_repo(self.repo_name)
self.min_version = (
get_init_version(abs_path=self.repo_path))
self.schema_ = versioning_api.ControlledSchema.create(
engine, self.repo_path, self.min_version)
self.max_version = self.schema_.repository.version().version
def upgrade(self, version=None, current_schema=None):
version = version or self.max_version
err = ''
upgrade = True
version = versioning_api._migrate_version(
self.schema_, version, upgrade, err)
if not current_schema:
current_schema = self.schema_
changeset = current_schema.changeset(version)
for ver, change in changeset:
self.schema_.runchange(ver, change, changeset.step)
if self.schema_.version != version:
raise Exception(
'Actual version (%s) of %s does not equal expected '
'version (%s)' % (
self.schema_.version, self.repo_name, version))
@property
def version(self):
with sql.session_for_read() as session:
return migration.db_version(
session.get_bind(), self.repo_path, self.min_version)
# Different RDBMSs use different schemes for naming the Foreign Key
# Constraints. SQLAlchemy does not yet attempt to determine the name
# for the constraint, and instead attempts to deduce it from the column.
# This fails on MySQL.
def get_constraints_names(table, column_name):
fkeys = [fk.name for fk in table.constraints
if (isinstance(fk, sqlalchemy.ForeignKeyConstraint) and
column_name in fk.columns)]
return fkeys
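# Illustrative sketch (hypothetical table, not part of the original module):
# reflect an existing table and look up the backend-assigned names of the
# foreign key constraints on one of its columns.
def _example_constraint_names(engine):
    meta = sqlalchemy.MetaData()
    user = sqlalchemy.Table('user', meta, autoload=True, autoload_with=engine)
    return get_constraints_names(user, 'domain_id')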
# remove_constraints and add_constraints both accept a list of dictionaries
# that contain:
# {'table': a sqlalchemy table. The constraint is added to or dropped from
#           this table.
#  'fk_column': the name of a column on the above table. The constraint
#           is added to or dropped from this column.
#  'ref_column': a sqlalchemy column object. This is the reference column
#           for the constraint.}
# See the illustrative sketch after add_constraints below.
def remove_constraints(constraints):
for constraint_def in constraints:
constraint_names = get_constraints_names(constraint_def['table'],
constraint_def['fk_column'])
for constraint_name in constraint_names:
migrate.ForeignKeyConstraint(
columns=[getattr(constraint_def['table'].c,
constraint_def['fk_column'])],
refcolumns=[constraint_def['ref_column']],
name=constraint_name).drop()
def add_constraints(constraints):
for constraint_def in constraints:
if constraint_def['table'].kwargs.get('mysql_engine') == 'MyISAM':
# Don't try to create constraint when using MyISAM because it's
# not supported.
continue
ref_col = constraint_def['ref_column']
ref_engine = ref_col.table.kwargs.get('mysql_engine')
if ref_engine == 'MyISAM':
# Don't try to create constraint when using MyISAM because it's
# not supported.
continue
migrate.ForeignKeyConstraint(
columns=[getattr(constraint_def['table'].c,
constraint_def['fk_column'])],
refcolumns=[constraint_def['ref_column']]).create()
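# Illustrative sketch (hypothetical tables, not part of the original module):
# build a constraint-definition list in the format described above and
# round-trip it through remove_constraints()/add_constraints().
def _example_constraint_round_trip(user_table, domain_table):
    constraints = [{'table': user_table,
                    'fk_column': 'domain_id',
                    'ref_column': domain_table.c.id}]
    remove_constraints(constraints)
    add_constraints(constraints)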
def rename_tables_with_constraints(renames, constraints, engine):
"""Rename tables with foreign key constraints.
Tables are renamed after first removing constraints. The constraints are
replaced after the rename is complete.
This works on databases that don't support renaming tables that have
constraints on them (DB2).
`renames` is a dict, mapping {'to_table_name': from_table, ...}
"""
if engine.name != 'sqlite':
# SQLite doesn't support constraints, so nothing to remove.
remove_constraints(constraints)
for to_table_name in renames:
from_table = renames[to_table_name]
from_table.rename(to_table_name)
    if engine.name != 'sqlite':
add_constraints(constraints)
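# Illustrative sketch (hypothetical tables, not part of the original module):
# rename 'user_old' to 'user' while re-creating its foreign key constraints,
# for engines that cannot rename constrained tables in place.
def _example_rename(user_old_table, domain_table, engine):
    renames = {'user': user_old_table}
    constraints = [{'table': user_old_table,
                    'fk_column': 'domain_id',
                    'ref_column': domain_table.c.id}]
    rename_tables_with_constraints(renames, constraints, engine)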
def find_repo(repo_name):
"""Return the absolute path to the named repository."""
path = os.path.abspath(os.path.join(
os.path.dirname(sql.__file__), repo_name))
if not os.path.isdir(path):
raise exception.MigrationNotProvided(sql.__name__, path)
return path
def _sync_common_repo(version):
abs_path = find_repo(LEGACY_REPO)
init_version = get_init_version()
with sql.session_for_write() as session:
engine = session.get_bind()
_assert_not_schema_downgrade(version=version)
migration.db_sync(engine, abs_path, version=version,
init_version=init_version, sanity_check=False)
def _sync_repo(repo_name):
abs_path = find_repo(repo_name)
with sql.session_for_write() as session:
engine = session.get_bind()
# Register the repo with the version control API
# If it already knows about the repo, it will throw
# an exception that we can safely ignore
try:
migration.db_version_control(engine, abs_path)
except (migration.exception.DbMigrationError,
exceptions.DatabaseAlreadyControlledError): # nosec
pass
init_version = get_init_version(abs_path=abs_path)
migration.db_sync(engine, abs_path,
init_version=init_version, sanity_check=False)
def get_init_version(abs_path=None):
"""Get the initial version of a migrate repository.
:param abs_path: Absolute path to migrate repository.
:return: initial version number or None, if DB is empty.
"""
if abs_path is None:
abs_path = find_repo(LEGACY_REPO)
repo = migrate.versioning.repository.Repository(abs_path)
# Sadly, Repository has a `latest` but not an `oldest`.
# The value is a VerNum object which needs to be converted into an int.
oldest = int(min(repo.versions.versions))
if oldest < 1:
return None
# The initial version is one less
return oldest - 1
def _assert_not_schema_downgrade(version=None):
if version is not None:
try:
current_ver = int(six.text_type(get_db_version()))
if int(version) < current_ver:
raise migration.exception.DbMigrationError(
_("Unable to downgrade schema"))
except exceptions.DatabaseNotControlledError: # nosec
# NOTE(morganfainberg): The database is not controlled, this action
# cannot be a downgrade.
pass
def offline_sync_database_to_version(version=None):
"""Perform and off-line sync of the database.
Migrate the database up to the latest version, doing the equivalent of
the cycle of --expand, --migrate and --contract, for when an offline
upgrade is being performed.
    If a version is specified, only the main (legacy) database migration is
    carried out up to that version - the expand, migration and contract
    phases are NOT run. Downgrading is not supported.
"""
global USE_TRIGGERS
    # This flag lets us bypass trigger setup & teardown for non-rolling
    # upgrades. We set it as a global variable immediately before handing off
    # to sqlalchemy-migrate, because we can't pass arguments directly to
    # migrations that depend on it. We could also register this as a CONF
    # option, but the idea here is that we aren't exposing a new API.
USE_TRIGGERS = False
if version:
_sync_common_repo(version)
else:
expand_schema()
migrate_data()
contract_schema()
def get_db_version():
with sql.session_for_read() as session:
return migration.db_version(
session.get_bind(), find_repo(LEGACY_REPO), get_init_version())
def expand_schema():
"""Expand the database schema ahead of data migration.
This is run manually by the keystone-manage command before the first
keystone node is migrated to the latest release.
"""
# Make sure all the legacy migrations are run before we run any new
# expand migrations.
_sync_common_repo(version=None)
_sync_repo(repo_name=EXPAND_REPO)
def migrate_data():
"""Migrate data to match the new schema.
This is run manually by the keystone-manage command once the keystone
schema has been expanded for the new release.
"""
_sync_repo(repo_name=DATA_MIGRATION_REPO)
def contract_schema():
"""Contract the database.
This is run manually by the keystone-manage command once the keystone
nodes have been upgraded to the latest release and will remove any old
tables/columns that are no longer required.
"""
_sync_repo(repo_name=CONTRACT_REPO)
|
|
"""
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
It implements a variant of Random Kitchen Sinks [1].
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
        Parameter of the RBF kernel: exp(-gamma * ||x - y||^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://people.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
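# Approximation sketch (synthetic data, illustrative only; not part of the
# original module): inner products of the mapped features approximate the
# RBF kernel exp(-gamma * ||x - y||^2), improving as n_components grows.
def _demo_rbf_sampler():
    rng = np.random.RandomState(0)
    X = rng.normal(size=(5, 3))
    Z = RBFSampler(gamma=0.5, n_components=2000,
                   random_state=0).fit_transform(X)
    K_approx = np.dot(Z, Z.T)
    K_exact = pairwise_kernels(X, metric='rbf', gamma=0.5)
    # The maximum absolute error shrinks as n_components increases.
    return np.abs(K_approx - K_exact).max()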
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X < 0).any():
raise ValueError("X may not contain entries smaller than zero.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
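# Usage sketch (illustrative only; not part of the original module): the
# skewed chi-squared map is defined for non-negative inputs and produces
# n_components output features per sample.
def _demo_skewed_chi2():
    X = np.abs(np.random.RandomState(0).normal(size=(4, 6)))
    Z = SkewedChi2Sampler(skewedness=1., n_components=50,
                          random_state=0).fit_transform(X)
    assert Z.shape == (4, 50)
    return Z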
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
    Uses sampling of the Fourier transform of the kernel characteristic
    function at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
    space is transformed into 2*sample_steps-1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
    chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
        X_new : {array, sparse matrix}, \
               shape = (n_samples, n_features * (2*sample_steps - 1))
            Whether the return value is an array or sparse matrix depends on
            the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
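# Shape sketch (illustrative only; not part of the original module): each
# input feature expands into 2*sample_steps - 1 output features, so
# sample_steps=2 maps a (3, 4) input to a (3, 12) output.
def _demo_additive_chi2_shape():
    X = np.abs(np.random.RandomState(0).normal(size=(3, 4)))
    X_new = AdditiveChi2Sampler(sample_steps=2).fit_transform(X)
    assert X_new.shape == (3, 4 * (2 * 2 - 1))
    return X_new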
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
        X : array-like, shape=(n_samples, n_features)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
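# Approximation sketch (illustrative only; not part of the original module):
# with n_components equal to n_samples, the Nystroem map reproduces the
# kernel matrix up to numerical error: Z @ Z.T ~= pairwise_kernels(X).
def _demo_nystroem():
    rng = np.random.RandomState(0)
    X = rng.normal(size=(20, 5))
    Z = Nystroem(kernel='rbf', gamma=0.2, n_components=20,
                 random_state=0).fit_transform(X)
    K = pairwise_kernels(X, metric='rbf', gamma=0.2)
    # Close to zero here; a strict subset of samples gives an approximation.
    return np.abs(np.dot(Z, Z.T) - K).max()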
|
|
# -*- coding: utf-8 -*-
"""
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import csv
import logging
import typing as ty
from datetime import date
import collections as coll
import tatsu
from rnacentral_pipeline.databases import data
from rnacentral_pipeline.databases.helpers import publications as pubs
LOGGER = logging.getLogger(__name__)
IDENT_GRAMMAR = r"""
@@grammar::PSI_MI_IDENTIFIER
start = empty:empty | idents $ ;
empty = '-' ;
idents = '|'.{ ident }+ ;
ident = xref:string ':' [ value:string [ '(' description:string ')' ] ] ;
string = ('"' quoted_string '"') | simple_string ;
simple_string = /[^|():\t]+/ ;
quoted_string = /(\\"|[^"])+/ ;
"""
class IdentSemantics(object):
def string(self, ast):
        if isinstance(ast, (list, tuple)):
            # Quoted strings parse as ('"', value, '"'); keep only the value.
            _, ast, _ = ast
ast = ast.replace(r"\"", '"')
return ast
IDENT_MODEL = tatsu.compile(IDENT_GRAMMAR, semantics=IdentSemantics())
def identifiers(raw: str) -> ty.List[data.InteractionIdentifier]:
assert raw, "Must have at least one identifier"
idents = []
for possible in IDENT_MODEL.parse(raw):
if "empty" in possible:
continue
idents.append(
data.InteractionIdentifier(
possible["xref"],
possible.get("value", ""),
possible.get("description", None),
)
)
return idents
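# Parsing sketch (illustrative input only; not part of the original module):
# given the grammar above, a PSI-MI TAB column such as
# 'pubmed:12345|taxid:9606("Homo sapiens")' should yield one identifier per
# '|'-separated entry.
def _demo_identifiers():
    parsed = identifiers('pubmed:12345|taxid:9606("Homo sapiens")')
    assert [(i.key, i.value) for i in parsed] == [
        ("pubmed", "12345"),
        ("taxid", "9606"),
    ]
    return parsed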
def as_taxid(value):
if value == "-":
return None
taxids = identifiers(value)
unique = set()
for tid in taxids:
if tid.value not in unique:
unique.add(tid.value)
assert len(unique) == 1, "Too many taxids in: %s" % value
return int(taxids[0].value)
def as_unique_id(value):
unique_ids = identifiers(value)
if not unique_ids:
return None
assert len(unique_ids) <= 1, "Too many ids: %s" % value
    return unique_ids[0]
def as_bool(value):
if value == "-" or value.lower() == "true":
return True
if value.lower() == "false":
return False
raise ValueError("Unknown bool value: %s" % value)
def as_date(value):
if value == "-":
return None
year, month, day = value.split("/")
return date(int(year), int(month), int(day))
def as_pubs(value):
refs = []
for ident in identifiers(value):
if ident.key == "pubmed":
try:
refs.append(pubs.reference(ident.value))
except data.UnknownPublicationType:
                LOGGER.warning("Could not handle publication %s", ident)
return refs
def stoichiometry(value):
if value == "-":
return None
return int(value)
def as_interactor(row, interactor: data.InteractorType) -> ty.Optional[data.Interactor]:
field_names = {
"ID(s) interactor": ("id", as_unique_id),
"Alt. ID(s) interactor": ("alt_ids", identifiers),
"Alias(es) interactor": ("aliases", identifiers),
"Taxid interactor": ("taxid", as_taxid),
"Biological role(s) interactor": ("biological_role", identifiers),
"Experimental role(s) interactor": ("experimental_role", identifiers),
"Type(s) interactor": ("interactor_type", identifiers),
"Xref(s) interactor": ("xrefs", identifiers),
"Annotation(s) interactor": ("annotations", str),
"Feature(s) interactor": ("features", identifiers),
"Stoichiometry(s) interactor": ("stoichiometry", stoichiometry),
"Identification method participant": (
"participant_identification",
identifiers,
),
}
parts: ty.Dict[str, ty.Any] = {}
for field_template, (key, fn) in field_names.items():
if interactor == data.InteractorType.A and field_template == "ID(s) interactor":
field_template = "#ID(s) interactor"
field_name = "%s %s" % (field_template, interactor.name)
parts[key] = fn(row[field_name])
if not parts["id"]:
        LOGGER.warning("Skipping interactor with no id: %s", row)
        return None
return data.Interactor(**parts)
def as_interaction(row) -> ty.Optional[data.Interaction]:
field_names = {
"Interaction detection method(s)": ("methods", identifiers),
"Publication Identifier(s)": ("publications", as_pubs),
"Interaction type(s)": ("types", identifiers),
"Source database(s)": ("source_database", identifiers),
"Interaction identifier(s)": ("ids", identifiers),
"Confidence value(s)": ("confidence", identifiers),
"Expansion method(s)": ("methods", identifiers),
"Interaction Xref(s)": ("xrefs", identifiers),
"Interaction annotation(s)": ("annotations", identifiers),
"Host organism(s)": ("host_organisms", as_taxid),
"Creation date": ("create_date", as_date),
"Update date": ("update_date", as_date),
"Negative": ("is_negative", as_bool),
}
parts: ty.Dict[str, ty.Any] = {}
for field_name, (key, fn) in field_names.items():
parts[key] = fn(row[field_name])
parts["interactor1"] = as_interactor(row, data.InteractorType.A)
parts["interactor2"] = as_interactor(row, data.InteractorType.B)
if not parts["interactor1"] or not parts["interactor2"]:
return None
return data.Interaction(**parts)
def parse(handle) -> ty.Iterator[data.Interaction]:
rows = csv.DictReader(handle, delimiter="\t", quoting=csv.QUOTE_NONE)
interactions = map(as_interaction, rows)
valid = filter(None, interactions)
return valid
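# Usage sketch (hypothetical file path; not part of the original module):
# materialize the valid interactions from a PSI-MI TAB file.
def _demo_parse(path):
    with open(path) as handle:
        return list(parse(handle))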
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2015-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import LuigiTestCase, RunOnceTask, with_config
import luigi
import luigi.worker
import luigi.execution_summary
import threading
import datetime
import mock
from enum import Enum
class ExecutionSummaryTest(LuigiTestCase):
def setUp(self):
super(ExecutionSummaryTest, self).setUp()
self.scheduler = luigi.scheduler.Scheduler(prune_on_get_work=False)
self.worker = luigi.worker.Worker(scheduler=self.scheduler)
def run_task(self, task):
self.worker.add(task) # schedule
self.worker.run() # run
def summary_dict(self):
return luigi.execution_summary._summary_dict(self.worker)
def summary(self):
return luigi.execution_summary.summary(self.worker)
def test_all_statuses(self):
class Bar(luigi.Task):
num = luigi.IntParameter()
def run(self):
if self.num == 0:
raise ValueError()
def complete(self):
if self.num == 1:
return True
return False
class Foo(luigi.Task):
def requires(self):
for i in range(5):
yield Bar(i)
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({Bar(num=1)}, d['already_done'])
self.assertEqual({Bar(num=2), Bar(num=3), Bar(num=4)}, d['completed'])
self.assertEqual({Bar(num=0)}, d['failed'])
self.assertEqual({Foo()}, d['upstream_failure'])
self.assertFalse(d['upstream_missing_dependency'])
self.assertFalse(d['run_by_other_worker'])
self.assertFalse(d['still_pending_ext'])
summary = self.summary()
expected = ['',
'===== Luigi Execution Summary =====',
'',
'Scheduled 6 tasks of which:',
'* 1 present dependencies were encountered:',
' - 1 Bar(num=1)',
'* 3 ran successfully:',
' - 3 Bar(num=2,3,4)',
'* 1 failed:',
' - 1 Bar(num=0)',
'* 1 were left pending, among these:',
' * 1 had failed dependencies:',
' - 1 Foo()',
'',
'This progress looks :( because there were failed tasks',
'',
'===== Luigi Execution Summary =====',
'']
result = summary.split('\n')
self.assertEqual(len(result), len(expected))
for i, line in enumerate(result):
self.assertEqual(line, expected[i])
def test_check_complete_error(self):
class Bar(luigi.Task):
def run(self):
pass
def complete(self):
raise Exception
return True
class Foo(luigi.Task):
def requires(self):
yield Bar()
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({Foo()}, d['still_pending_not_ext'])
self.assertEqual({Foo()}, d['upstream_scheduling_error'])
self.assertEqual({Bar()}, d['scheduling_error'])
self.assertFalse(d['not_run'])
self.assertFalse(d['already_done'])
self.assertFalse(d['completed'])
self.assertFalse(d['failed'])
self.assertFalse(d['upstream_failure'])
self.assertFalse(d['upstream_missing_dependency'])
self.assertFalse(d['run_by_other_worker'])
self.assertFalse(d['still_pending_ext'])
summary = self.summary()
expected = ['',
'===== Luigi Execution Summary =====',
'',
'Scheduled 2 tasks of which:',
'* 1 failed scheduling:',
' - 1 Bar()',
'* 1 were left pending, among these:',
" * 1 had dependencies whose scheduling failed:",
' - 1 Foo()',
'',
'Did not run any tasks',
'This progress looks :( because there were tasks whose scheduling failed',
'',
'===== Luigi Execution Summary =====',
'']
result = summary.split('\n')
self.assertEqual(len(result), len(expected))
for i, line in enumerate(result):
self.assertEqual(line, expected[i])
def test_not_run_error(self):
class Bar(luigi.Task):
def complete(self):
return True
class Foo(luigi.Task):
def requires(self):
yield Bar()
def new_func(*args, **kwargs):
return None
with mock.patch('luigi.scheduler.Scheduler.add_task', new_func):
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({Foo()}, d['still_pending_not_ext'])
self.assertEqual({Foo()}, d['not_run'])
self.assertEqual({Bar()}, d['already_done'])
self.assertFalse(d['upstream_scheduling_error'])
self.assertFalse(d['scheduling_error'])
self.assertFalse(d['completed'])
self.assertFalse(d['failed'])
self.assertFalse(d['upstream_failure'])
self.assertFalse(d['upstream_missing_dependency'])
self.assertFalse(d['run_by_other_worker'])
self.assertFalse(d['still_pending_ext'])
summary = self.summary()
expected = ['',
'===== Luigi Execution Summary =====',
'',
'Scheduled 2 tasks of which:',
'* 1 present dependencies were encountered:',
' - 1 Bar()',
'* 1 were left pending, among these:',
" * 1 was not granted run permission by the scheduler:",
' - 1 Foo()',
'',
'Did not run any tasks',
'This progress looks :| because there were tasks that were not granted run permission by the scheduler',
'',
'===== Luigi Execution Summary =====',
'']
result = summary.split('\n')
self.assertEqual(len(result), len(expected))
for i, line in enumerate(result):
self.assertEqual(line, expected[i])
def test_deps_error(self):
class Bar(luigi.Task):
def run(self):
pass
def complete(self):
return True
class Foo(luigi.Task):
def requires(self):
raise Exception
yield Bar()
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({Foo()}, d['scheduling_error'])
self.assertFalse(d['upstream_scheduling_error'])
self.assertFalse(d['not_run'])
self.assertFalse(d['already_done'])
self.assertFalse(d['completed'])
self.assertFalse(d['failed'])
self.assertFalse(d['upstream_failure'])
self.assertFalse(d['upstream_missing_dependency'])
self.assertFalse(d['run_by_other_worker'])
self.assertFalse(d['still_pending_ext'])
summary = self.summary()
expected = ['',
'===== Luigi Execution Summary =====',
'',
'Scheduled 1 tasks of which:',
'* 1 failed scheduling:',
' - 1 Foo()',
'',
'Did not run any tasks',
'This progress looks :( because there were tasks whose scheduling failed',
'',
'===== Luigi Execution Summary =====',
'']
result = summary.split('\n')
self.assertEqual(len(result), len(expected))
for i, line in enumerate(result):
self.assertEqual(line, expected[i])
@with_config({'execution_summary': {'summary-length': '1'}})
def test_config_summary_limit(self):
class Bar(luigi.Task):
num = luigi.IntParameter()
def run(self):
pass
def complete(self):
return True
class Biz(Bar):
pass
class Bat(Bar):
pass
class Wut(Bar):
pass
class Foo(luigi.Task):
def requires(self):
yield Bat(1)
yield Wut(1)
yield Biz(1)
for i in range(4):
yield Bar(i)
def complete(self):
return False
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({Bat(1), Wut(1), Biz(1), Bar(0), Bar(1), Bar(2), Bar(3)}, d['already_done'])
self.assertEqual({Foo()}, d['completed'])
self.assertFalse(d['failed'])
self.assertFalse(d['upstream_failure'])
self.assertFalse(d['upstream_missing_dependency'])
self.assertFalse(d['run_by_other_worker'])
self.assertFalse(d['still_pending_ext'])
summary = self.summary()
expected = ['',
'===== Luigi Execution Summary =====',
'',
'Scheduled 8 tasks of which:',
'* 7 present dependencies were encountered:',
' - 4 Bar(num=0...3)',
' ...',
'* 1 ran successfully:',
' - 1 Foo()',
'',
'This progress looks :) because there were no failed tasks or missing external dependencies',
'',
'===== Luigi Execution Summary =====',
'']
result = summary.split('\n')
self.assertEqual(len(result), len(expected))
for i, line in enumerate(result):
self.assertEqual(line, expected[i])
def test_upstream_not_running(self):
class ExternalBar(luigi.ExternalTask):
num = luigi.IntParameter()
def complete(self):
if self.num == 1:
return True
return False
class Bar(luigi.Task):
num = luigi.IntParameter()
def run(self):
if self.num == 0:
raise ValueError()
class Foo(luigi.Task):
def requires(self):
for i in range(5):
yield ExternalBar(i)
yield Bar(i)
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({ExternalBar(num=1)}, d['already_done'])
self.assertEqual({Bar(num=1), Bar(num=2), Bar(num=3), Bar(num=4)}, d['completed'])
self.assertEqual({Bar(num=0)}, d['failed'])
self.assertEqual({Foo()}, d['upstream_failure'])
self.assertEqual({Foo()}, d['upstream_missing_dependency'])
self.assertFalse(d['run_by_other_worker'])
self.assertEqual({ExternalBar(num=0), ExternalBar(num=2), ExternalBar(num=3), ExternalBar(num=4)}, d['still_pending_ext'])
s = self.summary()
self.assertIn('\n* 1 present dependencies were encountered:\n - 1 ExternalBar(num=1)\n', s)
self.assertIn('\n* 4 ran successfully:\n - 4 Bar(num=1...4)\n', s)
self.assertIn('\n* 1 failed:\n - 1 Bar(num=0)\n', s)
self.assertIn('\n* 5 were left pending, among these:\n * 4 were missing external dependencies:\n - 4 ExternalBar(num=', s)
self.assertIn('\n * 1 had failed dependencies:\n'
' - 1 Foo()\n'
' * 1 had missing external dependencies:\n'
' - 1 Foo()\n\n'
'This progress looks :( because there were failed tasks\n', s)
self.assertNotIn('\n\n\n', s)
def test_already_running(self):
lock1 = threading.Lock()
lock2 = threading.Lock()
class ParentTask(RunOnceTask):
def requires(self):
yield LockTask()
class LockTask(RunOnceTask):
def run(self):
lock2.release()
lock1.acquire()
self.comp = True
lock1.acquire()
lock2.acquire()
other_worker = luigi.worker.Worker(scheduler=self.scheduler, worker_id="other_worker")
other_worker.add(ParentTask())
t1 = threading.Thread(target=other_worker.run)
t1.start()
lock2.acquire()
self.run_task(ParentTask())
lock1.release()
t1.join()
d = self.summary_dict()
self.assertEqual({LockTask()}, d['run_by_other_worker'])
self.assertEqual({ParentTask()}, d['upstream_run_by_other_worker'])
s = self.summary()
self.assertIn('\nScheduled 2 tasks of which:\n'
'* 2 were left pending, among these:\n'
' * 1 were being run by another worker:\n'
' - 1 LockTask()\n'
' * 1 had dependencies that were being run by other worker:\n'
' - 1 ParentTask()\n', s)
self.assertIn('\n\nThe other workers were:\n'
' - other_worker ran 1 tasks\n\n'
'Did not run any tasks\n'
'This progress looks :) because there were no failed '
'tasks or missing external dependencies\n', s)
self.assertNotIn('\n\n\n', s)
def test_already_running_2(self):
class AlreadyRunningTask(luigi.Task):
def run(self):
pass
other_worker = luigi.worker.Worker(scheduler=self.scheduler, worker_id="other_worker")
other_worker.add(AlreadyRunningTask()) # This also registers this worker
old_func = luigi.scheduler.Scheduler.get_work
def new_func(*args, **kwargs):
new_kwargs = kwargs.copy()
new_kwargs['worker'] = 'other_worker'
old_func(*args, **new_kwargs)
return old_func(*args, **kwargs)
with mock.patch('luigi.scheduler.Scheduler.get_work', new_func):
self.run_task(AlreadyRunningTask())
d = self.summary_dict()
self.assertFalse(d['already_done'])
self.assertFalse(d['completed'])
self.assertFalse(d['not_run'])
self.assertEqual({AlreadyRunningTask()}, d['run_by_other_worker'])
def test_not_run(self):
class AlreadyRunningTask(luigi.Task):
def run(self):
pass
other_worker = luigi.worker.Worker(scheduler=self.scheduler, worker_id="other_worker")
other_worker.add(AlreadyRunningTask()) # This also registers this worker
old_func = luigi.scheduler.Scheduler.get_work
def new_func(*args, **kwargs):
kwargs['current_tasks'] = None
old_func(*args, **kwargs)
return old_func(*args, **kwargs)
with mock.patch('luigi.scheduler.Scheduler.get_work', new_func):
self.run_task(AlreadyRunningTask())
d = self.summary_dict()
self.assertFalse(d['already_done'])
self.assertFalse(d['completed'])
self.assertFalse(d['run_by_other_worker'])
self.assertEqual({AlreadyRunningTask()}, d['not_run'])
s = self.summary()
self.assertIn('\nScheduled 1 tasks of which:\n'
'* 1 were left pending, among these:\n'
' * 1 was not granted run permission by the scheduler:\n'
' - 1 AlreadyRunningTask()\n', s)
self.assertNotIn('\n\n\n', s)
def test_somebody_else_finish_task(self):
class SomeTask(RunOnceTask):
pass
other_worker = luigi.worker.Worker(scheduler=self.scheduler, worker_id="other_worker")
self.worker.add(SomeTask())
other_worker.add(SomeTask())
other_worker.run()
self.worker.run()
d = self.summary_dict()
self.assertFalse(d['already_done'])
self.assertFalse(d['completed'])
self.assertFalse(d['run_by_other_worker'])
self.assertEqual({SomeTask()}, d['not_run'])
def test_somebody_else_disables_task(self):
class SomeTask(luigi.Task):
def complete(self):
return False
def run(self):
raise ValueError()
other_worker = luigi.worker.Worker(scheduler=self.scheduler, worker_id="other_worker")
self.worker.add(SomeTask())
other_worker.add(SomeTask())
other_worker.run() # Assuming it is disabled for a while after this
self.worker.run()
d = self.summary_dict()
self.assertFalse(d['already_done'])
self.assertFalse(d['completed'])
self.assertFalse(d['run_by_other_worker'])
self.assertEqual({SomeTask()}, d['not_run'])
def test_larger_tree(self):
class Dog(RunOnceTask):
def requires(self):
yield Cat(2)
class Cat(luigi.Task):
num = luigi.IntParameter()
def __init__(self, *args, **kwargs):
super(Cat, self).__init__(*args, **kwargs)
self.comp = False
def run(self):
if self.num == 2:
raise ValueError()
self.comp = True
def complete(self):
if self.num == 1:
return True
else:
return self.comp
class Bar(RunOnceTask):
num = luigi.IntParameter()
def requires(self):
if self.num == 0:
yield ExternalBar()
yield Cat(0)
if self.num == 1:
yield Cat(0)
yield Cat(1)
if self.num == 2:
yield Dog()
class Foo(luigi.Task):
def requires(self):
for i in range(3):
yield Bar(i)
class ExternalBar(luigi.ExternalTask):
def complete(self):
return False
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({Cat(num=1)}, d['already_done'])
self.assertEqual({Cat(num=0), Bar(num=1)}, d['completed'])
self.assertEqual({Cat(num=2)}, d['failed'])
self.assertEqual({Dog(), Bar(num=2), Foo()}, d['upstream_failure'])
self.assertEqual({Bar(num=0), Foo()}, d['upstream_missing_dependency'])
self.assertFalse(d['run_by_other_worker'])
self.assertEqual({ExternalBar()}, d['still_pending_ext'])
s = self.summary()
self.assertNotIn('\n\n\n', s)
def test_with_dates(self):
""" Just test that it doesn't crash with date params """
start = datetime.date(1998, 3, 23)
class Bar(RunOnceTask):
date = luigi.DateParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(10):
new_date = start + datetime.timedelta(days=i)
yield Bar(date=new_date)
self.run_task(Foo())
d = self.summary_dict()
exp_set = {Bar(start + datetime.timedelta(days=i)) for i in range(10)}
exp_set.add(Foo())
self.assertEqual(exp_set, d['completed'])
s = self.summary()
self.assertIn('date=1998-0', s)
self.assertIn('Scheduled 11 tasks', s)
self.assertIn('Luigi Execution Summary', s)
self.assertNotIn('00:00:00', s)
self.assertNotIn('\n\n\n', s)
def test_with_ranges_minutes(self):
start = datetime.datetime(1998, 3, 23, 1, 50)
class Bar(RunOnceTask):
time = luigi.DateMinuteParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(300):
new_time = start + datetime.timedelta(minutes=i)
yield Bar(time=new_time)
self.run_task(Foo())
d = self.summary_dict()
exp_set = {Bar(start + datetime.timedelta(minutes=i)) for i in range(300)}
exp_set.add(Foo())
self.assertEqual(exp_set, d['completed'])
s = self.summary()
self.assertIn('Bar(time=1998-03-23T0150...1998-03-23T0649)', s)
self.assertNotIn('\n\n\n', s)
def test_with_ranges_one_param(self):
class Bar(RunOnceTask):
num = luigi.IntParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(11):
yield Bar(i)
self.run_task(Foo())
d = self.summary_dict()
exp_set = {Bar(i) for i in range(11)}
exp_set.add(Foo())
self.assertEqual(exp_set, d['completed'])
s = self.summary()
self.assertIn('Bar(num=0...10)', s)
self.assertNotIn('\n\n\n', s)
def test_with_ranges_multiple_params(self):
class Bar(RunOnceTask):
num1 = luigi.IntParameter()
num2 = luigi.IntParameter()
num3 = luigi.IntParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(5):
yield Bar(5, i, 25)
self.run_task(Foo())
d = self.summary_dict()
exp_set = {Bar(5, i, 25) for i in range(5)}
exp_set.add(Foo())
self.assertEqual(exp_set, d['completed'])
s = self.summary()
self.assertIn('- 5 Bar(num1=5, num2=0...4, num3=25)', s)
self.assertNotIn('\n\n\n', s)
def test_with_two_tasks(self):
class Bar(RunOnceTask):
num = luigi.IntParameter()
num2 = luigi.IntParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(2):
yield Bar(i, 2 * i)
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({Foo(), Bar(num=0, num2=0), Bar(num=1, num2=2)}, d['completed'])
summary = self.summary()
result = summary.split('\n')
expected = ['',
'===== Luigi Execution Summary =====',
'',
'Scheduled 3 tasks of which:',
'* 3 ran successfully:',
' - 2 Bar(num=0, num2=0) and Bar(num=1, num2=2)',
' - 1 Foo()',
'',
'This progress looks :) because there were no failed tasks or missing external dependencies',
'',
'===== Luigi Execution Summary =====',
'']
self.assertEqual(len(result), len(expected))
for i, line in enumerate(result):
self.assertEqual(line, expected[i])
def test_really_long_param_name(self):
class Bar(RunOnceTask):
This_is_a_really_long_parameter_that_we_should_not_print_out_because_people_will_get_annoyed = luigi.IntParameter()
class Foo(luigi.Task):
def requires(self):
yield Bar(0)
self.run_task(Foo())
s = self.summary()
self.assertIn('Bar(...)', s)
self.assertNotIn("Did not run any tasks", s)
self.assertNotIn('\n\n\n', s)
def test_multiple_params_multiple_same_task_family(self):
class Bar(RunOnceTask):
num = luigi.IntParameter()
num2 = luigi.IntParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(4):
yield Bar(i, 2 * i)
self.run_task(Foo())
summary = self.summary()
result = summary.split('\n')
expected = ['',
'===== Luigi Execution Summary =====',
'',
'Scheduled 5 tasks of which:',
'* 5 ran successfully:',
' - 4 Bar(num=0, num2=0) ...',
' - 1 Foo()',
'',
'This progress looks :) because there were no failed tasks or missing external dependencies',
'',
'===== Luigi Execution Summary =====',
'']
self.assertEqual(len(result), len(expected))
for i, line in enumerate(result):
self.assertEqual(line, expected[i])
def test_happy_smiley_face_normal(self):
class Bar(RunOnceTask):
num = luigi.IntParameter()
num2 = luigi.IntParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(4):
yield Bar(i, 2 * i)
self.run_task(Foo())
s = self.summary()
self.assertIn('\nThis progress looks :) because there were no failed tasks or missing external dependencies', s)
self.assertNotIn("Did not run any tasks", s)
self.assertNotIn('\n\n\n', s)
def test_happy_smiley_face_other_workers(self):
lock1 = threading.Lock()
lock2 = threading.Lock()
class ParentTask(RunOnceTask):
def requires(self):
yield LockTask()
class LockTask(RunOnceTask):
def run(self):
lock2.release()
lock1.acquire()
self.comp = True
lock1.acquire()
lock2.acquire()
other_worker = luigi.worker.Worker(scheduler=self.scheduler, worker_id="other_worker")
other_worker.add(ParentTask())
t1 = threading.Thread(target=other_worker.run)
t1.start()
lock2.acquire()
self.run_task(ParentTask())
lock1.release()
t1.join()
s = self.summary()
self.assertIn('\nThis progress looks :) because there were no failed tasks or missing external dependencies', s)
self.assertNotIn('\n\n\n', s)
def test_sad_smiley_face(self):
class ExternalBar(luigi.ExternalTask):
def complete(self):
return False
class Bar(luigi.Task):
num = luigi.IntParameter()
def run(self):
if self.num == 0:
raise ValueError()
class Foo(luigi.Task):
def requires(self):
for i in range(5):
yield Bar(i)
yield ExternalBar()
self.run_task(Foo())
s = self.summary()
self.assertIn('\nThis progress looks :( because there were failed tasks', s)
self.assertNotIn("Did not run any tasks", s)
self.assertNotIn('\n\n\n', s)
def test_neutral_smiley_face(self):
class ExternalBar(luigi.ExternalTask):
def complete(self):
return False
class Foo(luigi.Task):
def requires(self):
yield ExternalBar()
self.run_task(Foo())
s = self.summary()
self.assertIn('\nThis progress looks :| because there were missing external dependencies', s)
self.assertNotIn('\n\n\n', s)
def test_did_not_run_any_tasks(self):
class ExternalBar(luigi.ExternalTask):
num = luigi.IntParameter()
def complete(self):
if self.num == 5:
return True
return False
class Foo(luigi.Task):
def requires(self):
for i in range(10):
yield ExternalBar(i)
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual({ExternalBar(5)}, d['already_done'])
self.assertEqual({ExternalBar(i) for i in range(10) if i != 5}, d['still_pending_ext'])
self.assertEqual({Foo()}, d['upstream_missing_dependency'])
s = self.summary()
self.assertIn('\n\nDid not run any tasks\nThis progress looks :| because there were missing external dependencies', s)
self.assertNotIn('\n\n\n', s)
def test_example(self):
class MyExternal(luigi.ExternalTask):
def complete(self):
return False
class Boom(luigi.Task):
this_is_a_really_long_I_mean_way_too_long_and_annoying_parameter = luigi.IntParameter()
def requires(self):
for i in range(5, 200):
yield Bar(i)
class Foo(luigi.Task):
num = luigi.IntParameter()
num2 = luigi.IntParameter()
def requires(self):
yield MyExternal()
yield Boom(0)
class Bar(luigi.Task):
num = luigi.IntParameter()
def complete(self):
return True
class DateTask(luigi.Task):
date = luigi.DateParameter()
num = luigi.IntParameter()
def requires(self):
yield MyExternal()
yield Boom(0)
class EntryPoint(luigi.Task):
def requires(self):
for i in range(10):
yield Foo(100, 2 * i)
for i in range(10):
yield DateTask(datetime.date(1998, 3, 23) + datetime.timedelta(days=i), 5)
self.run_task(EntryPoint())
summary = self.summary()
expected = ['',
'===== Luigi Execution Summary =====',
'',
'Scheduled 218 tasks of which:',
'* 195 present dependencies were encountered:',
' - 195 Bar(num=5...199)',
'* 1 ran successfully:',
' - 1 Boom(...)',
'* 22 were left pending, among these:',
' * 1 were missing external dependencies:',
' - 1 MyExternal()',
' * 21 had missing external dependencies:',
' - 10 DateTask(date=1998-03-23...1998-04-01, num=5)',
' - 1 EntryPoint()',
' - 10 Foo(num=100, num2=0) ...',
'',
'This progress looks :| because there were missing external dependencies',
'',
'===== Luigi Execution Summary =====',
'']
result = summary.split('\n')
self.assertEqual(len(result), len(expected))
for i, line in enumerate(result):
self.assertEqual(line, expected[i])
def test_with_datehours(self):
""" Just test that it doesn't crash with datehour params """
start = datetime.datetime(1998, 3, 23, 5)
class Bar(RunOnceTask):
datehour = luigi.DateHourParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(10):
new_date = start + datetime.timedelta(hours=i)
yield Bar(datehour=new_date)
self.run_task(Foo())
d = self.summary_dict()
exp_set = {Bar(start + datetime.timedelta(hours=i)) for i in range(10)}
exp_set.add(Foo())
self.assertEqual(exp_set, d['completed'])
s = self.summary()
self.assertIn('datehour=1998-03-23T0', s)
self.assertIn('Scheduled 11 tasks', s)
self.assertIn('Luigi Execution Summary', s)
self.assertNotIn('00:00:00', s)
self.assertNotIn('\n\n\n', s)
def test_with_months(self):
""" Just test that it doesn't crash with month params """
start = datetime.datetime(1998, 3, 23)
class Bar(RunOnceTask):
month = luigi.MonthParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(3):
new_date = start + datetime.timedelta(days=30*i)
yield Bar(month=new_date)
self.run_task(Foo())
d = self.summary_dict()
exp_set = {Bar(start + datetime.timedelta(days=30*i)) for i in range(3)}
exp_set.add(Foo())
self.assertEqual(exp_set, d['completed'])
s = self.summary()
self.assertIn('month=1998-0', s)
self.assertIn('Scheduled 4 tasks', s)
self.assertIn('Luigi Execution Summary', s)
self.assertNotIn('00:00:00', s)
self.assertNotIn('\n\n\n', s)
def test_multiple_dash_dash_workers(self):
"""
Don't print own worker with ``--workers 2`` setting.
"""
self.worker = luigi.worker.Worker(scheduler=self.scheduler, worker_processes=2)
class Foo(RunOnceTask):
pass
self.run_task(Foo())
d = self.summary_dict()
self.assertEqual(set(), d['run_by_other_worker'])
s = self.summary()
self.assertNotIn('The other workers were', s)
self.assertIn('This progress looks :) because there were no failed ', s)
self.assertNotIn('\n\n\n', s)
def test_with_uncomparable_parameters(self):
"""
Don't rely on parameters being sortable
"""
class Color(Enum):
red = 1
yellow = 2
class Bar(RunOnceTask):
eparam = luigi.EnumParameter(enum=Color)
class Baz(RunOnceTask):
eparam = luigi.EnumParameter(enum=Color)
another_param = luigi.IntParameter()
class Foo(luigi.Task):
def requires(self):
yield Bar(Color.red)
yield Bar(Color.yellow)
yield Baz(Color.red, 5)
yield Baz(Color.yellow, 5)
self.run_task(Foo())
s = self.summary()
self.assertIn('yellow', s)
def test_with_dict_dependency(self):
""" Just test that it doesn't crash with dict params in dependencies """
args = dict(start=datetime.date(1998, 3, 23), num=3)
class Bar(RunOnceTask):
args = luigi.DictParameter()
class Foo(luigi.Task):
def requires(self):
for i in range(10):
new_dict = args.copy()
new_dict['start'] = str(new_dict['start'] + datetime.timedelta(days=i))
yield Bar(args=new_dict)
self.run_task(Foo())
d = self.summary_dict()
exp_set = set()
for i in range(10):
new_dict = args.copy()
new_dict['start'] = str(new_dict['start'] + datetime.timedelta(days=i))
exp_set.add(Bar(new_dict))
exp_set.add(Foo())
self.assertEqual(exp_set, d['completed'])
s = self.summary()
self.assertIn('"num": 3', s)
self.assertIn('"start": "1998-0', s)
self.assertIn('Scheduled 11 tasks', s)
self.assertIn('Luigi Execution Summary', s)
self.assertNotIn('00:00:00', s)
self.assertNotIn('\n\n\n', s)
def test_with_dict_argument(self):
""" Just test that it doesn't crash with dict params """
args = dict(start=str(datetime.date(1998, 3, 23)), num=3)
class Bar(RunOnceTask):
args = luigi.DictParameter()
self.run_task(Bar(args=args))
d = self.summary_dict()
exp_set = set()
exp_set.add(Bar(args=args))
self.assertEqual(exp_set, d['completed'])
s = self.summary()
self.assertIn('"num": 3', s)
self.assertIn('"start": "1998-0', s)
self.assertIn('Scheduled 1 task', s)
self.assertIn('Luigi Execution Summary', s)
self.assertNotIn('00:00:00', s)
self.assertNotIn('\n\n\n', s)
|
|
"""
I would like to move this to a dedicated async HTTP server.
Upload flow:
1 Upload handled by nginx (written to /tmp).
2 Upload handed off to uWSGI application, which auths users and performs
validation.
3 Upload handed off to aiohttp server, temp file path passed.
4 Application waits for aiohttp and nginx waits for application. BUT perhaps
X-Accel-Redirect can be used even for this upload... Then uWSGI can get out
of the way.
- https://www.nginx.com/resources/wiki/modules/upload/
[User] -> [Nginx] -> [uWSGI] -> [AIOHTTP]
| ^
| |
+--------[disk]--------+
Download flow:
1 Download request hits application via nginx.
2 Perform validation and auth.
3 Redirect nginx to aiohttp, which will stream chunks via nginx to caller.
- https://kovyrin.net/2010/07/24/nginx-fu-x-accel-redirect-remote/
[User] <- [Nginx] <-> [uWSGI]
^
|
v
[AIOHTTP]
"""
import collections
import random
import logging
import mimetypes
from os.path import join as pathjoin
from os.path import split as pathsplit
from os.path import (
dirname, basename
)
from hashlib import md5, sha1
import magic
from django.conf import settings
from django.core.cache import caches
from django.db import transaction
from main.models import (
UserDir, UserFile, File, FileTag, Chunk, ChunkStorage,
)
from main.fs.raid import chunker
from main.fs.array import get_shared_arrays
from main.fs.errors import (
DirectoryNotFoundError, FileNotFoundError, PathNotFoundError,
DirectoryConflictError, FileConflictError
)
REPLICAS = 2
LOGGER = logging.getLogger(__name__)
CHUNK_CACHE = caches['chunks']
DirectoryListing = collections.namedtuple('DirectoryListing',
('dir', 'dirs', 'files'))
class MultiCloudBase(object):
"""
Base class for interacting with multiple clouds.
"""
def __init__(self, user):
self.user = user
self.storage = list(user.storages.all())
self.storage.extend(get_shared_arrays())
class FileLikeBase(object):
"""
Implement File-like methods.
Implements methods shared by both MultiCloudReader and MultiCloudWriter.
"""
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.close()
def tell(self):
        # This should be fairly easy to implement. MultiCloudWriter already
        # tracks size, and MultiCloudReader could keep a count to provide this.
raise NotImplementedError()
def seek(self):
# TODO: This could possibly be implemented, and could be useful for
# range requests.
raise NotImplementedError()
def flush(self):
# N/A
pass
def close(self):
# No resources to release, so mark as closed.
if self._closed:
raise IOError('Already closed.')
self._closed = True
class MultiCloudReader(MultiCloudBase, FileLikeBase):
"""
File-like object that reads from multiple clouds.
"""
def __init__(self, user, version):
super().__init__(user)
self.version = version
self.chunks = list(
Chunk.objects.filter(
filechunks__version=version).order_by('filechunks__serial')
)
self._buffer = []
self._closed = False
def _read_chunk(self):
try:
chunk = self.chunks.pop(0)
except IndexError:
raise EOFError('out of chunks')
data = CHUNK_CACHE.get('chunk:%s' % chunk.uid)
if data is not None:
return chunk.unpack(data)
# Try providers in random order.
for cs in sorted(chunk.storages.all(), key=lambda k: random.random()):
# Since chunks are shared with other users, we need to get the
# client for the chunk, not one of the clients for the current
# user.
client = cs.storage.get_client()
try:
data = client.download(chunk)
except Exception as e:
LOGGER.exception(e)
continue
unpacked = chunk.unpack(data)
CHUNK_CACHE.set('chunk:%s' % chunk.uid, data)
return unpacked
raise IOError('Failed to read chunk %s' % chunk.uid)
def __iter__(self):
if self._closed:
raise IOError('I/O operation on closed file.')
while self.chunks:
yield self.read()
    def read(self, size=-1):  # NOQA
        """
        Read a series of chunks from multiple clouds.

        ``size`` is accepted for file-API compatibility but ignored; each call
        returns the next whole chunk, or None at EOF.
        """
if self._closed:
raise IOError('I/O operation on closed file.')
try:
return self._read_chunk()
except EOFError:
return
class MultiCloudWriter(MultiCloudBase, FileLikeBase):
"""
File-like object that writes to multiple clouds.
"""
def __init__(self, user, file, version,
chunk_size=settings.CLOUDSTRYPE_CHUNK_SIZE,
replicas=REPLICAS):
super().__init__(user)
        # guess_type() returns (type, encoding); keep just the type string.
        self.mime = mimetypes.guess_type(file.name, strict=False)[0]
self.version = version
self.chunk_size = chunk_size
self.replicas = replicas
self._md5 = md5()
self._sha1 = sha1()
self._size = 0
self._buffer = []
self._closed = False
    def _write_chunk_replicas(self, chunk, data):
        storages = sorted(self.storage, key=lambda k: random.random())
        replicas = 0
        # Try to upload up to three times.
        for retry in range(3):
            # Try each remaining provider in turn. Iterate over a copy so we
            # can safely remove providers we have already written to.
            for storage in list(storages):
                client = storage.get_client()
                try:
                    attrs = client.upload(chunk, data)
                except Exception as e:
                    LOGGER.exception(e, exc_info=True)
                    # Try next provider.
                    continue
                else:
                    # Remove this storage provider from the list so no two
                    # replicas land on the same provider.
                    storages.remove(storage)
                    cs = ChunkStorage(chunk=chunk, storage=storage)
                    cs.attrs = attrs or {}
                    cs.save()
                    chunk.storages.add(cs)
                    replicas += 1
                    # Goal reached: the original write plus self.replicas copies.
                    if replicas == self.replicas + 1:
                        return
        # If we get here, we failed to reach our replica goal.
        raise IOError('Failed to write chunk')
def _write_chunk(self, data):
"""
Write a single chunk.
Writes chunk to multiple clouds.
"""
chunk = Chunk.objects.create(size=len(data), user=self.user)
data = chunk.pack(data)
# Try to write replicas. If this fails, it raises.
self._write_chunk_replicas(chunk, data)
# Freshen the cache.
CHUNK_CACHE.set('chunk:%s' % chunk.uid, data)
self.version.add_chunk(chunk)
    def write(self, data):
        """
        Write data to multiple clouds.

        Each call writes ``data`` as a single chunk; callers (see
        ``MultiCloudFilesystem.upload``) are expected to pass chunks of the
        proper size, for example via ``chunker()``.
        """
if self._closed:
raise IOError('I/O operation on closed file.')
if self._size == 0:
# First block. See if we can get a more specific mime type by
# examining the data.
mime = magic.from_buffer(data, mime=True)
            # Prefer the mime type detected from the content by magic, unless
            # magic only found the generic fallback; self.mime (derived from
            # the file name) is kept in that case.
            if not self.mime or mime != 'application/octet-stream':
                self.mime = mime
self._size += len(data)
self._md5.update(data)
self._sha1.update(data)
self._write_chunk(data)
def close(self):
"""
Finalize file by writing attributes.
"""
super().close()
# Update content related attributes.
self.version.size = self._size
self.version.md5 = self._md5.hexdigest()
self.version.sha1 = self._sha1.hexdigest()
# Flush to db.
self.version.save(update_fields=['size', 'md5', 'sha1'])
class MultiCloudFilesystem(MultiCloudBase):
def __init__(self, user, chunk_size=settings.CLOUDSTRYPE_CHUNK_SIZE,
replicas=0):
super().__init__(user)
self.chunk_size = chunk_size
self.level = user.get_option('raid_level', 0)
self.replicas = user.get_option('raid_replicas', replicas)
def download(self, path, file=None, version=None):
"""
Download from multiple storage.
Uses Metastore backend to resolve path to a series of chunks. Returns a
MultiCloudReader that can read these chunks in order.
"""
# If caller did not give a file (only a path), lookup the file by path.
if file is None:
try:
file = UserFile.objects.get(path=path, user=self.user)
except UserFile.DoesNotExist:
raise FileNotFoundError(path)
# If caller did not specify version, select the current one.
if version is None:
version = file.file.version
return MultiCloudReader(self.user, version)
@transaction.atomic
def upload(self, path, f):
"""
Upload to multiple storage.
Reads the provided file-like object as a series of chunks, writing each
to multiple cloud providers. Stores chunk information into the
Metastore backend.
"""
assert len(self.storage) >= self.replicas, \
'not enough storage (%s) for %s replicas' % (len(self.storage),
self.replicas)
try:
# Check user's hierarchy for the file.
user_file = UserFile.objects.get(path=path, user=self.user)
# If it exists, make a new version of it.
version = user_file.file.add_version()
except UserFile.DoesNotExist:
# Place the new file into the user's hierarchy.
user_file = UserFile.objects.create(
path=path, name=basename(path), user=self.user)
# Grab ref to version, since we upload to THAT.
version = user_file.file.version
# Upload the file.
size, count = 0, 0
with MultiCloudWriter(self.user, user_file, version,
chunk_size=self.chunk_size,
replicas=self.replicas) as out:
for data in chunker(f, chunk_size=self.chunk_size):
size += len(data)
count += 1
out.write(data)
return user_file
@transaction.atomic
def delete(self, path, file=None):
"""
Delete from multiple storage.
If path is a file it is deleted (as described below). If path is a
directory then it is simply removed from the Metastore.
Uses Metastore backend to resolve path to a series of chunks. Deletes
the chunks from cloud providers and Metastore backend.
"""
if file is None:
try:
file = UserFile.objects.get(path=path, user=self.user)
except UserFile.DoesNotExist:
raise FileNotFoundError(path)
file.delete()
def mkdir(self, path):
if self.isfile(path):
raise FileConflictError(path)
return UserDir.objects.create(path=path, user=self.user)
def rmdir(self, path, dir=None):
if dir is None:
try:
dir = UserDir.objects.get(path=path, user=self.user)
except UserDir.DoesNotExist:
raise DirectoryNotFoundError(path)
dir.delete()
@transaction.atomic
def _move_file(self, file, dst):
if self.isdir(dst):
raise DirectoryConflictError(dst)
dst, file.name = pathsplit(dst.lstrip('/'))
if dst:
try:
file.parent = \
UserDir.objects.get(path=dst, user=self.user)
except UserDir.DoesNotExist:
raise DirectoryNotFoundError(dst)
file.save(update_fields=['parent', 'name'])
return file
@transaction.atomic
def _move_dir(self, dir, dst):
if self.isfile(dst):
raise DirectoryConflictError(dst)
try:
# First try moving the directory to the given path. If this fails,
# it means the given path does not exist, and thus the given path
# is the intended name of dir.
dir.parent = \
UserDir.objects.get(path=dst, user=self.user)
except UserDir.DoesNotExist:
# If the given path does not exist (not intended to be parent)
# maybe we just need to rename.
if dirname(dst) == dirname(dir.path):
# This is just a rename...
dir.name = basename(dst)
dir.save(update_fields=['name'])
return dir
            # No. In this case we were asked to move to a non-existent
            # directory, so raise.
raise DirectoryNotFoundError(dst)
else:
# OK, the parent has been changed, we move it into the requested
# directory.
dir.save(update_fields=['parent'])
return dir
def move(self, src, dst):
# Is it a file?
try:
file = UserFile.objects.get(path=src, user=self.user)
# Move it.
return self._move_file(file, dst)
except UserFile.DoesNotExist:
pass
# No, is it a dir?
try:
dir = UserDir.objects.get(path=src, user=self.user)
# Move it.
return self._move_dir(dir, dst)
except UserDir.DoesNotExist:
pass
# Neither, raise.
raise PathNotFoundError(src)
@transaction.atomic
def _copy_file(self, srcfile, dst):
# If destination is a directory, then put this file inside of it.
if self.isdir(dst):
dst = pathjoin(dst, srcfile.name)
# If destination exists, raise conflict. Remember, this is not an elif,
# so we are checking the original destination if it was not a directory
# or the adjusted one if it was a directory.
if self.exists(dst):
raise FileConflictError(dst)
# Create a new file, attach the version from the original (copy).
file = \
File.objects.create(owner=self.user, version=srcfile.file.version)
# Place the new file into the user's hierarchy.
dstfile = UserFile.objects.create(path=dst, file=file, user=self.user,
attrs=srcfile.attrs)
for tag in srcfile.tags.all():
FileTag.objects.create(file=dstfile, tag=tag)
return dstfile
@transaction.atomic
def _copy_dir(self, srcdir, dst):
# If destination is a directory, then put this directory inside of it.
if self.isdir(dst):
dst = pathjoin(dst, srcdir.name)
# If destination exists, raise conflict. Remember, this is not an elif,
# so we are checking the original destination if it was not a directory
# or the adjusted one if it was a directory.
if self.exists(dst):
raise FileConflictError(dst)
# Clone srcdir first.
dstdir = UserDir.objects.create(path=dst, user=self.user,
attrs=srcdir.attrs)
for tag in srcdir.tags.all():
dstdir.tags.add(tag)
# Then copy children recursively.
for subdir in srcdir.child_dirs.all():
self._copy_dir(subdir, dstdir.path)
for subfile in srcdir.child_files.all():
self._copy_file(subfile, dstdir.path)
return dstdir
def copy(self, src, dst):
# Is it a file?
try:
file = UserFile.objects.get(path=src, user=self.user)
# Copy it.
return self._copy_file(file, dst)
except UserFile.DoesNotExist:
pass
# No, is it a dir?
try:
dir = UserDir.objects.get(path=src, user=self.user)
# Copy it.
return self._copy_dir(dir, dst)
except UserDir.DoesNotExist:
pass
# Neither, raise.
        raise PathNotFoundError(src)
def listdir(self, path, dir=None):
if dir is None:
try:
dir = UserDir.objects.get(path=path, user=self.user)
except UserDir.DoesNotExist:
raise DirectoryNotFoundError(path)
return DirectoryListing(
dir,
dir.child_dirs.all(),
dir.child_files.all()
)
def info(self, path, file=None, dir=None):
if file is not None:
return file
if dir is not None:
return dir
try:
return UserFile.objects.get(path=path, user=self.user)
except UserFile.DoesNotExist:
pass
try:
return UserDir.objects.get(path=path, user=self.user)
except UserDir.DoesNotExist:
pass
raise PathNotFoundError(path)
def isdir(self, path):
return UserDir.objects.filter(path=path, user=self.user).exists()
def isfile(self, path):
return UserFile.objects.filter(path=path, user=self.user).exists()
def exists(self, path):
return self.isdir(path) or \
self.isfile(path)
def get_fs(user, **kwargs):
return MultiCloudFilesystem(user, **kwargs)
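# Usage sketch (illustrative; assumes ``user`` is a User with cloud storages
# attached and the chunk cache/backends configured):
#
#     fs = get_fs(user)
#     with open('report.pdf', 'rb') as f:
#         fs.upload('/docs/report.pdf', f)
#     with fs.download('/docs/report.pdf') as reader:
#         data = b''.join(reader)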
|
|
import pandas as pd
import numpy as np
import os
from datetime import datetime
import numbers
import requests
import re
import cPickle
from sklearn.cluster import DBSCAN
from matplotlib import colors, cm
from statsmodels.api import OLS
class ChicagoData():
def __init__(self, *args):
self.DATA_PATH = os.path.join(os.path.dirname(__file__), "data/chicago/")
self.CSV_FILE = self.DATA_PATH + "Crimes_2010-2016.csv"
self.df = pd.DataFrame()
self.meta = dict()
self.gun_fbi_codes = ['01A', '2', '3', '04B', '04A', '15']
self.args = args
def filter_df(self, df):
for arg in self.args:
assert len(arg)==2, "Filter must define field and filter values"
assert arg[0] in df.columns
key = arg[0]
val = self._set_list(arg[1])
df = df[df[key].isin(val)].reset_index(drop=True)
return df
def initData(self, **kwargs):
if 'download_data' in kwargs:
if kwargs['download_data']:
self.pull_data()
if 'download_metadata' in kwargs:
if kwargs['download_metadata']:
self.pull_metadata()
if 'download_fbi' in kwargs:
if kwargs['download_fbi']:
self.pull_fbi_codes()
if 'limit' in kwargs:
if kwargs['limit']:
limit = kwargs['limit']
else:
limit = None
if 'repull' in kwargs:
if kwargs['repull']:
self.read_data(limit=limit)
self._apply_weapons_flag()
self.read_meta()
self.merge_meta()
self.df['CITY'] = 'Chicago'
return self
def read_data(self, limit=None):
self.df = pd.read_csv(self.CSV_FILE, nrows=limit)
return self
def read_meta(self):
self.meta['district'] = self._read_district()
self.meta['community'] = self._read_community()
self.meta['beat'] = self._read_beat()
self.meta['census'] = self._read_census()
self.meta['fbi'] = self._read_fbi()
def _read_community(self):
community = pd.read_csv(self.DATA_PATH + 'community_areas.csv')
return community
def _read_beat(self):
beat = pd.read_csv(self.DATA_PATH + 'police_beat.csv')
return beat
def _read_district(self):
police_district = pd.read_csv(self.DATA_PATH + 'police_districts.csv')
return police_district
def _read_census(self):
census = pd.read_csv(self.DATA_PATH + 'census_data.csv')
return census[~np.isnan(census['Community Area Number'])]
def _read_fbi(self):
fbi = pd.read_csv(self.DATA_PATH + 'fbi.csv')
return fbi
def pull_fbi_codes(self):
url = "http://gis.chicagopolice.org/clearmap_crime_sums/crime_types.html"
response = requests.get(url)
content = response.content
codes = re.findall("\r\n\t+.+<br>|\r\n\t+.+</td>", content)
regex = '.*</span><span class="crimetype"><a href="#.*">(.+).*\((.+)\)</a>.*'
special_codes = [re.match(regex, c.replace(' (Index)', '').replace("\r", "").replace("\t", "").replace("\n", "")).groups() for c in codes if '</span><span class="crimetype"><a href=' in c]
special_codes_ordered = [(c[1], c[0]) for c in special_codes]
codes_clean = [re.sub('<td.*\"\d+\">|</[a-zA-Z]+>|<br>', "", c.replace("\r", "").replace("\t", "").replace("\n", "")) for c in codes]
codes_split = [tuple(c.split(' ', 1)) for c in codes_clean if re.match("^\d", c)]
pd.DataFrame(codes_split+special_codes_ordered, columns=['CODE', 'FBI DESCRIPTION']).to_csv(self.DATA_PATH + 'fbi.csv')
return self
def pull_data(self):
os.system("curl 'https://data.cityofchicago.org/api/views/h8e4-zn48/rows.csv?accessType=DOWNLOAD' -o '%sCrimes_2010-2016.csv'" % self.DATA_PATH)
return self
def merge_meta(self):
self.df = self.df.merge(self.meta['district'], how='left', left_on='District', right_on='DIST_NUM', suffixes=('', '_district'))
self.df = self.df.merge(self.meta['community'], how='left', left_on='Community Area', right_on='AREA_NUMBE', suffixes=('', '_community'))
self.df = self.df.merge(self.meta['beat'], how='left', left_on='Beat', right_on='BEAT_NUM', suffixes=('', '_beat'))
self.df = self.df.merge(self.meta['census'], how='left', left_on='Community Area', right_on='Community Area Number')
self.df = self.df.merge(self.meta['fbi'], how='left', left_on='FBI Code', right_on='CODE')
self.df['the_geom_district'] = self.df['the_geom']
return self
def pull_metadata(self):
os.system("curl 'https://data.cityofchicago.org/api/views/z8bn-74gv/rows.csv?accessType=DOWNLOAD' -o '%spolice_stations.csv'" % self.DATA_PATH)
os.system("curl 'https://data.cityofchicago.org/api/views/c7ck-438e/rows.csv?accessType=DOWNLOAD' -o '%sIUCR.csv'" % self.DATA_PATH)
os.system("curl 'https://data.cityofchicago.org/api/views/n9it-hstw/rows.csv?accessType=DOWNLOAD' -o '%spolice_beat.csv'" % self.DATA_PATH)
os.system("curl 'https://data.cityofchicago.org/api/views/24zt-jpfn/rows.csv?accessType=DOWNLOAD' -o '%spolice_districts.csv'" % self.DATA_PATH)
os.system("curl 'https://data.cityofchicago.org/api/views/k9yb-bpqx/rows.csv?accessType=DOWNLOAD' -o '%swards.csv'" % self.DATA_PATH)
os.system("curl 'https://data.cityofchicago.org/api/views/igwz-8jzy/rows.csv?accessType=DOWNLOAD' -o '%scommunity_areas.csv'" % self.DATA_PATH)
os.system("curl 'https://data.cityofchicago.org/api/views/kn9c-c2s2/rows.csv?accessType=DOWNLOAD' -o '%scensus_data.csv'" % self.DATA_PATH)
# CODE LOOKUP: https://datahub.cmap.illinois.gov/dataset/1d2dd970-f0a6-4736-96a1-3caeb431f5e4/resource/d23fc5b1-0bb5-4bcc-bf70-688201534833/download/CDSFieldDescriptions.pdf
os.system("curl 'https://datahub.cmap.illinois.gov/dataset/1d2dd970-f0a6-4736-96a1-3caeb431f5e4/resource/8c4e096e-c90c-4bef-9cf1-9028d094296e/download/ReferenceCCA20102014.csv' -o '%sCMAP_census_data.csv'" % self.DATA_PATH)
return self
def read_census_extended(self, values=None):
census = pd.read_csv("gunviolence/data/chicago/CMAP_census_data.csv")
census['GEOG'] = census['GEOG'].map(lambda x: x.upper())
census_key = pd.read_csv("gunviolence/data/chicago/census_lookup.csv")
col_filter = []
col_levels = []
for c in census.columns:
col = census_key[census_key.Code==c][['Category', 'Variable', 'Code']].values
if len(col)==1:
col = list(col[0])
col = [i.replace('GEOG', 'COMMUNITY AREA NAME') for i in col if isinstance(i, basestring)]
col_filter.append(c)
col_levels.append(tuple(col))
census = census[col_filter]
census.columns = pd.MultiIndex.from_tuples(col_levels, names=['Category', 'Variable', 'Code'])
census_extended = census.T.reset_index(drop=False)
census_extended.index = ['%s: %s' % (row['Category'], row['Variable']) if row['Category'] not in ('COMMUNITY AREA NAME') else row['Category'] for i, row in census_extended.iterrows()]
census_extended.drop(['Code', 'Category', 'Variable'], axis=1, inplace=True)
return census_extended.T
@classmethod
def geom_to_list(cls, df):
for c in df.columns:
if re.match('the_geom.*', c):
df[c] = df[c].map(lambda x: cls._parse_geom(x))
return df
@staticmethod
def _parse_geom(coords):
if isinstance(coords, basestring):
coord_sets = re.match("MULTIPOLYGON \(\(\((.*)\)\)\)", coords).group(1)
coord_strings = [re.sub("\(|\)", "", c).split(" ") for c in coord_sets.split(", ")]
coord_list = tuple([(float(c[1]), float(c[0])) for c in coord_strings])
elif isinstance(coords, (list, tuple)):
coord_list = tuple(coords)
return coord_list
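    # Example (illustrative WKT):
    #     _parse_geom('MULTIPOLYGON (((-87.6 41.8, -87.7 41.9)))')
    # returns ((41.8, -87.6), (41.9, -87.7)) -- note the swap to (lat, lon).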
@staticmethod
def communities(df):
community = dict()
community.setdefault('All', {})
census = pd.read_csv("gunviolence/data/chicago/census_data.csv")
census['Community Area Number'] = census['Community Area Number'].fillna('All')
census = census.set_index('Community Area Number')
census.index = [str(int(idx)) if idx!="All" else idx for idx in census.index]
if set(['the_geom_community', 'Community Area']) < set(df.columns):
for index1, row1 in df.iterrows():
community['All'].setdefault('adj_list', []).append(row1['Community Area'])
for index2, row2 in df.iterrows():
community.setdefault(row1['Community Area'], {})
community.setdefault(row2['Community Area'], {})
if index1 > index2:
geom1 = row1['the_geom_community']
geom2 = row2['the_geom_community']
boundary_intersect = set(geom1) & set(geom2)
if len(boundary_intersect) > 0:
community[row1['Community Area']].setdefault('adj_list', []).append(row2['Community Area'])
community[row2['Community Area']].setdefault('adj_list', []).append(row1['Community Area'])
community = pd.DataFrame(community).T
community.index = [str(int(idx)) if idx!="All" else idx for idx in community.index]
return pd.DataFrame(community).join(census).fillna(-1)
@staticmethod
def _set_list(f):
if not isinstance(f, list):
if isinstance(f, (basestring, numbers.Integral)):
return [f]
else:
return list(f)
else:
return f
def _apply_weapons_flag(self):
indexes = []
self.df['WEAPON_FLAG'] = 0
for i, row in self.df.iterrows():
if re.match('.*GUN.*|.*FIREARM.*|.*(?<!NO )WEAPON.*|WEAPON.*', row['Description']) or row['Primary Type']=='WEAPONS VIOLATION':
indexes.append(i)
self.df.loc[indexes, 'WEAPON_FLAG'] = 1
return self
class PivotData(ChicagoData):
def __init__(self, fields, dt_format, *args, **kwargs):
ChicagoData.__init__(self, *args)
kwargs.setdefault('repull', False)
self.fields = self._set_list(fields)
self.dt_format = dt_format
if 'csv' in kwargs:
self.csv = self.DATA_PATH + kwargs['csv']
else:
self.csv = ""
if not kwargs['repull'] and os.path.isfile(self.csv):
self._data = pd.read_csv(self.csv)
else:
self.initData(**kwargs)
self.pivot()
def pivot(self):
data = self.df.copy()
data = self.filter_df(data)
sep = '---'
data['Period'] = data['Date'].map(lambda x: datetime.strptime(x, '%m/%d/%Y %I:%M:%S %p').strftime(self.dt_format))
counts = data.groupby(['Period']+self.fields, as_index=False).count().iloc[:, 0:len(self.fields)+2]
counts.columns = ['Period']+self.fields+['count']
for i, f in enumerate(self.fields):
field_counts = counts[f].map(lambda x: str(x))
if i==0:
counts['fields'] = field_counts
else:
counts['fields'] += sep+field_counts
pivot = counts.pivot('fields', 'Period', 'count')
pivot_split = pivot.reset_index().fields.str.split(sep, expand=True)
pivot_rename = pivot_split.rename(columns={int(k): v for k, v in enumerate(self.fields)})
self._data = pivot_rename.merge(pivot.reset_index(drop=True), left_index=True, right_index=True)
if self.csv:
self._data.to_csv(self.csv, index=False)
return self
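    # Shape of the resulting self._data (illustrative numbers) for
    # fields=['Community Area'] and dt_format='%Y-%m': one row per field
    # combination, one count column per period:
    #
    #     Community Area  2016-01  2016-02  ...
    #     8               12.0     9.0      ...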
def _date_cols(self):
return set(self._data.columns) - set(self.fields)
def norm_data(self, dt_filter, filter_zero=True):
data = self.data.copy()
data.loc[:, self.date_list] = data.loc[:, self.date_list].fillna(0)
norm = np.linalg.norm(data.loc[:, self.date_list].fillna(0))
data.loc[:, 'fill_opacity'] = data[dt_filter]/norm
data.loc[:, 'fill_opacity'] = data.loc[:, 'fill_opacity'] / max(data.loc[:, 'fill_opacity'] )
if filter_zero:
data = data[data[dt_filter]>0].reset_index(drop=True)
return data
def color_data(self, dt_filter, filter_zero=True):
h = cm.get_cmap('RdYlGn')
data = self.norm_data(dt_filter, filter_zero)
data.loc[:, 'fill_color'] = data.loc[:, 'fill_opacity'].map(lambda x: colors.rgb2hex(h(1.0-x)).upper())
return data
    def clusters(self):
        # Cluster incident coordinates per period with haversine DBSCAN and
        # return the number of clusters found for each period.
        kms_per_radian = 6371.0088
        epsilon = 1.5 / kms_per_radian
        db = DBSCAN(eps=epsilon, min_samples=1, algorithm='ball_tree', metric='haversine')
        cluster_counts = dict()
        for d in self.date_list:
            data = self._data[self._data[d]>0][['Longitude', 'Latitude']]
            db.fit(np.radians(data))
            cluster_labels = db.labels_
            cluster_counts[d] = len(set(cluster_labels))
        return cluster_counts
@property
def data(self):
return self._data
@property
def date_list(self):
dt_list = list(self._date_cols())
dt_list.sort()
return dt_list
def _add_percentage(self, census):
for c in census.columns:
if 'Age Cohorts' in c or 'Race and Ethnicity' in c:
total_pop = 'General Population: Total Population'
if c!=total_pop:
census['%s - Percent' % c] = census[c]/census[total_pop]
elif 'Employment Status' in c or 'Mode of Travel to Work' in c:
total_employment = 'Employment Status: Population 16+ (Labor)'
if c!=total_employment:
census['%s - Percent' % c] = census[c]/census[total_employment]
elif 'Educational Attainment' in c:
education = 'Educational Attainment: Population 25+ (Education)'
if c!=education:
census['%s - Percent' % c] = census[c]/census[education]
            elif 'Household Income' in c:
                household = 'General Population: Total Households'
                if c=='Household Income: Median Income 2010-2014 American Community':
                    # Median income is already a summary statistic; leave as-is.
                    pass
                elif c!=household:
                    census['%s - Percent' % c] = census[c]/census[household]
            elif 'Housing and Tenure' in c or 'Housing Type' in c or 'Housing Size' in c or 'Housing Age' in c:
                housing_unit = 'Housing: Housing Unit total'
                if c!=housing_unit:
                    census['%s - Percent' % c] = census[c]/census[housing_unit]
            elif 'General Population: Total Population' in c:
                # Compute both density and growth in one branch; a second
                # elif on the same condition would be unreachable.
                sq_footage = 'SHAPE_AREA'
                if c != sq_footage:
                    census['Population Density'] = census[c]/census[sq_footage]
                pop_change = 'Population: 2010 Census'
                if c != pop_change:
                    census['Population Growth - Percent'] = census[c]/census[pop_change]
return census
def community_crimes(dt_format, *args, **kwargs):
data_obj = crimes(dt_format, ['Community Area', 'COMMUNITY', 'the_geom_community'], *args, **kwargs)
return data_obj
def heatmap_crimes(dt_format, *args, **kwargs):
kwargs['csv'] = 'heatmap.csv'
data_obj = crimes(dt_format, ['Latitude', 'Longitude'], *args, **kwargs)
return data_obj
def district_markers(dt_format, *args, **kwargs):
kwargs['csv'] = 'district_marker.csv'
data_obj = crimes(dt_format, ['Latitude', 'Longitude', 'DIST_NUM', 'Primary Type'], *args, **kwargs)
return data_obj
def community_markers(dt_format, *args, **kwargs):
kwargs['csv'] = 'community_marker.csv'
data_obj = crimes(dt_format, ['Latitude', 'Longitude', 'Community Area', 'Primary Type'], *args, **kwargs)
return data_obj
def beat_markers(dt_format, *args, **kwargs):
kwargs['csv'] = 'beat_marker.csv'
data_obj = crimes(dt_format, ['Latitude', 'Longitude', 'BEAT_NUM', 'Primary Type'], *args, **kwargs)
return data_obj
def incident_markers(dt_format, *args, **kwargs):
kwargs['csv'] = 'incident_marker.csv'
data_obj = crimes(dt_format, ['Latitude', 'Longitude', 'Location', 'Primary Type'], *args, **kwargs)
return data_obj
def city_markers(dt_format, *args, **kwargs):
kwargs['csv'] = 'city_marker.csv'
data_obj = crimes(dt_format, ['Latitude', 'Longitude', 'CITY', 'Primary Type'], *args, **kwargs)
return data_obj
def crime_descriptions(dt_format, *args, **kwargs):
kwargs['csv'] = 'crime_description.csv'
data_obj = crimes(dt_format, ['Primary Type', 'Description'], *args, **kwargs)
return data_obj
def crime_locations(dt_format, *args, **kwargs):
kwargs['csv'] = 'crime_location.csv'
data_obj = crimes(dt_format, ['Primary Type', 'Location Description'], *args, **kwargs)
return data_obj
def trends(dt_format, *args, **kwargs):
kwargs['csv'] = 'trend.csv'
data_obj = crimes(dt_format, ['CITY'], *args, **kwargs)
return data_obj
def crimes(dt_format, pivot_cols, *args, **kwargs):
    cd = ChicagoData()
    pivot_cols = cd._set_list(pivot_cols)
    kwargs.setdefault('repull', False)
    data_obj = None
    if 'csv' in kwargs:
        filepath = cd.DATA_PATH + kwargs['csv']
        data_obj = PivotData(pivot_cols, dt_format, *args, **kwargs)
        print '%s saved to csv' % filepath
    if 'pickle' in kwargs:
        filepath = cd.DATA_PATH + kwargs['pickle']
        if (not kwargs['repull']) and os.path.isfile(filepath):
            f = open(filepath, 'rb')
            data_obj = cPickle.load(f)
            f.close()
            print '%s pickle loaded' % filepath
        else:
            data_obj = PivotData(pivot_cols, dt_format, *args, **kwargs)
            f = open(filepath, 'wb')
            data_obj.df = pd.DataFrame([])
            cPickle.dump(data_obj, f, protocol=cPickle.HIGHEST_PROTOCOL)
            f.close()
            print '%s pickled' % filepath
    if data_obj is None:
        # Neither 'csv' nor 'pickle' was requested; build the pivot in memory.
        data_obj = PivotData(pivot_cols, dt_format, *args, **kwargs)
    return data_obj
if __name__=="__main__":
csv = 'community_pivot.csv'
fields = ['Community Area', 'COMMUNITY', 'the_geom_community']
p = PivotData(fields, '%Y-%m', ['WEAPON_FLAG', 1], csv=csv, repull=True)
csv = 'beat_marker.csv'
fields = ['Latitude', 'Longitude', 'BEAT_NUM', 'Primary Type']
p = PivotData(fields, '%Y-%m', ['WEAPON_FLAG', 1], csv=csv, repull=True)
csv = 'community_marker.csv'
fields = ['Latitude', 'Longitude', 'Community Area', 'Primary Type']
p = PivotData(fields, '%Y-%m', ['WEAPON_FLAG', 1], csv=csv, repull=True)
csv = 'incident_marker.csv'
fields = ['Latitude', 'Longitude', 'Location', 'Primary Type']
p = PivotData(fields, '%Y-%m', ['WEAPON_FLAG', 1], csv=csv, repull=True)
csv = 'heatmap.csv'
fields = ['Latitude', 'Longitude']
p = PivotData(fields, '%Y-%m', ['WEAPON_FLAG', 1], csv=csv, repull=True)
csv = 'census_correlation.csv'
fields = ['Community Area', 'COMMUNITY', 'the_geom_community']
p = PivotData(fields, '%Y', ['WEAPON_FLAG', 1], ['Year', [2010, 2011, 2012, 2013, 2014]], csv=csv, repull=True)
csv = 'trends.csv'
fields = ['CITY']
p = PivotData(fields, '%Y-%m', ['WEAPON_FLAG', 1], csv=csv, repull=True)
csv = 'crime_location.csv'
fields = ['Primary Type', 'Location Description']
p = PivotData(fields, '%Y-%m', ['WEAPON_FLAG', 1], csv=csv, repull=True)
csv = 'district_marker.csv'
fields = ['Latitude', 'Longitude', 'DIST_NUM', 'Primary Type']
p = PivotData(fields, '%Y-%m', ['WEAPON_FLAG', 1], csv=csv, repull=True)
csv = 'city_marker.csv'
fields = ['Latitude', 'Longitude', 'CITY', 'Primary Type']
p = PivotData(fields, '%Y-%m', ['WEAPON_FLAG', 1], csv=csv, repull=True)
csv = 'crime_description.csv'
fields = ['Primary Type', 'Description']
p = PivotData(fields, '%Y-%m', ['WEAPON_FLAG', 1], csv=csv, repull=True)
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to subsample minibatches by balancing positives and negatives.
Subsamples minibatches based on a pre-specified positive fraction in range
[0,1]. The class presumes there are many more negatives than positive examples:
if the desired batch_size cannot be achieved with the pre-specified positive
fraction, it fills the rest with negative examples. If this is not sufficient
for obtaining the desired batch_size, it returns fewer examples.
The main function to call is Subsample(self, indicator, batch_size, labels). For
convenience one can also call SubsampleWeights(self, weights, labels), which is
defined in the minibatch_sampler base class.
When is_static is True, it implements a method that guarantees static shapes.
It also ensures that the length of the subsample output is always batch_size,
even when the number of examples set to True in indicator is less than
batch_size.
This is originally implemented in TensorFlow Object Detection API.
"""
import tensorflow.compat.v1 as tf
from utils.object_detection import minibatch_sampler
from utils.object_detection import ops
class BalancedPositiveNegativeSampler(minibatch_sampler.MinibatchSampler):
"""Subsamples minibatches to a desired balance of positives and negatives."""
def __init__(self, positive_fraction=0.5, is_static=False):
"""Constructs a minibatch sampler.
Args:
positive_fraction: desired fraction of positive examples (scalar in [0,1])
in the batch.
is_static: If True, uses an implementation with static shape guarantees.
Raises:
ValueError: if positive_fraction < 0, or positive_fraction > 1
"""
if positive_fraction < 0 or positive_fraction > 1:
raise ValueError('positive_fraction should be in range [0,1]. '
'Received: %s.' % positive_fraction)
self._positive_fraction = positive_fraction
self._is_static = is_static
def _get_num_pos_neg_samples(self, sorted_indices_tensor, sample_size):
"""Counts the number of positives and negatives numbers to be sampled.
Args:
sorted_indices_tensor: A sorted int32 tensor of shape [N] which contains
the signed indices of the examples where the sign is based on the label
        value. Examples that cannot be sampled are set to 0. It samples at most
        sample_size*positive_fraction positive examples and fills the remainder
        with negative examples.
sample_size: Size of subsamples.
Returns:
A tuple containing the number of positive and negative labels in the
subsample.
"""
input_length = tf.shape(sorted_indices_tensor)[0]
valid_positive_index = tf.greater(sorted_indices_tensor,
tf.zeros(input_length, tf.int32))
num_sampled_pos = tf.reduce_sum(tf.cast(valid_positive_index, tf.int32))
max_num_positive_samples = tf.constant(
int(sample_size * self._positive_fraction), tf.int32)
num_positive_samples = tf.minimum(max_num_positive_samples, num_sampled_pos)
num_negative_samples = tf.constant(sample_size,
tf.int32) - num_positive_samples
return num_positive_samples, num_negative_samples
def _get_values_from_start_and_end(self, input_tensor, num_start_samples,
num_end_samples, total_num_samples):
"""slices num_start_samples and last num_end_samples from input_tensor.
Args:
input_tensor: An int32 tensor of shape [N] to be sliced.
num_start_samples: Number of examples to be sliced from the beginning
of the input tensor.
num_end_samples: Number of examples to be sliced from the end of the
input tensor.
      total_num_samples: Sum of num_start_samples and num_end_samples. This
should be a scalar.
Returns:
A tensor containing the first num_start_samples and last num_end_samples
from input_tensor.
"""
input_length = tf.shape(input_tensor)[0]
start_positions = tf.less(tf.range(input_length), num_start_samples)
end_positions = tf.greater_equal(
tf.range(input_length), input_length - num_end_samples)
selected_positions = tf.logical_or(start_positions, end_positions)
selected_positions = tf.cast(selected_positions, tf.float32)
indexed_positions = tf.multiply(tf.cumsum(selected_positions),
selected_positions)
one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1,
total_num_samples,
dtype=tf.float32)
return tf.cast(tf.tensordot(tf.cast(input_tensor, tf.float32),
one_hot_selector, axes=[0, 0]), tf.int32)
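  # Walking through the selection trick above with illustrative values:
  # input_tensor=[9, 8, 7, 6, 5], num_start_samples=2, num_end_samples=1 gives
  # selected_positions=[1, 1, 0, 0, 1] and indexed_positions=[1, 2, 0, 0, 3];
  # the one-hot/tensordot then gathers [9, 8, 5] without dynamic-shape ops.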
def _static_subsample(self, indicator, batch_size, labels):
"""Returns subsampled minibatch.
Args:
      indicator: boolean tensor of shape [N] whose True entries can be sampled.
        N should be a compile time constant.
      batch_size: desired batch size. This scalar cannot be None.
      labels: boolean tensor of shape [N] denoting positive(=True) and negative
        (=False) examples. N should be a compile time constant.
Returns:
      sampled_idx_indicator: boolean tensor of shape [N], True for entries which
        are sampled. It ensures that the length of the subsample output is
        always batch_size, even when the number of examples set to True in
        indicator is less than batch_size.
Raises:
ValueError: if labels and indicator are not 1D boolean tensors.
"""
# Check if indicator and labels have a static size.
if not indicator.shape.is_fully_defined():
raise ValueError('indicator must be static in shape when is_static is'
'True')
if not labels.shape.is_fully_defined():
raise ValueError('labels must be static in shape when is_static is'
'True')
if not isinstance(batch_size, int):
raise ValueError('batch_size has to be an integer when is_static is'
'True.')
input_length = tf.shape(indicator)[0]
# Set the number of examples set True in indicator to be at least
# batch_size.
num_true_sampled = tf.reduce_sum(tf.cast(indicator, tf.float32))
additional_false_sample = tf.less_equal(
tf.cumsum(tf.cast(tf.logical_not(indicator), tf.float32)),
batch_size - num_true_sampled)
indicator = tf.logical_or(indicator, additional_false_sample)
# Shuffle indicator and label. Need to store the permutation to restore the
# order post sampling.
permutation = tf.random_shuffle(tf.range(input_length))
indicator = ops.matmul_gather_on_zeroth_axis(
tf.cast(indicator, tf.float32), permutation)
labels = ops.matmul_gather_on_zeroth_axis(
tf.cast(labels, tf.float32), permutation)
# index (starting from 1) when indicator is True, 0 when False
indicator_idx = tf.where(
tf.cast(indicator, tf.bool), tf.range(1, input_length + 1),
tf.zeros(input_length, tf.int32))
# Replace -1 for negative, +1 for positive labels
signed_label = tf.where(
tf.cast(labels, tf.bool), tf.ones(input_length, tf.int32),
tf.scalar_mul(-1, tf.ones(input_length, tf.int32)))
# negative of index for negative label, positive index for positive label,
# 0 when indicator is False.
signed_indicator_idx = tf.multiply(indicator_idx, signed_label)
sorted_signed_indicator_idx = tf.nn.top_k(
signed_indicator_idx, input_length, sorted=True).values
[num_positive_samples,
num_negative_samples] = self._get_num_pos_neg_samples(
sorted_signed_indicator_idx, batch_size)
sampled_idx = self._get_values_from_start_and_end(
sorted_signed_indicator_idx, num_positive_samples,
num_negative_samples, batch_size)
# Shift the indices to start from 0 and remove any samples that are set as
# False.
sampled_idx = tf.abs(sampled_idx) - tf.ones(batch_size, tf.int32)
sampled_idx = tf.multiply(
tf.cast(tf.greater_equal(sampled_idx, tf.constant(0)), tf.int32),
sampled_idx)
sampled_idx_indicator = tf.cast(tf.reduce_sum(
tf.one_hot(sampled_idx, depth=input_length),
axis=0), tf.bool)
# project back the order based on stored permutations
reprojections = tf.one_hot(permutation, depth=input_length,
dtype=tf.float32)
return tf.cast(tf.tensordot(
tf.cast(sampled_idx_indicator, tf.float32),
reprojections, axes=[0, 0]), tf.bool)
def subsample(self, indicator, batch_size, labels, scope=None):
"""Returns subsampled minibatch.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
batch_size: desired batch size. If None, keeps all positive samples and
randomly selects negative samples so that the positive sample fraction
        matches self._positive_fraction. It cannot be None if is_static is True.
labels: boolean tensor of shape [N] denoting positive(=True) and negative
(=False) examples.
scope: name scope.
Returns:
sampled_idx_indicator: boolean tensor of shape [N], True for entries which
are sampled.
Raises:
ValueError: if labels and indicator are not 1D boolean tensors.
"""
if len(indicator.get_shape().as_list()) != 1:
raise ValueError('indicator must be 1 dimensional, got a tensor of '
'shape %s' % indicator.get_shape())
if len(labels.get_shape().as_list()) != 1:
raise ValueError('labels must be 1 dimensional, got a tensor of '
'shape %s' % labels.get_shape())
if labels.dtype != tf.bool:
raise ValueError('labels should be of type bool. Received: %s' %
labels.dtype)
if indicator.dtype != tf.bool:
raise ValueError('indicator should be of type bool. Received: %s' %
indicator.dtype)
with tf.name_scope(scope, 'BalancedPositiveNegativeSampler'):
if self._is_static:
return self._static_subsample(indicator, batch_size, labels)
else:
# Only sample from indicated samples
negative_idx = tf.logical_not(labels)
positive_idx = tf.logical_and(labels, indicator)
negative_idx = tf.logical_and(negative_idx, indicator)
# Sample positive and negative samples separately
if batch_size is None:
max_num_pos = tf.reduce_sum(tf.to_int32(positive_idx))
else:
max_num_pos = int(self._positive_fraction * batch_size)
sampled_pos_idx = self.subsample_indicator(positive_idx, max_num_pos)
num_sampled_pos = tf.reduce_sum(tf.cast(sampled_pos_idx, tf.int32))
if batch_size is None:
negative_positive_ratio = (
1 - self._positive_fraction) / self._positive_fraction
max_num_neg = tf.to_int32(
negative_positive_ratio * tf.to_float(num_sampled_pos))
else:
max_num_neg = batch_size - num_sampled_pos
sampled_neg_idx = self.subsample_indicator(negative_idx, max_num_neg)
return tf.logical_or(sampled_pos_idx, sampled_neg_idx)
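# Usage sketch (illustrative shapes and values; runs under the tf.compat.v1
# graph-mode APIs used above):
#
#     sampler = BalancedPositiveNegativeSampler(positive_fraction=0.25)
#     indicator = tf.ones([512], dtype=tf.bool)            # all candidates valid
#     labels = tf.greater(tf.random_uniform([512]), 0.9)   # sparse positives
#     sampled = sampler.subsample(indicator, batch_size=64, labels=labels)
#     # `sampled` marks at most 16 positives, the remainder negatives.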
|
|
"""
OEShape overlap utilities.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
import collections
import numpy as np
from openeye.oechem import *
from openeye.oeshape import *
from oe_utils.shape.color import ColorForceField
class ColorOverlap(OEColorOverlap):
"""
Color overlap.
Parameters
----------
color_ff : int or OEColorForceField, optional (default
OEColorFFType_ImplicitMillsDean)
Color force field.
all_color : bool, optional (default True)
Calculate full pairwise color atom overlaps.
"""
def __init__(
self, color_ff=OEColorFFType_ImplicitMillsDean, all_color=True):
super(ColorOverlap, self).__init__()
if isinstance(color_ff, OEColorForceField):
self.color_ff = color_ff
else:
self.color_ff = OEColorForceField()
self.color_ff.Init(color_ff)
self.SetColorForceField(self.color_ff)
self.SetAllColor(all_color)
self.ref_mol = None
self.color_component_engines = None
def SetRefMol(self, ref_mol):
"""
Set reference molecule.
Parameters
----------
ref_mol : OEMol
Reference molecule.
"""
self.ref_mol = ref_mol
return super(ColorOverlap, self).SetRefMol(ref_mol)
def overlap(self, fit_mol):
"""
Get color overlap results.
Parameters
----------
fit_mol : OEMol
Fit molecule.
"""
result = OEColorResults()
self.ColorScore(fit_mol, result)
return ColorOverlapResult(result)
def get_color_components(self, fit_mol):
"""
Get overlap scores for each color type.
The color overlap is repeated with a series of different color force
fields that each have a single color type defined.
Parameters
----------
fit_mol : OEMol
Fit molecule.
"""
if self.color_component_engines is None:
self.color_component_engines = self.get_color_component_engines()
results = collections.defaultdict(list)
for color_type, color_type_name, engine in self.color_component_engines:
results['overlaps'].append(engine.overlap(fit_mol))
results['color_types'].append(color_type)
results['color_type_names'].append(color_type_name)
return results
def get_color_component_engines(self):
"""
Create a separate ColorOverlap engine for each interaction.
"""
color_component_engines = []
color_ff = ColorForceField(self.color_ff)
for this_color_ff in color_ff.isolate_interactions():
# Get a label for this force field.
# Assume like interactions only, and no duplicates.
# TODO: allow more flexibility here.
interactions = this_color_ff.get_interactions()
assert len(interactions) == 1
assert interactions[0][0] == interactions[0][1]
color_type = interactions[0][0]
color_type_name = this_color_ff.GetTypeName(color_type)
engine = ColorOverlap(
color_ff=this_color_ff, all_color=self.GetAllColor())
engine.SetRefMol(self.ref_mol)
color_component_engines.append(
(color_type, color_type_name, engine))
return color_component_engines
@staticmethod
def group_color_component_results(results):
"""
Extract scores from each overlay into arrays for each score type.
Parameters
----------
scores : array_like
2D array containing color component results.
"""
results = np.atleast_2d(results)
shape = results.shape
keys = [
'color_tanimoto', 'color_overlap', 'ref_self_color',
'fit_self_color']
data = {key: np.zeros(shape, dtype=float) for key in keys}
for i, this_results in enumerate(results):
for j, component_results in enumerate(this_results):
for k, component_result in enumerate(component_results):
for key in keys:
data[key][i, j, k] = getattr(component_result, key)
return data
def get_ref_color_atom_overlaps(self, fit_mol):
"""
Get overlap scores for each reference molecule color atom.
Each color atom in the reference molecule is isolated and the color
overlap with the fit molecule is scored.
Parameters
----------
fit_mol : OEMol
Fit molecule.
"""
results = []
# Use OEMol instead of CreateCopy because otherwise color atoms are
# added to self.ref_mol
colored_ref_mol = OEMol(self.ref_mol)
OEAddColorAtoms(colored_ref_mol, self.color_ff)
assert OECountColorAtoms(self.ref_mol) == 0
ref_color_coords = []
ref_color_types = []
ref_color_type_names = []
for ref_color_atom in OEGetColorAtoms(colored_ref_mol):
coords = colored_ref_mol.GetCoords(ref_color_atom)
ref_color_type = OEGetColorType(ref_color_atom)
ref_color_type_name = self.color_ff.GetTypeName(ref_color_type)
ref_color_coords.append(coords)
ref_color_types.append(ref_color_type)
ref_color_type_names.append(ref_color_type_name)
# Use OEMol instead of CreateCopy because otherwise colored_ref_mol
# color atoms are deleted by OERemoveColorAtoms
this_ref_mol = OEMol(colored_ref_mol)
OERemoveColorAtoms(this_ref_mol)
OEAddColorAtom(this_ref_mol, OEFloatArray(coords), ref_color_type,
ref_color_type_name)
assert OECountColorAtoms(this_ref_mol) == 1
super(ColorOverlap, self).SetRefMol(this_ref_mol)
results.append(self.overlap(fit_mol))
super(ColorOverlap, self).SetRefMol(self.ref_mol) # reset ref mol
return {'overlaps': results,
'ref_color_coords': ref_color_coords,
'ref_color_types': ref_color_types,
'ref_color_type_names': ref_color_type_names}
@staticmethod
def group_ref_color_atom_overlaps(results):
"""
Create a 3D masked array containing all overlap scores.
Parameters
----------
results : array_like
2D array containing reference molecule color atom overlap results.
"""
# get maximum number of ref color atoms
# don't use `for result in it` because that gives an array of size 1
max_size = 0
it = np.nditer(results, flags=['multi_index', 'refs_ok'])
for _ in it:
max_size = max(max_size, len(results[it.multi_index]))
# build a masked array containing results
# don't use data[it.multi_index][:result.size] because that assigns
# to a view and not to data
data = np.ma.masked_all((results.shape[:2] + (max_size,)), dtype=float)
it = np.nditer(results, flags=['multi_index', 'refs_ok'])
for _ in it:
i, j = it.multi_index
result = results[i, j]
data[i, j, :result.size] = result
return data
class ColorOverlapResult(object):
"""
Color overlap result.
Parameters
----------
result : OEColorResults
Color overlap result.
"""
def __init__(self, result):
# extract overlap scores
self.color_tanimoto = result.GetTanimoto()
self.color_overlap = result.colorscore
self.ref_self_color = result.refSelfColor
self.fit_self_color = result.fitSelfColor
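# --- Usage sketch (illustrative only) --------------------------------------
# Score the color overlap of two prealigned 3D molecules. The file names are
# hypothetical and running this requires an OpenEye license; oemolistream,
# OEMol, and OEReadMolecule come from the openeye.oechem import above.
if __name__ == '__main__':
    ref_mol, fit_mol = OEMol(), OEMol()
    OEReadMolecule(oemolistream('ref.oeb'), ref_mol)  # hypothetical input
    OEReadMolecule(oemolistream('fit.oeb'), fit_mol)  # hypothetical input
    engine = ColorOverlap()
    engine.SetRefMol(ref_mol)
    result = engine.overlap(fit_mol)
    print(result.color_tanimoto)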
|
|
"""
Base classes for writing management commands (named commands which can
be executed through ``django-admin`` or ``manage.py``).
"""
import os
import sys
import warnings
from argparse import ArgumentParser, HelpFormatter
from io import TextIOBase
import django
from django.core import checks
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import color_style, no_style
from django.db import DEFAULT_DB_ALIAS, connections
from django.utils.deprecation import RemovedInDjango41Warning
ALL_CHECKS = '__all__'
class CommandError(Exception):
"""
Exception class indicating a problem while executing a management
command.
If this exception is raised during the execution of a management
command, it will be caught and turned into a nicely-printed error
message to the appropriate output stream (i.e., stderr); as a
result, raising this exception (with a sensible description of the
error) is the preferred way to indicate that something has gone
wrong in the execution of a command.
"""
def __init__(self, *args, returncode=1, **kwargs):
self.returncode = returncode
super().__init__(*args, **kwargs)
class SystemCheckError(CommandError):
"""
The system check framework detected unrecoverable errors.
"""
pass
class CommandParser(ArgumentParser):
"""
Customized ArgumentParser class to improve some error messages and to
prevent SystemExit in several situations, as SystemExit is unacceptable
when a command is called programmatically.
"""
def __init__(self, *, missing_args_message=None, called_from_command_line=None, **kwargs):
self.missing_args_message = missing_args_message
self.called_from_command_line = called_from_command_line
super().__init__(**kwargs)
def parse_args(self, args=None, namespace=None):
# Catch missing argument for a better error message
if (self.missing_args_message and
not (args or any(not arg.startswith('-') for arg in args))):
self.error(self.missing_args_message)
return super().parse_args(args, namespace)
def error(self, message):
if self.called_from_command_line:
super().error(message)
else:
raise CommandError("Error: %s" % message)
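# Illustrative sketch: when a command is called programmatically
# (called_from_command_line is falsy), parser errors surface as CommandError
# instead of SystemExit, so callers can catch them:
#
#   parser = CommandParser(missing_args_message='Enter at least one label.')
#   parser.add_argument('args', nargs='+')
#   try:
#       parser.parse_args([])
#   except CommandError as e:
#       ...  # "Error: Enter at least one label."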
def handle_default_options(options):
"""
Include any default options that all commands should accept here
so that ManagementUtility can handle them before searching for
user commands.
"""
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
sys.path.insert(0, options.pythonpath)
def no_translations(handle_func):
"""Decorator that forces a command to run with translations deactivated."""
def wrapped(*args, **kwargs):
from django.utils import translation
saved_locale = translation.get_language()
translation.deactivate_all()
try:
res = handle_func(*args, **kwargs)
finally:
if saved_locale is not None:
translation.activate(saved_locale)
return res
return wrapped
class DjangoHelpFormatter(HelpFormatter):
"""
Customized formatter so that command-specific arguments appear in the
--help output before arguments common to all commands.
"""
show_last = {
'--version', '--verbosity', '--traceback', '--settings', '--pythonpath',
'--no-color', '--force-color', '--skip-checks',
}
def _reordered_actions(self, actions):
return sorted(
actions,
key=lambda a: set(a.option_strings) & self.show_last != set()
)
def add_usage(self, usage, actions, *args, **kwargs):
super().add_usage(usage, self._reordered_actions(actions), *args, **kwargs)
def add_arguments(self, actions):
super().add_arguments(self._reordered_actions(actions))
class OutputWrapper(TextIOBase):
"""
Wrapper around stdout/stderr
"""
@property
def style_func(self):
return self._style_func
@style_func.setter
def style_func(self, style_func):
if style_func and self.isatty():
self._style_func = style_func
else:
self._style_func = lambda x: x
def __init__(self, out, ending='\n'):
self._out = out
self.style_func = None
self.ending = ending
def __getattr__(self, name):
return getattr(self._out, name)
def flush(self):
if hasattr(self._out, 'flush'):
self._out.flush()
def isatty(self):
return hasattr(self._out, 'isatty') and self._out.isatty()
def write(self, msg='', style_func=None, ending=None):
ending = self.ending if ending is None else ending
if ending and not msg.endswith(ending):
msg += ending
style_func = style_func or self.style_func
self._out.write(style_func(msg))
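# Illustrative sketch: OutputWrapper appends the configured ending and only
# applies style_func when the underlying stream is a TTY:
#
#   import io
#   out = OutputWrapper(io.StringIO())
#   out.write('hello')            # underlying buffer receives 'hello\n'
#   out.write('done', ending='')  # no ending appended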
class BaseCommand:
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``django-admin`` or ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``ArgumentParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` or ``execute()`` raised any exception (e.g.
``CommandError``), ``run_from_argv()`` will instead print an error
message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``help``
A short description of the command, which will be printed in
help messages.
``output_transaction``
A boolean indicating whether the command outputs SQL
statements; if ``True``, the output will automatically be
wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
``False``.
``requires_migrations_checks``
A boolean; if ``True``, the command prints a warning if the set of
migrations on disk don't match the migrations in the database.
``requires_system_checks``
A list or tuple of tags, e.g. [Tags.staticfiles, Tags.models]. System
checks registered in the chosen tags will be checked for errors prior
to executing the command. The value '__all__' can be used to specify
that all system checks should be performed. Default value is '__all__'.
To validate an individual application's models
rather than all applications' models, call
``self.check(app_configs)`` from ``handle()``, where ``app_configs``
is the list of application's configuration provided by the
app registry.
``stealth_options``
A tuple of any options the command uses which aren't defined by the
argument parser.
"""
# Metadata about this command.
help = ''
# Configuration shortcuts that alter various logic.
_called_from_command_line = False
output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
requires_migrations_checks = False
requires_system_checks = '__all__'
# Arguments, common to all commands, which aren't defined by the argument
# parser.
base_stealth_options = ('stderr', 'stdout')
# Command-specific options not defined by the argument parser.
stealth_options = ()
def __init__(self, stdout=None, stderr=None, no_color=False, force_color=False):
self.stdout = OutputWrapper(stdout or sys.stdout)
self.stderr = OutputWrapper(stderr or sys.stderr)
if no_color and force_color:
raise CommandError("'no_color' and 'force_color' can't be used together.")
if no_color:
self.style = no_style()
else:
self.style = color_style(force_color)
self.stderr.style_func = self.style.ERROR
if self.requires_system_checks in [False, True]:
warnings.warn(
"Using a boolean value for requires_system_checks is "
"deprecated. Use '__all__' instead of True, and [] (an empty "
"list) instead of False.",
RemovedInDjango41Warning,
)
self.requires_system_checks = ALL_CHECKS if self.requires_system_checks else []
if (
not isinstance(self.requires_system_checks, (list, tuple)) and
self.requires_system_checks != ALL_CHECKS
):
raise TypeError('requires_system_checks must be a list or tuple.')
def get_version(self):
"""
Return the Django version, which should be correct for all built-in
Django commands. User-supplied commands can override this method to
return their own version.
"""
return django.get_version()
def create_parser(self, prog_name, subcommand, **kwargs):
"""
Create and return the ``ArgumentParser`` which will be used to
parse the arguments to this command.
"""
parser = CommandParser(
prog='%s %s' % (os.path.basename(prog_name), subcommand),
description=self.help or None,
formatter_class=DjangoHelpFormatter,
missing_args_message=getattr(self, 'missing_args_message', None),
called_from_command_line=getattr(self, '_called_from_command_line', None),
**kwargs
)
parser.add_argument('--version', action='version', version=self.get_version())
parser.add_argument(
'-v', '--verbosity', default=1,
type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output',
)
parser.add_argument(
'--settings',
help=(
'The Python path to a settings module, e.g. '
'"myproject.settings.main". If this isn\'t provided, the '
'DJANGO_SETTINGS_MODULE environment variable will be used.'
),
)
parser.add_argument(
'--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".',
)
parser.add_argument('--traceback', action='store_true', help='Raise on CommandError exceptions')
parser.add_argument(
'--no-color', action='store_true',
help="Don't colorize the command output.",
)
parser.add_argument(
'--force-color', action='store_true',
help='Force colorization of the command output.',
)
if self.requires_system_checks:
parser.add_argument(
'--skip-checks', action='store_true',
help='Skip system checks.',
)
self.add_arguments(parser)
return parser
def add_arguments(self, parser):
"""
Entry point for subclassed commands to add custom arguments.
"""
pass
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command. If the
command raises a ``CommandError``, intercept it and print it sensibly
to stderr. If the ``--traceback`` option is present or the raised
``Exception`` is not ``CommandError``, raise it.
"""
self._called_from_command_line = True
parser = self.create_parser(argv[0], argv[1])
options = parser.parse_args(argv[2:])
cmd_options = vars(options)
# Move positional args out of options to mimic legacy optparse
args = cmd_options.pop('args', ())
handle_default_options(options)
try:
self.execute(*args, **cmd_options)
except CommandError as e:
if options.traceback:
raise
# SystemCheckError takes care of its own formatting.
if isinstance(e, SystemCheckError):
self.stderr.write(str(e), lambda x: x)
else:
self.stderr.write('%s: %s' % (e.__class__.__name__, e))
sys.exit(e.returncode)
finally:
try:
connections.close_all()
except ImproperlyConfigured:
# Ignore if connections aren't setup at this point (e.g. no
# configured settings).
pass
def execute(self, *args, **options):
"""
Try to execute this command, performing system checks if needed (as
controlled by the ``requires_system_checks`` attribute, except if
force-skipped).
"""
if options['force_color'] and options['no_color']:
raise CommandError("The --no-color and --force-color options can't be used together.")
if options['force_color']:
self.style = color_style(force_color=True)
elif options['no_color']:
self.style = no_style()
self.stderr.style_func = None
if options.get('stdout'):
self.stdout = OutputWrapper(options['stdout'])
if options.get('stderr'):
self.stderr = OutputWrapper(options['stderr'])
if self.requires_system_checks and not options['skip_checks']:
if self.requires_system_checks == ALL_CHECKS:
self.check()
else:
self.check(tags=self.requires_system_checks)
if self.requires_migrations_checks:
self.check_migrations()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
output = '%s\n%s\n%s' % (
self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()),
output,
self.style.SQL_KEYWORD(connection.ops.end_transaction_sql()),
)
self.stdout.write(output)
return output
def check(self, app_configs=None, tags=None, display_num_errors=False,
include_deployment_checks=False, fail_level=checks.ERROR,
databases=None):
"""
Use the system check framework to validate the entire Django project.
Raise CommandError for any serious message (error or critical).
If there are only light messages (like warnings), print them to stderr
and don't raise an exception.
"""
all_issues = checks.run_checks(
app_configs=app_configs,
tags=tags,
include_deployment_checks=include_deployment_checks,
databases=databases,
)
header, body, footer = "", "", ""
visible_issue_count = 0 # excludes silenced warnings
if all_issues:
debugs = [e for e in all_issues if e.level < checks.INFO and not e.is_silenced()]
infos = [e for e in all_issues if checks.INFO <= e.level < checks.WARNING and not e.is_silenced()]
warnings = [e for e in all_issues if checks.WARNING <= e.level < checks.ERROR and not e.is_silenced()]
errors = [e for e in all_issues if checks.ERROR <= e.level < checks.CRITICAL and not e.is_silenced()]
criticals = [e for e in all_issues if checks.CRITICAL <= e.level and not e.is_silenced()]
sorted_issues = [
(criticals, 'CRITICALS'),
(errors, 'ERRORS'),
(warnings, 'WARNINGS'),
(infos, 'INFOS'),
(debugs, 'DEBUGS'),
]
for issues, group_name in sorted_issues:
if issues:
visible_issue_count += len(issues)
formatted = (
self.style.ERROR(str(e))
if e.is_serious()
else self.style.WARNING(str(e))
for e in issues)
formatted = "\n".join(sorted(formatted))
body += '\n%s:\n%s\n' % (group_name, formatted)
if visible_issue_count:
header = "System check identified some issues:\n"
if display_num_errors:
if visible_issue_count:
footer += '\n'
footer += "System check identified %s (%s silenced)." % (
"no issues" if visible_issue_count == 0 else
"1 issue" if visible_issue_count == 1 else
"%s issues" % visible_issue_count,
len(all_issues) - visible_issue_count,
)
if any(e.is_serious(fail_level) and not e.is_silenced() for e in all_issues):
msg = self.style.ERROR("SystemCheckError: %s" % header) + body + footer
raise SystemCheckError(msg)
else:
msg = header + body + footer
if msg:
if visible_issue_count:
self.stderr.write(msg, lambda x: x)
else:
self.stdout.write(msg)
def check_migrations(self):
"""
Print a warning if the set of migrations on disk don't match the
migrations in the database.
"""
from django.db.migrations.executor import MigrationExecutor
try:
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
except ImproperlyConfigured:
# No databases are configured (or the dummy one)
return
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if plan:
apps_waiting_migration = sorted({migration.app_label for migration, backwards in plan})
self.stdout.write(
self.style.NOTICE(
"\nYou have %(unapplied_migration_count)s unapplied migration(s). "
"Your project may not work properly until you apply the "
"migrations for app(s): %(apps_waiting_migration)s." % {
"unapplied_migration_count": len(plan),
"apps_waiting_migration": ", ".join(apps_waiting_migration),
}
)
)
self.stdout.write(self.style.NOTICE("Run 'python manage.py migrate' to apply them."))
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError('subclasses of BaseCommand must provide a handle() method')
class AppCommand(BaseCommand):
"""
A management command which takes one or more installed application labels
as arguments, and does something with each of them.
Rather than implementing ``handle()``, subclasses must implement
``handle_app_config()``, which will be called once for each application.
"""
missing_args_message = "Enter at least one application label."
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label', nargs='+', help='One or more application labels.')
def handle(self, *app_labels, **options):
from django.apps import apps
try:
app_configs = [apps.get_app_config(app_label) for app_label in app_labels]
except (LookupError, ImportError) as e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
output = []
for app_config in app_configs:
app_output = self.handle_app_config(app_config, **options)
if app_output:
output.append(app_output)
return '\n'.join(output)
def handle_app_config(self, app_config, **options):
"""
Perform the command's actions for app_config, an AppConfig instance
corresponding to an application label given on the command line.
"""
raise NotImplementedError(
    "Subclasses of AppCommand must provide "
    "a handle_app_config() method.")
class LabelCommand(BaseCommand):
"""
A management command which takes one or more arbitrary arguments
(labels) on the command line, and does something with each of
them.
Rather than implementing ``handle()``, subclasses must implement
``handle_label()``, which will be called once for each label.
If the arguments should be names of installed applications, use
``AppCommand`` instead.
"""
label = 'label'
missing_args_message = "Enter at least one %s." % label
def add_arguments(self, parser):
parser.add_argument('args', metavar=self.label, nargs='+')
def handle(self, *labels, **options):
output = []
for label in labels:
label_output = self.handle_label(label, **options)
if label_output:
output.append(label_output)
return '\n'.join(output)
def handle_label(self, label, **options):
"""
Perform the command's actions for ``label``, which will be the
string as given on the command line.
"""
raise NotImplementedError('subclasses of LabelCommand must provide a handle_label() method')
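# Illustrative sketch: a minimal custom command built on these base classes.
# It would live at myapp/management/commands/greet.py (hypothetical path) and
# run as ``manage.py greet alice bob``:
#
#   class Command(LabelCommand):
#       help = 'Greet each name passed on the command line.'
#       label = 'name'
#
#       def handle_label(self, label, **options):
#           return 'Hello, %s!' % label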
|
|
"""Test the zerproc lights."""
import pytest
import pyzerproc
from homeassistant import setup
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
ATTR_RGB_COLOR,
ATTR_XY_COLOR,
SCAN_INTERVAL,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
)
from homeassistant.components.zerproc.light import DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_SUPPORTED_FEATURES,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import MockConfigEntry, async_fire_time_changed
@pytest.fixture
async def mock_light(hass):
"""Create a mock light entity."""
await setup.async_setup_component(hass, "persistent_notification", {})
mock_entry = MockConfigEntry(domain=DOMAIN)
mock_entry.add_to_hass(hass)
light = pyzerproc.Light("AA:BB:CC:DD:EE:FF", "LEDBlue-CCDDEEFF")
mock_state = pyzerproc.LightState(False, (0, 0, 0))
with patch(
"homeassistant.components.zerproc.light.pyzerproc.discover",
return_value=[light],
), patch.object(light, "connect"), patch.object(
light, "get_state", return_value=mock_state
):
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
return light
async def test_init(hass):
"""Test platform setup."""
await setup.async_setup_component(hass, "persistent_notification", {})
mock_entry = MockConfigEntry(domain=DOMAIN)
mock_entry.add_to_hass(hass)
mock_light_1 = pyzerproc.Light("AA:BB:CC:DD:EE:FF", "LEDBlue-CCDDEEFF")
mock_light_2 = pyzerproc.Light("11:22:33:44:55:66", "LEDBlue-33445566")
mock_state_1 = pyzerproc.LightState(False, (0, 0, 0))
mock_state_2 = pyzerproc.LightState(True, (0, 80, 255))
with patch(
"homeassistant.components.zerproc.light.pyzerproc.discover",
return_value=[mock_light_1, mock_light_2],
), patch.object(mock_light_1, "connect"), patch.object(
mock_light_2, "connect"
), patch.object(
mock_light_1, "get_state", return_value=mock_state_1
), patch.object(
mock_light_2, "get_state", return_value=mock_state_2
):
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
state = hass.states.get("light.ledblue_ccddeeff")
assert state.state == STATE_OFF
assert state.attributes == {
ATTR_FRIENDLY_NAME: "LEDBlue-CCDDEEFF",
ATTR_SUPPORTED_FEATURES: SUPPORT_BRIGHTNESS | SUPPORT_COLOR,
}
state = hass.states.get("light.ledblue_33445566")
assert state.state == STATE_ON
assert state.attributes == {
ATTR_FRIENDLY_NAME: "LEDBlue-33445566",
ATTR_SUPPORTED_FEATURES: SUPPORT_BRIGHTNESS | SUPPORT_COLOR,
ATTR_BRIGHTNESS: 255,
ATTR_HS_COLOR: (221.176, 100.0),
ATTR_RGB_COLOR: (0, 80, 255),
ATTR_XY_COLOR: (0.138, 0.08),
}
with patch.object(hass.loop, "stop"), patch.object(
mock_light_1, "disconnect"
) as mock_disconnect_1, patch.object(
mock_light_2, "disconnect"
) as mock_disconnect_2:
await hass.async_stop()
assert mock_disconnect_1.called
assert mock_disconnect_2.called
async def test_discovery_exception(hass):
"""Test platform setup."""
await setup.async_setup_component(hass, "persistent_notification", {})
mock_entry = MockConfigEntry(domain=DOMAIN)
mock_entry.add_to_hass(hass)
with patch(
"homeassistant.components.zerproc.light.pyzerproc.discover",
side_effect=pyzerproc.ZerprocException("TEST"),
):
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
# The exception should be captured and no entities should be added
assert len(hass.data[DOMAIN]["addresses"]) == 0
async def test_connect_exception(hass):
"""Test platform setup."""
await setup.async_setup_component(hass, "persistent_notification", {})
mock_entry = MockConfigEntry(domain=DOMAIN)
mock_entry.add_to_hass(hass)
mock_light = pyzerproc.Light("AA:BB:CC:DD:EE:FF", "LEDBlue-CCDDEEFF")
with patch(
"homeassistant.components.zerproc.light.pyzerproc.discover",
return_value=[mock_light],
), patch.object(
mock_light, "connect", side_effect=pyzerproc.ZerprocException("TEST")
):
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
# The exception should be captured and no entities should be added
assert len(hass.data[DOMAIN]["addresses"]) == 0
async def test_light_turn_on(hass, mock_light):
"""Test ZerprocLight turn_on."""
utcnow = dt_util.utcnow()
with patch.object(mock_light, "turn_on") as mock_turn_on:
await hass.services.async_call(
"light",
"turn_on",
{ATTR_ENTITY_ID: "light.ledblue_ccddeeff"},
blocking=True,
)
await hass.async_block_till_done()
mock_turn_on.assert_called()
with patch.object(mock_light, "set_color") as mock_set_color:
await hass.services.async_call(
"light",
"turn_on",
{ATTR_ENTITY_ID: "light.ledblue_ccddeeff", ATTR_BRIGHTNESS: 25},
blocking=True,
)
await hass.async_block_till_done()
mock_set_color.assert_called_with(25, 25, 25)
# Make sure no discovery calls are made while we emulate time passing
with patch("homeassistant.components.zerproc.light.pyzerproc.discover"):
with patch.object(
mock_light,
"get_state",
return_value=pyzerproc.LightState(True, (175, 150, 220)),
):
utcnow = utcnow + SCAN_INTERVAL
async_fire_time_changed(hass, utcnow)
await hass.async_block_till_done()
with patch.object(mock_light, "set_color") as mock_set_color:
await hass.services.async_call(
"light",
"turn_on",
{ATTR_ENTITY_ID: "light.ledblue_ccddeeff", ATTR_BRIGHTNESS: 25},
blocking=True,
)
await hass.async_block_till_done()
mock_set_color.assert_called_with(19, 17, 25)
with patch.object(mock_light, "set_color") as mock_set_color:
await hass.services.async_call(
"light",
"turn_on",
{ATTR_ENTITY_ID: "light.ledblue_ccddeeff", ATTR_HS_COLOR: (50, 50)},
blocking=True,
)
await hass.async_block_till_done()
mock_set_color.assert_called_with(220, 201, 110)
with patch.object(
mock_light,
"get_state",
return_value=pyzerproc.LightState(True, (75, 75, 75)),
):
utcnow = utcnow + SCAN_INTERVAL
async_fire_time_changed(hass, utcnow)
await hass.async_block_till_done()
with patch.object(mock_light, "set_color") as mock_set_color:
await hass.services.async_call(
"light",
"turn_on",
{ATTR_ENTITY_ID: "light.ledblue_ccddeeff", ATTR_HS_COLOR: (50, 50)},
blocking=True,
)
await hass.async_block_till_done()
mock_set_color.assert_called_with(75, 68, 37)
with patch.object(mock_light, "set_color") as mock_set_color:
await hass.services.async_call(
"light",
"turn_on",
{
ATTR_ENTITY_ID: "light.ledblue_ccddeeff",
ATTR_BRIGHTNESS: 200,
ATTR_HS_COLOR: (75, 75),
},
blocking=True,
)
await hass.async_block_till_done()
mock_set_color.assert_called_with(162, 200, 50)
async def test_light_turn_off(hass, mock_light):
"""Test ZerprocLight turn_on."""
with patch.object(mock_light, "turn_off") as mock_turn_off:
await hass.services.async_call(
"light",
"turn_off",
{ATTR_ENTITY_ID: "light.ledblue_ccddeeff"},
blocking=True,
)
await hass.async_block_till_done()
mock_turn_off.assert_called()
async def test_light_update(hass, mock_light):
"""Test ZerprocLight update."""
utcnow = dt_util.utcnow()
state = hass.states.get("light.ledblue_ccddeeff")
assert state.state == STATE_OFF
assert state.attributes == {
ATTR_FRIENDLY_NAME: "LEDBlue-CCDDEEFF",
ATTR_SUPPORTED_FEATURES: SUPPORT_BRIGHTNESS | SUPPORT_COLOR,
}
# Make sure no discovery calls are made while we emulate time passing
with patch("homeassistant.components.zerproc.light.pyzerproc.discover"):
# Test an exception during a state update
with patch.object(
mock_light, "get_state", side_effect=pyzerproc.ZerprocException("TEST")
):
utcnow = utcnow + SCAN_INTERVAL
async_fire_time_changed(hass, utcnow)
await hass.async_block_till_done()
state = hass.states.get("light.ledblue_ccddeeff")
assert state.state == STATE_UNAVAILABLE
assert state.attributes == {
ATTR_FRIENDLY_NAME: "LEDBlue-CCDDEEFF",
ATTR_SUPPORTED_FEATURES: SUPPORT_BRIGHTNESS | SUPPORT_COLOR,
}
with patch.object(
mock_light,
"get_state",
return_value=pyzerproc.LightState(False, (200, 128, 100)),
):
utcnow = utcnow + SCAN_INTERVAL
async_fire_time_changed(hass, utcnow)
await hass.async_block_till_done()
state = hass.states.get("light.ledblue_ccddeeff")
assert state.state == STATE_OFF
assert state.attributes == {
ATTR_FRIENDLY_NAME: "LEDBlue-CCDDEEFF",
ATTR_SUPPORTED_FEATURES: SUPPORT_BRIGHTNESS | SUPPORT_COLOR,
}
with patch.object(
mock_light,
"get_state",
return_value=pyzerproc.LightState(True, (175, 150, 220)),
):
utcnow = utcnow + SCAN_INTERVAL
async_fire_time_changed(hass, utcnow)
await hass.async_block_till_done()
state = hass.states.get("light.ledblue_ccddeeff")
assert state.state == STATE_ON
assert state.attributes == {
ATTR_FRIENDLY_NAME: "LEDBlue-CCDDEEFF",
ATTR_SUPPORTED_FEATURES: SUPPORT_BRIGHTNESS | SUPPORT_COLOR,
ATTR_BRIGHTNESS: 220,
ATTR_HS_COLOR: (261.429, 31.818),
ATTR_RGB_COLOR: (202, 173, 255),
ATTR_XY_COLOR: (0.291, 0.232),
}
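# Note on the expected set_color() arguments above (a reading of the test
# arithmetic, not a claim about the integration's internals): when the light
# reports color (175, 150, 220), its brightness is the max channel, 220.
# Requesting brightness 25 keeps the hue/saturation and rescales each channel
# by 25/220, truncating:
#   175 * 25 // 220 == 19, 150 * 25 // 220 == 17, 220 * 25 // 220 == 25
# which matches mock_set_color.assert_called_with(19, 17, 25).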
|
|
#!/usr/bin/env python
"""Surge: cryptocurrency data downloader.
Downloads cryptocurrency price data from the public APIs of BitcoinAverage,
CryptoCoinCharts, and Bittrex. Writes to a PostgreSQL database.
"""
from __future__ import division, print_function, unicode_literals, absolute_import
try:
import sys
import cdecimal
sys.modules["decimal"] = cdecimal
except ImportError:
pass
import os
import getopt
import json
import time
import datetime
import requests
from decimal import Decimal, ROUND_HALF_EVEN, getcontext
from contextlib import contextmanager
try:
import psycopg2 as db
import psycopg2.extensions as ext
from psycopg2.extras import RealDictCursor
except ImportError:
import psycopg2cffi as db
import psycopg2cffi.extensions as ext
from psycopg2cffi.extras import RealDictCursor
# Python 3 compatibility
from six.moves import xrange as range
_IS_PYTHON_3 = sys.version_info[0] == 3
identity = lambda x : x
if _IS_PYTHON_3:
u = identity
else:
import codecs
def u(string):
return codecs.unicode_escape_decode(string)[0]
getcontext().rounding = ROUND_HALF_EVEN
getcontext().prec = 28
# Postgres connection
if not os.environ.get("CONTINUOUS_INTEGRATION"):
conn = db.connect("host=localhost dbname=surge user=surge password=surge")
conn.set_isolation_level(ext.ISOLATION_LEVEL_READ_COMMITTED)
BITCOINAVERAGE_API = "https://api.bitcoinaverage.com/"
CRYPTOCOINCHARTS_API = "http://www.cryptocoincharts.info/v2/api/"
BITTREX_API = "https://bittrex.com/api/v1.1/"
class Surge(object):
def __init__(self, update_all_coins=True, max_retry=0,
verbose=False, coin_list=None, interval=90,
database_check=True):
"""
Args:
update_all_coins (bool): update every listed coin, not just coin_list.
max_retry (int): maximum restarts of the update loop (-1 for unlimited).
verbose (bool): print progress messages to stdout.
coin_list (list): ticker symbols to update, or None for all coins.
interval (int): seconds between update rounds.
database_check (bool): verify (and rebuild if needed) the tables on startup.
"""
self.full = update_all_coins
self.coin_list = coin_list
self.max_retry = max_retry
self.verbose = verbose
self.interval = interval
self.log = "surge.log"  # error log path; assumed default (the original never set this)
if database_check:
self.reset_database()
def reset_database(self):
try:
with cursor() as cur:
cur.execute("SELECT 1 FROM coin_data")
cur.execute("SELECT 1 FROM orderbook")
cur.execute("SELECT 1 FROM bittrex_history")
except db.Error:
queries = (
"DROP TABLE IF EXISTS coin_data",
"DROP TABLE IF EXISTS orderbook",
"DROP TABLE IF EXISTS bittrex_history",
"""CREATE TABLE coin_data (
ticker varchar(10),
name varchar(100),
price numeric(24,8),
price_btc numeric(24,8),
volume_btc numeric,
data_source varchar(1000),
last_update timestamp DEFAULT statement_timestamp())""",
"""CREATE TABLE orderbook (
ticker1 varchar(10),
ticker2 varchar(10),
buy_or_sell char(1),
quantity numeric,
rate numeric(24,8),
total numeric(24,8),
data_source varchar(1000),
updated timestamp DEFAULT statement_timestamp())""",
"""CREATE TABLE bittrex_history (
internal_id bigserial NOT NULL PRIMARY KEY,
bittrex_id bigint,
ordertype varchar(25),
price numeric(24,8),
quantity numeric(24,8),
total numeric(24,8),
updated timestamp DEFAULT statement_timestamp())""",
)
with cursor() as cur:
for query in queries:
cur.execute(query)
def update_all(self):
self.update_bitcoinaverage()
self.update_cryptocoincharts()
self.bittrex_orderbook_snapshot()
def bittrex_orderbook_snapshot(self):
"""
Bittrex API
public/getorderbook -> JSON
{
    "success": true,
    "result": {
        "buy":  [{"Quantity": ..., "Rate": ...}, ...],
        "sell": [{"Quantity": ..., "Rate": ...}, ...]
    }
}
"""
if self.verbose:
print("Update Bittrex data:")
try:
bittrex_url = BITTREX_API + "public/getorderbook?market=%s-%s&type=both&depth=%s"
ticker1 = "BTC"
ticker2 = "LTC"
depth = 50
if self.verbose:
    print("- Request orderbook from Bittrex (depth: " + str(depth) + ")")
    print(bittrex_url % (ticker1, ticker2, depth))
    print("Market: %s-%s" % (ticker1, ticker2))
orderbook = requests.get(bittrex_url % (ticker1, ticker2, depth))
now = datetime.datetime.now()
if orderbook.status_code == 200:
orderbook_dict = orderbook.json()
if orderbook_dict['success']:
if self.verbose:
print("- Got orderbook")
with cursor() as cur:
for buysell in orderbook_dict['result']:
if self.verbose:
print("- Parse", buysell, "orders")
# cur.execute("TRUNCATE orderbook")
orders = orderbook_dict['result'][buysell]
for order in orders:
if self.verbose:
sys.stdout.write('.')
sys.stdout.flush()
query = """INSERT INTO orderbook
(ticker1, ticker2,
buy_or_sell, quantity, rate,
total, data_source, updated)
VALUES
(%(ticker1)s, %(ticker2)s,
%(buy_or_sell)s, %(quantity)s, %(rate)s,
%(total)s, %(data_source)s, %(updated)s)"""
parameters = {
'ticker1': ticker1,
'ticker2': ticker2,
'buy_or_sell': buysell[0],
'quantity': order['Quantity'],
'rate': order['Rate'],
'total': order['Quantity'] * order['Rate'],
'data_source': 'bittrex',
'updated': now,
}
cur.execute(query, parameters)
print()
except requests.ConnectionError as err:
msg = "Error: couldn't connect to Bittrex API"
timestamp = datetime.datetime.now()
if self.verbose:
print(msg)
print(err)
with open(self.log, 'a') as logfile:
    error_message = str(timestamp) + '\n' + str(err) + '\n' + msg + '\n'
    logfile.write(error_message)
if self.verbose:
print("Done.")
def update_bitcoinaverage(self):
"""
BitcoinAverage (for BTC)
ticker/USD/last -> last USD/BTC trade (amount in USD)
ticker/global/USD/ -> JSON
{
"24h_avg": 622.37,
"ask": 621.28,
"bid": 620.14,
"last": 621.6,
"timestamp": "Wed, 23 Jul 2014 03:47:00 -0000",
"volume_btc": 8968.24,
"volume_percent": 58.43
}
"""
# BitcoinAverage API (vs USD)
if self.verbose:
print("Update BitcoinAverage data:")
coin = 'BTC'
btc_digits = Decimal(currency_precision(coin))
bitavg_url = BITCOINAVERAGE_API + "ticker/USD/last"
volume_bitavg_url = BITCOINAVERAGE_API + "ticker/global/USD/volume_btc"
if self.verbose:
print("- Fetching Bitcoin data from BitcoinAverage:")
print(bitavg_url)
self.price = requests.get(bitavg_url).json()
self.price = Decimal(self.price).quantize(btc_digits, rounding=ROUND_HALF_EVEN)
volume_btc = Decimal(requests.get(volume_bitavg_url).json()).quantize(
Decimal(".00001"), rounding=ROUND_HALF_EVEN
)
timestamp = datetime.datetime.now()
select_price_query = (
"SELECT price FROM coin_data WHERE ticker = %s"
)
previous_price = None
with cursor() as cur:
cur.execute(select_price_query, (coin,))
for row in cur:
previous_price = row[0]
insert_prices_query = """INSERT INTO coin_data
(name, ticker, price, price_btc,
data_source, last_update)
VALUES
(%(name)s, %(ticker)s, %(price)s, %(price_btc)s,
%(data_source)s, %(last_update)s)"""
insert_prices_parameters = {
'name': 'Bitcoin',
'ticker': coin,
'price': self.price,
'price_btc': Decimal("1.0"),
'data_source': 'BitcoinAverage',
'last_update': timestamp,
}
with cursor() as cur:
cur.execute(insert_prices_query, insert_prices_parameters)
if self.verbose:
print("\nDone.")
def update_cryptocoincharts(self):
"""
CryptoCoinCharts API (for alts)
/v2/listCoins -> list-of-dicts JSON
[{
"id": ticker symbol (e.g. "ltc"),
"name": coin's name (e.g. "Litecoin"),
"website": coin's home page (if any),
"price_btc": price in bitcoins (last traded price @ "best" market),
"volume_btc": volume traded (in bitcoins) over the past 24 hours
},...]
"""
if self.verbose:
print("Update CryptoCoinCharts data:")
btc_digits = Decimal(currency_precision('BTC'))
btc_price = requests.get(BITCOINAVERAGE_API + "ticker/USD/last").json()
btc_price = Decimal(btc_price).quantize(btc_digits,
rounding=ROUND_HALF_EVEN)
# Get altcoin data from CryptoCoinCharts API
url = CRYPTOCOINCHARTS_API + "listCoins"
if self.verbose:
print("- Fetching data from CryptoCoinCharts API")
print(url)
coin_price_list = requests.get(url).json()
timestamp = datetime.datetime.now()
if self.verbose:
num_coins = len(coin_price_list)
print("-", num_coins, "coins found")
for i, coin in enumerate(coin_price_list):
digits = Decimal(currency_precision(coin['id']))
if self.coin_list is None or coin['id'].upper() in self.coin_list:
decimal_price_btc = Decimal(coin['price_btc']).quantize(digits)
decimal_price_usd = decimal_price_btc * btc_price
decimal_volume_btc = Decimal(coin['volume_btc']).quantize(btc_digits)
select_price_query = "SELECT price_btc FROM coin_data WHERE name = %s"
previous_price = None
with cursor() as cur:
cur.execute(select_price_query, (coin['name'], ))
for row in cur:
previous_price = row[0]
# Insert coin data into database
insert_prices_query = """INSERT INTO coin_data
(name, ticker, price,
price_btc, volume_btc,
data_source, last_update)
VALUES
(%(name)s, %(ticker)s, %(price)s,
%(price_btc)s, %(volume_btc)s,
%(data_source)s, %(last_update)s)"""
insert_prices_parameters = {
'name': coin['name'],
'ticker': coin['id'].upper(),
'price': decimal_price_usd,
'price_btc': decimal_price_btc,
'volume_btc': decimal_volume_btc,
'data_source': 'CryptoCoinCharts',
'last_update': timestamp,
}
with cursor() as cur:
if self.verbose:
count = i + 1
progress = round(count / float(num_coins), 3)
sys.stdout.write("Loading coin data: " + str(count) + "/" +\
str(num_coins) + " processed [" +\
str(progress * 100) + "%] \r")
sys.stdout.flush()
cur.execute(insert_prices_query,
insert_prices_parameters)
# Coins not in the requested coin_list are simply skipped.
if self.verbose:
    sys.stdout.flush()
    print("\nDone.")
def update_loop(self):
restart_counter = 0
while True:
if self.verbose:
print("Starting update loop (interval:", self.interval, "sec)")
try:
while True:
self.update_cryptocoincharts()
self.bittrex_orderbook_snapshot()
time.sleep(self.interval - time.time() % self.interval)
except Exception as exc:
print("Exception during data update loop:")
print(exc)
time.sleep(5)
if self.max_retry != -1 and restart_counter > self.max_retry:
    print("Number of restarts (%d) exceeds the maximum number of allowed retries (%d). Exiting..." % (restart_counter, self.max_retry))
return 2
else:
restart_counter += 1
print("Restarting...")
@contextmanager
def cursor(cursor_factory=False):
"""Database cursor generator. Commit on context exit."""
try:
if cursor_factory:
cur = conn.cursor(cursor_factory=RealDictCursor)
else:
cur = conn.cursor()
yield cur
except (db.Error, Exception) as e:
cur.close()
if conn:
conn.rollback()
print(e)
raise
else:
conn.commit()
cur.close()
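# Illustrative usage: the context manager commits on clean exit and rolls
# back (then re-raises) on error, so callers never manage transactions
# directly:
#
#   with cursor() as cur:
#       cur.execute("SELECT price FROM coin_data WHERE ticker = %s", ('BTC',))
#       rows = cur.fetchall()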
def currency_precision(currency_code):
if currency_code.upper() == 'NXT':
precision = '.01'
elif currency_code.upper() == 'XRP':
precision = '.000001'
else:
precision = '.00000001'
return precision
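# Illustrative sketch: the precision string feeds Decimal.quantize, giving
# eight decimal places for most coins (banker's rounding per the module-wide
# context set above):
#
#   Decimal("0.123456789").quantize(Decimal(currency_precision('BTC')))
#   # -> Decimal('0.12345679')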
def main(argv=None):
if argv is None:
argv = sys.argv
try:
short_opts = 'hvsi:m:'
long_opts = ['help', 'verbose', 'single', 'interval=', 'max-retry=']
opts, vals = getopt.getopt(argv[1:], short_opts, long_opts)
except getopt.GetoptError as e:
sys.stderr.write(e.msg)
sys.stderr.write("for help use --help")
return 2
parameters = {
'verbose': False,
'update_all_coins': True,
'coin_list': None,
'interval': 60, # 1 minute
'max_retry': -1, # Set to -1 for unlimited
}
run_loop = True
for opt, arg in opts:
if opt in ('-h', '--help'):
print(__doc__)
return 0
elif opt in ('-v', '--verbose'):
parameters['verbose'] = True
elif opt in ('-s', '--single'):
run_loop = False
elif opt in ('-i', '--interval'):
parameters['interval'] = float(arg)
elif opt in ('-m', '--max-retry'):
parameters['max_retry'] = int(arg)
surge = Surge(**parameters)
if run_loop:
surge.update_loop()
else:
surge.update_cryptocoincharts()
surge.bittrex_orderbook_snapshot()
try:
if conn:
conn.close()
except:
pass
if __name__ == '__main__':
sys.exit(main())
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of kothic, the realtime map renderer.
# kothic is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# kothic is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with kothic. If not, see <http://www.gnu.org/licenses/>.
import re
INVERSIONS = {"eq":"ne", "true":"false", "set":"unset", "<":">=", ">":"<="}
in2 = {}
for a, b in INVERSIONS.items():
in2[b] = a
INVERSIONS.update(in2)
del in2
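# After the bidirectional update above, INVERSIONS is:
#   {"eq": "ne", "ne": "eq", "true": "false", "false": "true",
#    "set": "unset", "unset": "set",
#    "<": ">=", ">=": "<", ">": "<=", "<=": ">"}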
class Condition:
def __init__(self, typez, params):
    self.type = typez  # eq, regex, lt, gt etc.
    if isinstance(params, str):
        params = (params,)
    self.params = params  # e.g. ('highway', 'primary')
    if typez == "regex":
        self.regex = re.compile(self.params[0], re.I)
        self.compiled_regex = ""
def get_interesting_tags(self):
if self.params[0][:2] == "::":
return []
return set([self.params[0]])
def get_numerics(self):
if self.type in ("<", ">", ">=", "<="):
return self.params[0]
else:
return False
def test(self, tags):
"""
Test a hash against this condition
"""
t = self.type
params = self.params
if t == 'eq': # don't compare tags against sublayers
if params[0][:2] == "::":
return True
try:
if t == 'eq':
return tags[params[0]]==params[1]
if t == 'ne':
return tags.get(params[0], "")!=params[1]
if t == 'regex':
return bool(self.regex.match(tags[params[0]]))
if t == 'true':
    return tags[params[0]] in ('true', 'yes', '1')
if t == 'false':
    return tags.get(params[0], "") in ('false', 'no', '')
if t == 'set':
if params[0] in tags:
return tags[params[0]]!=''
return False
if t == 'unset':
if params[0] in tags:
return tags[params[0]]==''
return True
if t == '<':
return (Number(tags[params[0]])< Number(params[1]))
if t == '<=':
return (Number(tags[params[0]])<=Number(params[1]))
if t == '>':
return (Number(tags[params[0]])> Number(params[1]))
if t == '>=':
return (Number(tags[params[0]])>=Number(params[1]))
except KeyError:
pass
return False
def inverse(self):
"""
Get a not-A for condition A
"""
t = self.type
params = self.params
if t == 'regex':
    ### FIXME: learn how to invert regexes
    return Condition("regex", params)
try:
    return Condition(INVERSIONS[t], params)
except KeyError:
    pass
return self
def get_sql(self):
#params = [re.escape(x) for x in self.params]
params = self.params
t = self.type
if t == 'eq': # don't compare tags against sublayers
if params[0][:2] == "::":
return ("","")
try:
if t == 'eq':
return params[0], '"%s" = \'%s\''%(params[0], params[1])
if t == 'ne':
return params[0], '("%s" != \'%s\' or "%s" IS NULL)'%(params[0], params[1],params[0])
if t == 'regex':
return params[0], '"%s" ~ \'%s\''%(params[0],params[1].replace("'","\\'"))
if t == 'true':
return params[0], '"%s" IN (\'true\', \'yes\', \'1\')'%(params[0])
if t == 'false':
    return params[0], '"%s" NOT IN (\'true\', \'yes\', \'1\')'%(params[0])
if t == 'set':
return params[0], '"%s" IS NOT NULL'%(params[0])
if t == 'unset':
return params[0], '"%s" IS NULL'%(params[0])
if t == '<':
return params[0], """(CASE WHEN "%s" ~ E'^[-]?[[:digit:]]+([.][[:digit:]]+)?$' THEN CAST ("%s" AS FLOAT) < %s ELSE false END) """%(params[0],params[0],params[1])
if t == '<=':
return params[0], """(CASE WHEN "%s" ~ E'^[-]?[[:digit:]]+([.][[:digit:]]+)?$' THEN CAST ("%s" AS FLOAT) <= %s ELSE false END)"""%(params[0],params[0],params[1])
if t == '>':
return params[0], """(CASE WHEN "%s" ~ E'^[-]?[[:digit:]]+([.][[:digit:]]+)?$' THEN CAST ("%s" AS FLOAT) > %s ELSE false END) """%(params[0],params[0],params[1])
if t == '>=':
return params[0], """(CASE WHEN "%s" ~ E'^[-]?[[:digit:]]+([.][[:digit:]]+)?$' THEN CAST ("%s" AS FLOAT) >= %s ELSE false END) """%(params[0],params[0],params[1])
except KeyError:
pass
def get_mapnik_filter(self):
#params = [re.escape(x) for x in self.params]
params = self.params
t = self.type
if t == 'eq': # don't compare tags against sublayers
if params[0][:2] == "::":
return ''
try:
if t == 'eq':
return '[%s] = \'%s\''%(params[0], params[1])
if t == 'ne':
return 'not([%s] = \'%s\')'%(params[0], params[1])
if t == 'regex':
return '[%s].match(\'%s\')'%(params[0], params[1].replace("'","\\'"))
if t == 'true':
return '[%s] = \'yes\''%(params[0])
if t == 'false':
    return '[%s] = \'no\''%(params[0])
if t == 'set':
return '[%s] != \'\''%(params[0])
if t == 'unset':
return 'not([%s] != \'\')'%(params[0])
if t == '<':
return '[%s__num] < %s'%(params[0], float(params[1]))
if t == '<=':
return '[%s__num] <= %s'%(params[0], float(params[1]))
if t == '>':
return '[%s__num] > %s'%(params[0], float(params[1]))
if t == '>=':
return '[%s__num] >= %s'%(params[0], float(params[1]))
#return ""
except KeyError:
pass
def __repr__(self):
return "%s %s "%(self.type, repr(self.params))
def __eq__(self, a):
return (self.params == a.params) and (self.type == a.type)
def and_with(self, c2):
"""
merges two rules with AND.
"""
#TODO: possible other minimizations
if c2.params[0] == self.params[0]:
if c2.params == self.params:
if c2.type == INVERSIONS[self.type]: # for example, eq AND ne = 0
return False
if c2.type == self.type:
return (self,)
if self.type == ">=" and c2.type == "<=": # a<=2 and a>=2 --> a=2
return (Condition ("eq", self.params),)
if self.type == "<=" and c2.type == ">=":
return (Condition ("eq", self.params),)
if self.type == ">" and c2.type == "<":
return False
if self.type == "<" and c2.type == ">":
return False
if c2.type == "eq" and self.type in ("ne", "<", ">"):
if c2.params[1] != self.params[1]:
return (c2,)
if self.type == "eq" and c2.type in ("ne", "<", ">"):
if c2.params[1] != self.params[1]:
return (self,)
if self.type == "eq" and c2.type == "eq":
if c2.params[1] != self.params[1]:
return False
if c2.type == "set" and self.type in ("eq","ne","regex", "<", "<=", ">", ">="): # a is set and a == b -> a == b
return (self,)
if c2.type == "unset" and self.type in ("eq","ne","regex", "<", "<=", ">", ">="): # a is unset and a == b -> impossible
return False
if self.type == "set" and c2.type in ("eq","ne","regex", "<", "<=", ">", ">="):
return (c2,)
if self.type == "unset" and c2.type in ("eq","ne","regex", "<", "<=", ">", ">="):
return False
return self, c2
def Number(tt):
"""
Wrap float() not to produce exceptions
"""
try:
return float(tt)
except ValueError:
return 0
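# Illustrative sketch: evaluating and inverting conditions against a tag hash.
if __name__ == "__main__":
    c = Condition("eq", ("highway", "primary"))
    print(c.test({"highway": "primary"}))              # True
    print(c.test({"highway": "secondary"}))            # False
    print(c.inverse().test({"highway": "secondary"}))  # True ('ne' condition)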
|
|
# Copyright (c) 2012-2016, Neville-Neil Consulting
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Neville-Neil Consulting nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: George V. Neville-Neil
#
# Description: An encoding for the Precision Time Protocol (IEEE-1588-2008)
# aka PTPv2.
import pcs
import time
PTP_SUBDOMAIN_NAME_LENGTH = 16
PTP_CODE_STRING_LENGTH = 4
PTP_UUID_LENGTH = 6
class Announce(pcs.Packet):
"""PTP Announce"""
_layout = pcs.Layout()
def __init__(self, bytes = None, timestamp = None, **kv):
originTimestampSeconds = pcs.Field("originTimestampSeconds", 48)
originTimestampNanoSeconds = pcs.Field("originTimestampNanoSeconds", 32)
currentUTCOffset = pcs.Field("currentUTCOffset", 16)
reserved0 = pcs.Field("reserved0", 8, default = 0)
grandmasterPriority1 = pcs.Field("grandmasterPriority1", 8)
grandmasterClockQuality = pcs.Field("grandmasterClockQuality", 32)
grandmasterPriority2 = pcs.Field("grandmasterPriority2", 8)
grandmasterClockIdentity = pcs.StringField("grandmasterClockIdentity", 8)
stepsRemoved = pcs.Field("stepsRemoved", 16)
timeSource = pcs.Field("timeSource", 8)
pcs.Packet.__init__(self, [originTimestampSeconds,
originTimestampNanoSeconds,
currentUTCOffset,
reserved0,
grandmasterPriority1,
grandmasterClockQuality,
grandmasterPriority2,
grandmasterClockIdentity,
stepsRemoved,
timeSource], bytes = bytes, **kv)
self.description = "PTP Announce"
if timestamp is None:
self.timestamp = time.time()
else:
self.timestamp = timestamp
if (bytes is not None):
self.data = self.next(bytes[self.sizeof():len(bytes)],
timestamp = timestamp)
else:
self.data = None
class Sync(pcs.Packet):
"""PTP Sync"""
_layout = pcs.Layout()
def __init__(self, bytes = None, timestamp = None, **kv):
originTimestampSeconds = pcs.Field("originTimestampSeconds", 48)
originTimestampNanoSeconds = pcs.Field("originTimestampNanoSeconds", 32)
pcs.Packet.__init__(self, [originTimestampSeconds,
originTimestampNanoSeconds],
bytes = bytes, **kv)
self.description = "PTP Sync"
if timestamp is None:
self.timestamp = time.time()
else:
self.timestamp = timestamp
if (bytes is not None):
self.data = self.next(bytes[self.sizeof():len(bytes)],
timestamp = timestamp)
else:
self.data = None
#
# NOTE: Sync and DelayRequest messages have the same format, but keeping
# them as separate classes makes it far easier to write scripts that
# differentiate between the two message types. Keep this class and the
# Sync class in sync or you will have significant problems with your code.
class DelayRequest(pcs.Packet):
"""PTP DelayRequest"""
_layout = pcs.Layout()
def __init__(self, bytes = None, timestamp = None, **kv):
originTimestampSeconds = pcs.Field("originTimestampSeconds", 48)
originTimestampNanoSeconds = pcs.Field("originTimestampNanoSeconds", 32)
pcs.Packet.__init__(self, [originTimestampSeconds,
originTimestampNanoSeconds],
bytes = bytes, **kv)
self.description = "PTP DelayRequest"
if timestamp is None:
self.timestamp = time.time()
else:
self.timestamp = timestamp
if (bytes is not None):
self.data = self.next(bytes[self.sizeof():len(bytes)],
timestamp = timestamp)
else:
self.data = None
#
# All followup messages have an associated common header.
# See ptpCommon() at the head of this file.
class Followup(pcs.Packet):
"""PTP Followup"""
_layout = pcs.Layout()
def __init__(self, bytes = None, timestamp = None, **kv):
"""Followup Header """
preciseOriginTimestampSeconds = pcs.Field("preciseOriginTimestampSeconds",
48)
preciseOriginTimestampNanoSeconds = pcs.Field(
"preciseOriginTimestampNanoSeconds", 32)
pcs.Packet.__init__(self, [preciseOriginTimestampSeconds,
preciseOriginTimestampNanoSeconds],
bytes = bytes, **kv)
self.description = "Followup"
if timestamp is None:
self.timestamp = time.time()
else:
self.timestamp = timestamp
if (bytes is not None):
self.data = self.next(bytes[self.sizeof():len(bytes)],
timestamp = timestamp)
else:
self.data = None
#
# All delay response messages have an associated common header. See
# ptpCommon() at the head of this file.
class DelayResponse(pcs.Packet):
"""PTP Delay Response"""
_layout = pcs.Layout()
def __init__(self, bytes = None, timestamp = None, **kv):
"""Delay Response"""
receiveTimestampSeconds = pcs.Field("receiveTimestampSeconds", 48)
receiveTimestampNanoSeconds = pcs.Field("receiveTimestampNanoSeconds", 32)
requestingPortIdentity = pcs.Field("requestingPortIdentity", 80)
pcs.Packet.__init__(self, [receiveTimestampSeconds,
receiveTimestampNanoSeconds,
requestingPortIdentity],
bytes = bytes, **kv)
self.description = "Delay Response "
if timestamp is None:
self.timestamp = time.time()
else:
self.timestamp = timestamp
if (bytes is not None):
self.data = self.next(bytes[self.sizeof():len(bytes)],
timestamp = timestamp)
else:
self.data = None
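#
# Illustrative sketch (untested assumption about the pcs API): pcs exposes
# each Field as an attribute on the packet, so a Sync message can be built
# roughly like this:
#
#   sync = Sync()
#   sync.originTimestampSeconds = 1234
#   sync.originTimestampNanoSeconds = 567000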
|
|
# We cannot use pytest here, because we run
# build_tools/azure/test_pytest_soft_dependency.sh on these
# tests to make sure estimator_checks works without pytest.
import unittest
import sys
import numpy as np
import scipy.sparse as sp
import joblib
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils import deprecated
from sklearn.utils._testing import (
raises,
assert_warns,
ignore_warnings,
MinimalClassifier,
MinimalRegressor,
MinimalTransformer,
SkipTest,
)
from sklearn.utils.estimator_checks import check_estimator, _NotAnArray
from sklearn.utils.estimator_checks \
import check_class_weight_balanced_linear_classifier
from sklearn.utils.estimator_checks import set_random_state
from sklearn.utils.estimator_checks import _set_checking_parameters
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.utils.estimator_checks import check_fit_score_takes_y
from sklearn.utils.estimator_checks import check_no_attributes_set_in_init
from sklearn.utils.estimator_checks import check_classifier_data_not_an_array
from sklearn.utils.estimator_checks import check_regressor_data_not_an_array
from sklearn.utils.estimator_checks import \
check_estimator_get_tags_default_keys
from sklearn.utils.validation import check_is_fitted
from sklearn.utils.estimator_checks import check_outlier_corruption
from sklearn.utils.fixes import np_version, parse_version
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression, SGDClassifier
from sklearn.mixture import GaussianMixture
from sklearn.cluster import MiniBatchKMeans
from sklearn.decomposition import NMF
from sklearn.linear_model import MultiTaskElasticNet, LogisticRegression
from sklearn.svm import SVC, NuSVC
from sklearn.neighbors import KNeighborsRegressor
from sklearn.utils.validation import check_array
from sklearn.utils import all_estimators
from sklearn.exceptions import SkipTestWarning
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(ClassifierMixin, BaseEstimator):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class ChangesDict(BaseEstimator):
def __init__(self, key=0):
self.key = key
def fit(self, X, y=None):
X, y = self._validate_data(X, y)
return self
def predict(self, X):
X = check_array(X)
self.key = 1000
return np.ones(X.shape[0])
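# Illustrative note: check_estimator snapshots an estimator's __dict__ and
# fails when predict() mutates it, which ChangesDict above is built to
# trigger. A sketch of how such a fixture is exercised:
#
#   with raises(AssertionError):
#       check_estimator(ChangesDict())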
class SetsWrongAttribute(BaseEstimator):
def __init__(self, acceptable_key=0):
self.acceptable_key = acceptable_key
def fit(self, X, y=None):
self.wrong_attribute = 0
X, y = self._validate_data(X, y)
return self
class ChangesWrongAttribute(BaseEstimator):
def __init__(self, wrong_attribute=0):
self.wrong_attribute = wrong_attribute
def fit(self, X, y=None):
self.wrong_attribute = 1
X, y = self._validate_data(X, y)
return self
class ChangesUnderscoreAttribute(BaseEstimator):
def fit(self, X, y=None):
self._good_attribute = 1
X, y = self._validate_data(X, y)
return self
class RaisesErrorInSetParams(BaseEstimator):
def __init__(self, p=0):
self.p = p
def set_params(self, **kwargs):
if 'p' in kwargs:
p = kwargs.pop('p')
if p < 0:
raise ValueError("p can't be less than 0")
self.p = p
return super().set_params(**kwargs)
def fit(self, X, y=None):
X, y = self._validate_data(X, y)
return self
class HasMutableParameters(BaseEstimator):
def __init__(self, p=object()):
self.p = p
def fit(self, X, y=None):
X, y = self._validate_data(X, y)
return self
class HasImmutableParameters(BaseEstimator):
    # Note that `object` here is the class itself (an uninstantiated class),
    # which is treated as immutable.
def __init__(self, p=42, q=np.int32(42), r=object):
self.p = p
self.q = q
self.r = r
def fit(self, X, y=None):
X, y = self._validate_data(X, y)
return self
class ModifiesValueInsteadOfRaisingError(BaseEstimator):
def __init__(self, p=0):
self.p = p
def set_params(self, **kwargs):
if 'p' in kwargs:
p = kwargs.pop('p')
if p < 0:
p = 0
self.p = p
return super().set_params(**kwargs)
def fit(self, X, y=None):
X, y = self._validate_data(X, y)
return self
class ModifiesAnotherValue(BaseEstimator):
def __init__(self, a=0, b='method1'):
self.a = a
self.b = b
def set_params(self, **kwargs):
if 'a' in kwargs:
a = kwargs.pop('a')
self.a = a
if a is None:
kwargs.pop('b')
self.b = 'method2'
return super().set_params(**kwargs)
def fit(self, X, y=None):
X, y = self._validate_data(X, y)
return self
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = self._validate_data(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = self._validate_data(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = self._validate_data(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
check_is_fitted(self)
X = check_array(X)
return np.ones(X.shape[0])
class NoSampleWeightPandasSeriesType(BaseEstimator):
def fit(self, X, y, sample_weight=None):
# Convert data
X, y = self._validate_data(
X, y,
accept_sparse=("csr", "csc"),
multi_output=True,
y_numeric=True)
# Function is only called after we verify that pandas is installed
from pandas import Series
if isinstance(sample_weight, Series):
raise ValueError("Estimator does not accept 'sample_weight'"
"of type pandas.Series")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class BadBalancedWeightsClassifier(BaseBadClassifier):
def __init__(self, class_weight=None):
self.class_weight = class_weight
def fit(self, X, y):
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import compute_class_weight
label_encoder = LabelEncoder().fit(y)
classes = label_encoder.classes_
class_weight = compute_class_weight(self.class_weight, classes=classes,
y=y)
# Intentionally modify the balanced class_weight
# to simulate a bug and raise an exception
if self.class_weight == "balanced":
class_weight += 1.
# Simply assigning coef_ to the class_weight
self.coef_ = class_weight
return self
class BadTransformerWithoutMixin(BaseEstimator):
def fit(self, X, y=None):
X = self._validate_data(X)
return self
def transform(self, X):
X = check_array(X)
return X
class NotInvariantPredict(BaseEstimator):
def fit(self, X, y):
# Convert data
X, y = self._validate_data(
X, y,
accept_sparse=("csr", "csc"),
multi_output=True,
y_numeric=True)
return self
def predict(self, X):
        # return ones if X has more than one sample, otherwise zeros
X = check_array(X)
if X.shape[0] > 1:
return np.ones(X.shape[0])
return np.zeros(X.shape[0])
class NotInvariantSampleOrder(BaseEstimator):
def fit(self, X, y):
X, y = self._validate_data(
X, y,
accept_sparse=("csr", "csc"),
multi_output=True,
y_numeric=True)
# store the original X to check for sample order later
self._X = X
return self
def predict(self, X):
X = check_array(X)
# if the input contains the same elements but different sample order,
# then just return zeros.
if (np.array_equiv(np.sort(X, axis=0), np.sort(self._X, axis=0)) and
(X != self._X).any()):
return np.zeros(X.shape[0])
return X[:, 0]
class LargeSparseNotSupportedClassifier(BaseEstimator):
def fit(self, X, y):
X, y = self._validate_data(
X, y,
accept_sparse=("csr", "csc", "coo"),
accept_large_sparse=True,
multi_output=True,
y_numeric=True)
if sp.issparse(X):
if X.getformat() == "coo":
if X.row.dtype == "int64" or X.col.dtype == "int64":
raise ValueError(
"Estimator doesn't support 64-bit indices")
elif X.getformat() in ["csc", "csr"]:
assert "int64" not in (X.indices.dtype, X.indptr.dtype),\
"Estimator doesn't support 64-bit indices"
return self
class SparseTransformer(BaseEstimator):
def fit(self, X, y=None):
self.X_shape_ = self._validate_data(X).shape
return self
def fit_transform(self, X, y=None):
return self.fit(X, y).transform(X)
def transform(self, X):
X = check_array(X)
if X.shape[1] != self.X_shape_[1]:
raise ValueError('Bad number of features')
return sp.csr_matrix(X)
class EstimatorInconsistentForPandas(BaseEstimator):
def fit(self, X, y):
try:
from pandas import DataFrame
if isinstance(X, DataFrame):
self.value_ = X.iloc[0, 0]
else:
X = check_array(X)
self.value_ = X[1, 0]
return self
except ImportError:
X = check_array(X)
self.value_ = X[1, 0]
return self
def predict(self, X):
X = check_array(X)
return np.array([self.value_] * X.shape[0])
class UntaggedBinaryClassifier(SGDClassifier):
# Toy classifier that only supports binary classification, will fail tests.
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
super().fit(X, y, coef_init, intercept_init, sample_weight)
if len(self.classes_) > 2:
raise ValueError('Only 2 classes are supported')
return self
def partial_fit(self, X, y, classes=None, sample_weight=None):
super().partial_fit(X=X, y=y, classes=classes,
sample_weight=sample_weight)
if len(self.classes_) > 2:
raise ValueError('Only 2 classes are supported')
return self
class TaggedBinaryClassifier(UntaggedBinaryClassifier):
# Toy classifier that only supports binary classification.
def _more_tags(self):
return {'binary_only': True}
class EstimatorMissingDefaultTags(BaseEstimator):
def _get_tags(self):
tags = super()._get_tags().copy()
del tags["allow_nan"]
return tags
class RequiresPositiveYRegressor(LinearRegression):
def fit(self, X, y):
X, y = self._validate_data(X, y, multi_output=True)
if (y <= 0).any():
raise ValueError('negative y values not supported!')
return super().fit(X, y)
def _more_tags(self):
return {"requires_positive_y": True}
class PoorScoreLogisticRegression(LogisticRegression):
def decision_function(self, X):
return super().decision_function(X) + 1
def _more_tags(self):
return {"poor_score": True}
def test_not_an_array_array_function():
if np_version < parse_version('1.17'):
raise SkipTest("array_function protocol not supported in numpy <1.17")
not_array = _NotAnArray(np.ones(10))
msg = "Don't want to call array_function sum!"
with raises(TypeError, match=msg):
np.sum(not_array)
# always returns True
assert np.may_share_memory(not_array, None)
def test_check_fit_score_takes_y_works_on_deprecated_fit():
# Tests that check_fit_score_takes_y works on a class with
# a deprecated fit method
class TestEstimatorWithDeprecatedFitMethod(BaseEstimator):
@deprecated("Deprecated for the purpose of testing "
"check_fit_score_takes_y")
def fit(self, X, y):
return self
check_fit_score_takes_y("test", TestEstimatorWithDeprecatedFitMethod())
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "Passing a class was deprecated"
with raises(TypeError, match=msg):
check_estimator(object)
msg = (
"Parameter 'p' of estimator 'HasMutableParameters' is of type "
"object which is not allowed"
)
# check that the "default_constructible" test checks for mutable parameters
check_estimator(HasImmutableParameters()) # should pass
with raises(AssertionError, match=msg):
check_estimator(HasMutableParameters())
# check that values returned by get_params match set_params
msg = "get_params result does not match what was passed to set_params"
with raises(AssertionError, match=msg):
check_estimator(ModifiesValueInsteadOfRaisingError())
assert_warns(UserWarning, check_estimator, RaisesErrorInSetParams())
with raises(AssertionError, match=msg):
check_estimator(ModifiesAnotherValue())
# check that we have a fit method
msg = "object has no attribute 'fit'"
with raises(AttributeError, match=msg):
check_estimator(BaseEstimator())
# check that fit does input validation
msg = "Did not raise"
with raises(AssertionError, match=msg):
check_estimator(BaseBadClassifier())
# check that sample_weights in fit accepts pandas.Series type
try:
from pandas import Series # noqa
msg = ("Estimator NoSampleWeightPandasSeriesType raises error if "
"'sample_weight' parameter is of type pandas.Series")
with raises(ValueError, match=msg):
check_estimator(NoSampleWeightPandasSeriesType())
except ImportError:
pass
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
with raises(AssertionError, match=msg):
check_estimator(NoCheckinPredict())
# check that estimator state does not change
# at transform/predict/predict_proba time
msg = 'Estimator changes __dict__ during predict'
with raises(AssertionError, match=msg):
check_estimator(ChangesDict())
    # check that `fit` only changes attributes that
# are private (start with an _ or end with a _).
msg = ('Estimator ChangesWrongAttribute should not change or mutate '
'the parameter wrong_attribute from 0 to 1 during fit.')
with raises(AssertionError, match=msg):
check_estimator(ChangesWrongAttribute())
check_estimator(ChangesUnderscoreAttribute())
# check that `fit` doesn't add any public attribute
msg = (r'Estimator adds public attribute\(s\) during the fit method.'
' Estimators are only allowed to add private attributes'
' either started with _ or ended'
' with _ but wrong_attribute added')
with raises(AssertionError, match=msg):
check_estimator(SetsWrongAttribute())
# check for sample order invariance
name = NotInvariantSampleOrder.__name__
method = 'predict'
msg = ("{method} of {name} is not invariant when applied to a dataset"
"with different sample order.").format(method=method, name=name)
with raises(AssertionError, match=msg):
check_estimator(NotInvariantSampleOrder())
# check for invariant method
name = NotInvariantPredict.__name__
method = 'predict'
msg = ("{method} of {name} is not invariant when applied "
"to a subset.").format(method=method, name=name)
with raises(AssertionError, match=msg):
check_estimator(NotInvariantPredict())
# check for sparse matrix input handling
name = NoSparseClassifier.__name__
msg = "Estimator %s doesn't seem to fail gracefully on sparse data" % name
with raises(AssertionError, match=msg):
check_estimator(NoSparseClassifier())
# Large indices test on bad estimator
msg = ('Estimator LargeSparseNotSupportedClassifier doesn\'t seem to '
r'support \S{3}_64 matrix, and is not failing gracefully.*')
with raises(AssertionError, match=msg):
check_estimator(LargeSparseNotSupportedClassifier())
# does error on binary_only untagged estimator
msg = 'Only 2 classes are supported'
with raises(ValueError, match=msg):
check_estimator(UntaggedBinaryClassifier())
# non-regression test for estimators transforming to sparse data
check_estimator(SparseTransformer())
# doesn't error on actual estimator
check_estimator(LogisticRegression())
check_estimator(LogisticRegression(C=0.01))
check_estimator(MultiTaskElasticNet())
# doesn't error on binary_only tagged estimator
check_estimator(TaggedBinaryClassifier())
# Check regressor with requires_positive_y estimator tag
msg = 'negative y values not supported!'
with raises(ValueError, match=msg):
check_estimator(RequiresPositiveYRegressor())
# Does not raise error on classifier with poor_score tag
check_estimator(PoorScoreLogisticRegression())
def test_check_outlier_corruption():
# should raise AssertionError
decision = np.array([0., 1., 1.5, 2.])
with raises(AssertionError):
check_outlier_corruption(1, 2, decision)
# should pass
decision = np.array([0., 1., 1., 2.])
check_outlier_corruption(1, 2, decision)
def test_check_estimator_transformer_no_mixin():
# check that TransformerMixin is not required for transformer tests to run
with raises(AttributeError, '.*fit_transform.*'):
check_estimator(BadTransformerWithoutMixin())
def test_check_estimator_clones():
# check that check_estimator doesn't modify the estimator it receives
from sklearn.datasets import load_iris
iris = load_iris()
for Estimator in [GaussianMixture, LinearRegression,
RandomForestClassifier, NMF, SGDClassifier,
MiniBatchKMeans]:
with ignore_warnings(category=FutureWarning):
# when 'est = SGDClassifier()'
est = Estimator()
_set_checking_parameters(est)
set_random_state(est)
# without fitting
old_hash = joblib.hash(est)
check_estimator(est)
assert old_hash == joblib.hash(est)
with ignore_warnings(category=FutureWarning):
# when 'est = SGDClassifier()'
est = Estimator()
_set_checking_parameters(est)
set_random_state(est)
# with fitting
est.fit(iris.data + 10, iris.target)
old_hash = joblib.hash(est)
check_estimator(est)
assert old_hash == joblib.hash(est)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "Did not raise"
with raises(AssertionError, match=msg):
check_estimators_unfitted("estimator", NoSparseClassifier())
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier())
def test_check_no_attributes_set_in_init():
class NonConformantEstimatorPrivateSet(BaseEstimator):
def __init__(self):
self.you_should_not_set_this_ = None
class NonConformantEstimatorNoParamSet(BaseEstimator):
def __init__(self, you_should_set_this_=None):
pass
msg = (
"Estimator estimator_name should not set any"
" attribute apart from parameters during init."
r" Found attributes \['you_should_not_set_this_'\]."
)
with raises(AssertionError, match=msg):
check_no_attributes_set_in_init('estimator_name',
NonConformantEstimatorPrivateSet())
msg = (
"Estimator estimator_name should store all parameters as an attribute"
" during init"
)
with raises(AttributeError, match=msg):
check_no_attributes_set_in_init('estimator_name',
NonConformantEstimatorNoParamSet())
def test_check_estimator_pairwise():
# check that check_estimator() works on estimator with _pairwise
# kernel or metric
# test precomputed kernel
est = SVC(kernel='precomputed')
check_estimator(est)
# test precomputed metric
est = KNeighborsRegressor(metric='precomputed')
check_estimator(est)
def test_check_classifier_data_not_an_array():
with raises(AssertionError, match='Not equal to tolerance'):
check_classifier_data_not_an_array('estimator_name',
EstimatorInconsistentForPandas())
def test_check_regressor_data_not_an_array():
with raises(AssertionError, match='Not equal to tolerance'):
check_regressor_data_not_an_array('estimator_name',
EstimatorInconsistentForPandas())
def test_check_estimator_get_tags_default_keys():
estimator = EstimatorMissingDefaultTags()
err_msg = (r"EstimatorMissingDefaultTags._get_tags\(\) is missing entries"
r" for the following default tags: {'allow_nan'}")
with raises(AssertionError, match=err_msg):
check_estimator_get_tags_default_keys(estimator.__class__.__name__,
estimator)
# noop check when _get_tags is not available
estimator = MinimalTransformer()
check_estimator_get_tags_default_keys(
estimator.__class__.__name__, estimator
)
def run_tests_without_pytest():
"""Runs the tests in this file without using pytest.
"""
main_module = sys.modules['__main__']
test_functions = [getattr(main_module, name) for name in dir(main_module)
if name.startswith('test_')]
test_cases = [unittest.FunctionTestCase(fn) for fn in test_functions]
suite = unittest.TestSuite()
suite.addTests(test_cases)
runner = unittest.TextTestRunner()
runner.run(suite)
def test_check_class_weight_balanced_linear_classifier():
# check that ill-computed balanced weights raises an exception
msg = (
"Classifier estimator_name is not computing class_weight=balanced "
"properly"
)
with raises(AssertionError, match=msg):
check_class_weight_balanced_linear_classifier(
'estimator_name',
BadBalancedWeightsClassifier
)
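# Minimal usage sketch (illustrative, not part of the original test suite):
# a single check can also be run directly against a compliant estimator,
# which is handy for reproducing one failure in isolation.
def _demo_run_single_check():
    check_fit_score_takes_y("LogisticRegression", LogisticRegression())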
def test_all_estimators_all_public():
    # all_estimators should not fail when pytest is not installed and should
    # return only public estimators
    estimators = all_estimators()
    # all_estimators yields (name, class) pairs; publicness is judged by name
    for name, _ in estimators:
        assert not name.startswith("_")
def test_xfail_ignored_in_check_estimator():
# Make sure checks marked as xfail are just ignored and not run by
# check_estimator(), but still raise a warning.
assert_warns(SkipTestWarning, check_estimator, NuSVC())
# FIXME: this test should be uncommented when the checks will be granular
# enough. In 0.24, these tests fail due to low estimator performance.
def test_minimal_class_implementation_checks():
# Check that third-party library can run tests without inheriting from
# BaseEstimator.
# FIXME
raise SkipTest
minimal_estimators = [
MinimalTransformer(), MinimalRegressor(), MinimalClassifier()
]
for estimator in minimal_estimators:
check_estimator(estimator)
if __name__ == '__main__':
    # This module is run as a script to check that we have no dependency on
    # pytest for estimator checks. Keep this block at the end of the file so
    # that every test function is defined before the suite is collected.
    run_tests_without_pytest()
|
|
"""Support for installing and building the "wheel" binary package format.
"""
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
from __future__ import absolute_import
import collections
import compileall
import csv
import logging
import os.path
import re
import shutil
import stat
import sys
import warnings
from base64 import urlsafe_b64encode
from zipfile import ZipFile
from pip._vendor import pkg_resources
from pip._vendor.distlib.scripts import ScriptMaker
from pip._vendor.distlib.util import get_export_entry
from pip._vendor.six import StringIO
from pip._internal.exceptions import InstallationError
from pip._internal.locations import get_major_minor_version
from pip._internal.utils.misc import captured_stdout, ensure_dir, hash_file
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.unpacking import unpack_file
from pip._internal.utils.wheel import parse_wheel
if MYPY_CHECK_RUNNING:
from email.message import Message
from typing import (
Dict, List, Optional, Sequence, Tuple, IO, Text, Any,
Iterable, Callable, Set,
)
from pip._internal.models.scheme import Scheme
InstalledCSVRow = Tuple[str, ...]
logger = logging.getLogger(__name__)
def normpath(src, p):
# type: (str, str) -> str
return os.path.relpath(src, p).replace(os.path.sep, '/')
def rehash(path, blocksize=1 << 20):
# type: (str, int) -> Tuple[str, str]
"""Return (encoded_digest, length) for path using hashlib.sha256()"""
h, length = hash_file(path, blocksize)
digest = 'sha256=' + urlsafe_b64encode(
h.digest()
).decode('latin1').rstrip('=')
# unicode/str python2 issues
return (digest, str(length)) # type: ignore
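# Illustrative sketch (hypothetical helper, not part of the original module):
# the digest format written to RECORD is the urlsafe base64 of the raw sha256
# digest with trailing '=' padding stripped, per PEP 376 / PEP 427.
def _demo_record_digest(data=b"hello"):
    # type: (bytes) -> str
    import hashlib
    return 'sha256=' + urlsafe_b64encode(
        hashlib.sha256(data).digest()
    ).decode('latin1').rstrip('=')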
def open_for_csv(name, mode):
# type: (str, Text) -> IO[Any]
if sys.version_info[0] < 3:
nl = {} # type: Dict[str, Any]
bin = 'b'
else:
nl = {'newline': ''} # type: Dict[str, Any]
bin = ''
return open(name, mode + bin, **nl)
def fix_script(path):
# type: (str) -> Optional[bool]
"""Replace #!python with #!/path/to/python
Return True if file was changed.
"""
# XXX RECORD hashes will need to be updated
if os.path.isfile(path):
with open(path, 'rb') as script:
firstline = script.readline()
if not firstline.startswith(b'#!python'):
return False
exename = sys.executable.encode(sys.getfilesystemencoding())
firstline = b'#!' + exename + os.linesep.encode("ascii")
rest = script.read()
with open(path, 'wb') as script:
script.write(firstline)
script.write(rest)
return True
return None
def wheel_root_is_purelib(metadata):
# type: (Message) -> bool
return metadata.get("Root-Is-Purelib", "").lower() == "true"
def get_entrypoints(filename):
# type: (str) -> Tuple[Dict[str, str], Dict[str, str]]
if not os.path.exists(filename):
return {}, {}
# This is done because you can pass a string to entry_points wrappers which
# means that they may or may not be valid INI files. The attempt here is to
# strip leading and trailing whitespace in order to make them valid INI
# files.
with open(filename) as fp:
data = StringIO()
for line in fp:
data.write(line.strip())
data.write("\n")
data.seek(0)
# get the entry points and then the script names
entry_points = pkg_resources.EntryPoint.parse_map(data)
console = entry_points.get('console_scripts', {})
gui = entry_points.get('gui_scripts', {})
def _split_ep(s):
# type: (pkg_resources.EntryPoint) -> Tuple[str, str]
"""get the string representation of EntryPoint,
remove space and split on '='
"""
split_parts = str(s).replace(" ", "").split("=")
return split_parts[0], split_parts[1]
# convert the EntryPoint objects into strings with module:function
console = dict(_split_ep(v) for v in console.values())
gui = dict(_split_ep(v) for v in gui.values())
return console, gui
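# Illustrative: given an entry_points.txt containing
#
#   [console_scripts]
#   mytool = mypkg.cli:main
#
# get_entrypoints() returns ({'mytool': 'mypkg.cli:main'}, {}).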
def message_about_scripts_not_on_PATH(scripts):
# type: (Sequence[str]) -> Optional[str]
"""Determine if any scripts are not on PATH and format a warning.
Returns a warning message if one or more scripts are not on PATH,
otherwise None.
"""
if not scripts:
return None
# Group scripts by the path they were installed in
grouped_by_dir = collections.defaultdict(set) # type: Dict[str, Set[str]]
for destfile in scripts:
parent_dir = os.path.dirname(destfile)
script_name = os.path.basename(destfile)
grouped_by_dir[parent_dir].add(script_name)
# We don't want to warn for directories that are on PATH.
not_warn_dirs = [
os.path.normcase(i).rstrip(os.sep) for i in
os.environ.get("PATH", "").split(os.pathsep)
]
# If an executable sits with sys.executable, we don't warn for it.
# This covers the case of venv invocations without activating the venv.
not_warn_dirs.append(os.path.normcase(os.path.dirname(sys.executable)))
warn_for = {
parent_dir: scripts for parent_dir, scripts in grouped_by_dir.items()
if os.path.normcase(parent_dir) not in not_warn_dirs
} # type: Dict[str, Set[str]]
if not warn_for:
return None
# Format a message
msg_lines = []
for parent_dir, dir_scripts in warn_for.items():
sorted_scripts = sorted(dir_scripts) # type: List[str]
if len(sorted_scripts) == 1:
start_text = "script {} is".format(sorted_scripts[0])
else:
start_text = "scripts {} are".format(
", ".join(sorted_scripts[:-1]) + " and " + sorted_scripts[-1]
)
msg_lines.append(
"The {} installed in '{}' which is not on PATH."
.format(start_text, parent_dir)
)
last_line_fmt = (
"Consider adding {} to PATH or, if you prefer "
"to suppress this warning, use --no-warn-script-location."
)
if len(msg_lines) == 1:
msg_lines.append(last_line_fmt.format("this directory"))
else:
msg_lines.append(last_line_fmt.format("these directories"))
# Add a note if any directory starts with ~
warn_for_tilde = any(
i[0] == "~" for i in os.environ.get("PATH", "").split(os.pathsep) if i
)
if warn_for_tilde:
tilde_warning_msg = (
"NOTE: The current PATH contains path(s) starting with `~`, "
"which may not be expanded by all applications."
)
msg_lines.append(tilde_warning_msg)
# Returns the formatted multiline message
return "\n".join(msg_lines)
def sorted_outrows(outrows):
# type: (Iterable[InstalledCSVRow]) -> List[InstalledCSVRow]
"""Return the given rows of a RECORD file in sorted order.
Each row is a 3-tuple (path, hash, size) and corresponds to a record of
a RECORD file (see PEP 376 and PEP 427 for details). For the rows
    passed to this function, the size can be an integer (as an int or a str),
    or the empty string.
"""
# Normally, there should only be one row per path, in which case the
# second and third elements don't come into play when sorting.
# However, in cases in the wild where a path might happen to occur twice,
# we don't want the sort operation to trigger an error (but still want
# determinism). Since the third element can be an int or string, we
# coerce each element to a string to avoid a TypeError in this case.
# For additional background, see--
# https://github.com/pypa/pip/issues/5868
return sorted(outrows, key=lambda row: tuple(str(x) for x in row))
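# Illustrative: sorted_outrows([('b', '', 10), ('a', '', '9')]) sorts on the
# stringified tuples, so mixed int/str size fields order deterministically
# instead of raising TypeError under Python 3.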
def get_csv_rows_for_installed(
old_csv_rows, # type: Iterable[List[str]]
installed, # type: Dict[str, str]
changed, # type: Set[str]
generated, # type: List[str]
lib_dir, # type: str
):
# type: (...) -> List[InstalledCSVRow]
"""
:param installed: A map from archive RECORD path to installation RECORD
path.
"""
installed_rows = [] # type: List[InstalledCSVRow]
for row in old_csv_rows:
if len(row) > 3:
logger.warning(
'RECORD line has more than three elements: {}'.format(row)
)
# Make a copy because we are mutating the row.
row = list(row)
old_path = row[0]
new_path = installed.pop(old_path, old_path)
row[0] = new_path
if new_path in changed:
digest, length = rehash(new_path)
row[1] = digest
row[2] = length
installed_rows.append(tuple(row))
for f in generated:
digest, length = rehash(f)
installed_rows.append((normpath(f, lib_dir), digest, str(length)))
for f in installed:
installed_rows.append((installed[f], '', ''))
return installed_rows
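# Illustrative: an old RECORD row ['pkg/mod.py', 'sha256=...', '42'] whose
# file was relocated and modified during install comes back with the new
# path and a freshly rehashed (digest, size); archive paths that never
# appeared in the old RECORD are appended with empty hash and size fields.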
class MissingCallableSuffix(Exception):
pass
def _raise_for_invalid_entrypoint(specification):
# type: (str) -> None
entry = get_export_entry(specification)
if entry is not None and entry.suffix is None:
raise MissingCallableSuffix(str(entry))
class PipScriptMaker(ScriptMaker):
def make(self, specification, options=None):
# type: (str, Dict[str, Any]) -> List[str]
_raise_for_invalid_entrypoint(specification)
return super(PipScriptMaker, self).make(specification, options)
def install_unpacked_wheel(
name, # type: str
wheeldir, # type: str
wheel_zip, # type: ZipFile
scheme, # type: Scheme
req_description, # type: str
pycompile=True, # type: bool
warn_script_location=True # type: bool
):
# type: (...) -> None
"""Install a wheel.
:param name: Name of the project to install
:param wheeldir: Base directory of the unpacked wheel
:param wheel_zip: open ZipFile for wheel being installed
:param scheme: Distutils scheme dictating the install directories
:param req_description: String used in place of the requirement, for
logging
:param pycompile: Whether to byte-compile installed Python files
:param warn_script_location: Whether to check that scripts are installed
into a directory on PATH
:raises UnsupportedWheel:
* when the directory holds an unpacked wheel with incompatible
Wheel-Version
* when the .dist-info dir does not match the wheel
"""
# TODO: Investigate and break this up.
# TODO: Look into moving this into a dedicated class for representing an
# installation.
source = wheeldir.rstrip(os.path.sep) + os.path.sep
info_dir, metadata = parse_wheel(wheel_zip, name)
if wheel_root_is_purelib(metadata):
lib_dir = scheme.purelib
else:
lib_dir = scheme.platlib
subdirs = os.listdir(source)
data_dirs = [s for s in subdirs if s.endswith('.data')]
# Record details of the files moved
# installed = files copied from the wheel to the destination
# changed = files changed while installing (scripts #! line typically)
# generated = files newly generated during the install (script wrappers)
installed = {} # type: Dict[str, str]
changed = set()
generated = [] # type: List[str]
# Compile all of the pyc files that we're going to be installing
if pycompile:
with captured_stdout() as stdout:
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
compileall.compile_dir(source, force=True, quiet=True)
logger.debug(stdout.getvalue())
def record_installed(srcfile, destfile, modified=False):
# type: (str, str, bool) -> None
"""Map archive RECORD paths to installation RECORD paths."""
oldpath = normpath(srcfile, wheeldir)
newpath = normpath(destfile, lib_dir)
installed[oldpath] = newpath
if modified:
changed.add(destfile)
def clobber(
source, # type: str
dest, # type: str
is_base, # type: bool
fixer=None, # type: Optional[Callable[[str], Any]]
filter=None # type: Optional[Callable[[str], bool]]
):
# type: (...) -> None
ensure_dir(dest) # common for the 'include' path
for dir, subdirs, files in os.walk(source):
basedir = dir[len(source):].lstrip(os.path.sep)
destdir = os.path.join(dest, basedir)
if is_base and basedir == '':
subdirs[:] = [s for s in subdirs if not s.endswith('.data')]
for f in files:
# Skip unwanted files
if filter and filter(f):
continue
srcfile = os.path.join(dir, f)
destfile = os.path.join(dest, basedir, f)
# directory creation is lazy and after the file filtering above
# to ensure we don't install empty dirs; empty dirs can't be
# uninstalled.
ensure_dir(destdir)
# copyfile (called below) truncates the destination if it
# exists and then writes the new contents. This is fine in most
# cases, but can cause a segfault if pip has loaded a shared
# object (e.g. from pyopenssl through its vendored urllib3)
# Since the shared object is mmap'd an attempt to call a
# symbol in it will then cause a segfault. Unlinking the file
# allows writing of new contents while allowing the process to
# continue to use the old copy.
if os.path.exists(destfile):
os.unlink(destfile)
# We use copyfile (not move, copy, or copy2) to be extra sure
# that we are not moving directories over (copyfile fails for
# directories) as well as to ensure that we are not copying
# over any metadata because we want more control over what
# metadata we actually copy over.
shutil.copyfile(srcfile, destfile)
# Copy over the metadata for the file, currently this only
# includes the atime and mtime.
st = os.stat(srcfile)
if hasattr(os, "utime"):
os.utime(destfile, (st.st_atime, st.st_mtime))
# If our file is executable, then make our destination file
# executable.
if os.access(srcfile, os.X_OK):
st = os.stat(srcfile)
permissions = (
st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
)
os.chmod(destfile, permissions)
changed = False
if fixer:
changed = fixer(destfile)
record_installed(srcfile, destfile, changed)
clobber(source, lib_dir, True)
dest_info_dir = os.path.join(lib_dir, info_dir)
# Get the defined entry points
ep_file = os.path.join(dest_info_dir, 'entry_points.txt')
console, gui = get_entrypoints(ep_file)
def is_entrypoint_wrapper(name):
# type: (str) -> bool
# EP, EP.exe and EP-script.py are scripts generated for
# entry point EP by setuptools
if name.lower().endswith('.exe'):
matchname = name[:-4]
elif name.lower().endswith('-script.py'):
matchname = name[:-10]
elif name.lower().endswith(".pya"):
matchname = name[:-4]
else:
matchname = name
# Ignore setuptools-generated scripts
return (matchname in console or matchname in gui)
for datadir in data_dirs:
fixer = None
filter = None
for subdir in os.listdir(os.path.join(wheeldir, datadir)):
fixer = None
if subdir == 'scripts':
fixer = fix_script
filter = is_entrypoint_wrapper
source = os.path.join(wheeldir, datadir, subdir)
dest = getattr(scheme, subdir)
clobber(source, dest, False, fixer=fixer, filter=filter)
maker = PipScriptMaker(None, scheme.scripts)
# Ensure old scripts are overwritten.
# See https://github.com/pypa/pip/issues/1800
maker.clobber = True
# Ensure we don't generate any variants for scripts because this is almost
# never what somebody wants.
# See https://bitbucket.org/pypa/distlib/issue/35/
maker.variants = {''}
# This is required because otherwise distlib creates scripts that are not
# executable.
# See https://bitbucket.org/pypa/distlib/issue/32/
maker.set_mode = True
scripts_to_generate = []
# Special case pip and setuptools to generate versioned wrappers
#
# The issue is that some projects (specifically, pip and setuptools) use
# code in setup.py to create "versioned" entry points - pip2.7 on Python
# 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
# the wheel metadata at build time, and so if the wheel is installed with
# a *different* version of Python the entry points will be wrong. The
# correct fix for this is to enhance the metadata to be able to describe
# such versioned entry points, but that won't happen till Metadata 2.0 is
# available.
# In the meantime, projects using versioned entry points will either have
# incorrect versioned entry points, or they will not be able to distribute
# "universal" wheels (i.e., they will need a wheel per Python version).
#
# Because setuptools and pip are bundled with _ensurepip and virtualenv,
# we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
# override the versioned entry points in the wheel and generate the
# correct ones. This code is purely a short-term measure until Metadata 2.0
# is available.
#
    # To add to the level of hack in this section of code: in order to
    # support ensurepip, this code looks for an ``ENSUREPIP_OPTIONS``
    # environment variable, which controls which versioned scripts get
    # installed.
#
# ENSUREPIP_OPTIONS=altinstall
# - Only pipX.Y and easy_install-X.Y will be generated and installed
# ENSUREPIP_OPTIONS=install
    #   - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
    #     that this behavior is triggered whenever ENSUREPIP_OPTIONS is set to
    #     any value other than altinstall
# DEFAULT
# - The default behavior is to install pip, pipX, pipX.Y, easy_install
# and easy_install-X.Y.
pip_script = console.pop('pip', None)
if pip_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
scripts_to_generate.append('pip = ' + pip_script)
if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
scripts_to_generate.append(
'pip%s = %s' % (sys.version_info[0], pip_script)
)
scripts_to_generate.append(
'pip%s = %s' % (get_major_minor_version(), pip_script)
)
# Delete any other versioned pip entry points
pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
for k in pip_ep:
del console[k]
easy_install_script = console.pop('easy_install', None)
if easy_install_script:
if "ENSUREPIP_OPTIONS" not in os.environ:
scripts_to_generate.append(
'easy_install = ' + easy_install_script
)
scripts_to_generate.append(
'easy_install-%s = %s' % (
get_major_minor_version(), easy_install_script
)
)
# Delete any other versioned easy_install entry points
easy_install_ep = [
k for k in console if re.match(r'easy_install(-\d\.\d)?$', k)
]
for k in easy_install_ep:
del console[k]
# Generate the console and GUI entry points specified in the wheel
scripts_to_generate.extend(
'%s = %s' % kv for kv in console.items()
)
gui_scripts_to_generate = [
'%s = %s' % kv for kv in gui.items()
]
generated_console_scripts = [] # type: List[str]
try:
generated_console_scripts = maker.make_multiple(scripts_to_generate)
generated.extend(generated_console_scripts)
generated.extend(
maker.make_multiple(gui_scripts_to_generate, {'gui': True})
)
except MissingCallableSuffix as e:
entry = e.args[0]
raise InstallationError(
"Invalid script entry point: {} for req: {} - A callable "
"suffix is required. Cf https://packaging.python.org/"
"specifications/entry-points/#use-for-scripts for more "
"information.".format(entry, req_description)
)
if warn_script_location:
msg = message_about_scripts_not_on_PATH(generated_console_scripts)
if msg is not None:
logger.warning(msg)
# Record pip as the installer
installer = os.path.join(dest_info_dir, 'INSTALLER')
temp_installer = os.path.join(dest_info_dir, 'INSTALLER.pip')
with open(temp_installer, 'wb') as installer_file:
installer_file.write(b'pip\n')
shutil.move(temp_installer, installer)
generated.append(installer)
# Record details of all files installed
record = os.path.join(dest_info_dir, 'RECORD')
temp_record = os.path.join(dest_info_dir, 'RECORD.pip')
with open_for_csv(record, 'r') as record_in:
with open_for_csv(temp_record, 'w+') as record_out:
reader = csv.reader(record_in)
outrows = get_csv_rows_for_installed(
reader, installed=installed, changed=changed,
generated=generated, lib_dir=lib_dir,
)
writer = csv.writer(record_out)
# Sort to simplify testing.
for row in sorted_outrows(outrows):
writer.writerow(row)
shutil.move(temp_record, record)
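# Illustrative: with ENSUREPIP_OPTIONS unset on CPython 3.8, a wheel whose
# metadata exposes 'pip' and 'easy_install' console scripts gets wrappers
# generated for pip, pip3, pip3.8, easy_install and easy_install-3.8,
# regardless of the versioned entry points baked into the wheel at build
# time.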
def install_wheel(
name, # type: str
wheel_path, # type: str
scheme, # type: Scheme
req_description, # type: str
pycompile=True, # type: bool
warn_script_location=True, # type: bool
_temp_dir_for_testing=None, # type: Optional[str]
):
# type: (...) -> None
with TempDirectory(
path=_temp_dir_for_testing, kind="unpacked-wheel"
) as unpacked_dir, ZipFile(wheel_path, allowZip64=True) as z:
unpack_file(wheel_path, unpacked_dir.path)
install_unpacked_wheel(
name=name,
wheeldir=unpacked_dir.path,
wheel_zip=z,
scheme=scheme,
req_description=req_description,
pycompile=pycompile,
warn_script_location=warn_script_location,
)
|
|
# -*- test-case-name: pyflakes -*-
# (c) 2005-2008 Divmod, Inc.
# See LICENSE file for details
import __builtin__
import os.path
from compiler import ast
from pyflakes import messages
class Binding(object):
"""
Represents the binding of a value to a name.
The checker uses this to keep track of which names have been bound and
which names have not. See L{Assignment} for a special type of binding that
is checked with stricter rules.
@ivar used: pair of (L{Scope}, line-number) indicating the scope and
line number that this binding was last used
"""
def __init__(self, name, source):
self.name = name
self.source = source
self.used = False
def __str__(self):
return self.name
def __repr__(self):
return '<%s object %r from line %r at 0x%x>' % (self.__class__.__name__,
self.name,
self.source.lineno,
id(self))
class UnBinding(Binding):
'''Created by the 'del' operator.'''
class Importation(Binding):
"""
A binding created by an import statement.
@ivar fullName: The complete name given to the import statement,
possibly including multiple dotted components.
@type fullName: C{str}
"""
def __init__(self, name, source):
self.fullName = name
name = name.split('.')[0]
super(Importation, self).__init__(name, source)
class Argument(Binding):
"""
Represents binding a name as an argument.
"""
class Assignment(Binding):
"""
Represents binding a name with an explicit assignment.
The checker will raise warnings for any Assignment that isn't used. Also,
the checker does not consider assignments in tuple/list unpacking to be
Assignments, rather it treats them as simple Bindings.
"""
class FunctionDefinition(Binding):
pass
class ExportBinding(Binding):
"""
A binding created by an C{__all__} assignment. If the names in the list
can be determined statically, they will be treated as names for export and
additional checking applied to them.
The only C{__all__} assignment that can be recognized is one which takes
the value of a literal list containing literal strings. For example::
__all__ = ["foo", "bar"]
Names which are imported and not otherwise used but appear in the value of
C{__all__} will not have an unused import warning reported for them.
"""
def names(self):
"""
Return a list of the names referenced by this binding.
"""
names = []
if isinstance(self.source, ast.List):
for node in self.source.nodes:
if isinstance(node, ast.Const):
names.append(node.value)
return names
class Scope(dict):
importStarred = False # set to True when import * is found
def __repr__(self):
return '<%s at 0x%x %s>' % (self.__class__.__name__, id(self), dict.__repr__(self))
def __init__(self):
super(Scope, self).__init__()
class ClassScope(Scope):
pass
class FunctionScope(Scope):
"""
I represent a name scope for a function.
@ivar globals: Names declared 'global' in this function.
"""
def __init__(self):
super(FunctionScope, self).__init__()
self.globals = {}
class ModuleScope(Scope):
pass
# Globally defined names which are not attributes of the __builtin__ module.
_MAGIC_GLOBALS = ['__file__', '__builtins__']
class Checker(object):
"""
I check the cleanliness and sanity of Python code.
@ivar _deferredFunctions: Tracking list used by L{deferFunction}. Elements
of the list are two-tuples. The first element is the callable passed
to L{deferFunction}. The second element is a copy of the scope stack
at the time L{deferFunction} was called.
@ivar _deferredAssignments: Similar to C{_deferredFunctions}, but for
callables which are deferred assignment checks.
"""
nodeDepth = 0
traceTree = False
def __init__(self, tree, filename='(none)'):
self._deferredFunctions = []
self._deferredAssignments = []
self.dead_scopes = []
self.messages = []
self.filename = filename
self.scopeStack = [ModuleScope()]
self.futuresAllowed = True
self.handleChildren(tree)
self._runDeferred(self._deferredFunctions)
# Set _deferredFunctions to None so that deferFunction will fail
# noisily if called after we've run through the deferred functions.
self._deferredFunctions = None
self._runDeferred(self._deferredAssignments)
# Set _deferredAssignments to None so that deferAssignment will fail
        # noisily if called after we've run through the deferred assignments.
self._deferredAssignments = None
del self.scopeStack[1:]
self.popScope()
self.check_dead_scopes()
def deferFunction(self, callable):
'''
Schedule a function handler to be called just before completion.
This is used for handling function bodies, which must be deferred
because code later in the file might modify the global scope. When
`callable` is called, the scope at the time this is called will be
restored, however it will contain any new bindings added to it.
'''
self._deferredFunctions.append((callable, self.scopeStack[:]))
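    # Illustrative: the deferral above is what lets forward references such as
    #
    #     def f():
    #         return later  # 'later' is bound by the time f's body is checked
    #     later = 1
    #
    # pass without an undefined-name warning, because function bodies are
    # only handled once the whole module has been traversed.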
def deferAssignment(self, callable):
"""
Schedule an assignment handler to be called just after deferred
function handlers.
"""
self._deferredAssignments.append((callable, self.scopeStack[:]))
def _runDeferred(self, deferred):
"""
Run the callables in C{deferred} using their associated scope stack.
"""
for handler, scope in deferred:
self.scopeStack = scope
handler()
def scope(self):
return self.scopeStack[-1]
scope = property(scope)
def popScope(self):
self.dead_scopes.append(self.scopeStack.pop())
def check_dead_scopes(self):
"""
Look at scopes which have been fully examined and report names in them
which were imported but unused.
"""
for scope in self.dead_scopes:
export = isinstance(scope.get('__all__'), ExportBinding)
if export:
all = scope['__all__'].names()
if os.path.split(self.filename)[1] != '__init__.py':
# Look for possible mistakes in the export list
undefined = set(all) - set(scope)
for name in undefined:
self.report(
messages.UndefinedExport,
scope['__all__'].source.lineno,
name)
else:
all = []
# Look for imported names that aren't used.
for importation in scope.itervalues():
if isinstance(importation, Importation):
if not importation.used and importation.name not in all:
self.report(
messages.UnusedImport,
importation.source.lineno,
importation.name)
def pushFunctionScope(self):
self.scopeStack.append(FunctionScope())
def pushClassScope(self):
self.scopeStack.append(ClassScope())
def report(self, messageClass, *args, **kwargs):
self.messages.append(messageClass(self.filename, *args, **kwargs))
def handleChildren(self, tree):
for node in tree.getChildNodes():
self.handleNode(node, tree)
def handleNode(self, node, parent):
node.parent = parent
if self.traceTree:
print ' ' * self.nodeDepth + node.__class__.__name__
self.nodeDepth += 1
nodeType = node.__class__.__name__.upper()
if nodeType not in ('STMT', 'FROM'):
self.futuresAllowed = False
try:
handler = getattr(self, nodeType)
handler(node)
finally:
self.nodeDepth -= 1
if self.traceTree:
print ' ' * self.nodeDepth + 'end ' + node.__class__.__name__
def ignore(self, node):
pass
STMT = PRINT = PRINTNL = TUPLE = LIST = ASSTUPLE = ASSATTR = \
ASSLIST = GETATTR = SLICE = SLICEOBJ = IF = CALLFUNC = DISCARD = \
RETURN = ADD = MOD = SUB = NOT = UNARYSUB = INVERT = ASSERT = COMPARE = \
SUBSCRIPT = AND = OR = TRYEXCEPT = RAISE = YIELD = DICT = LEFTSHIFT = \
RIGHTSHIFT = KEYWORD = TRYFINALLY = WHILE = EXEC = MUL = DIV = POWER = \
FLOORDIV = BITAND = BITOR = BITXOR = LISTCOMPFOR = LISTCOMPIF = \
AUGASSIGN = BACKQUOTE = UNARYADD = GENEXPR = GENEXPRFOR = GENEXPRIF = \
IFEXP = handleChildren
CONST = PASS = CONTINUE = BREAK = ELLIPSIS = ignore
def addBinding(self, lineno, value, reportRedef=True):
'''Called when a binding is altered.
- `lineno` is the line of the statement responsible for the change
- `value` is the optional new value, a Binding instance, associated
with the binding; if None, the binding is deleted if it exists.
- if `reportRedef` is True (default), rebinding while unused will be
reported.
'''
if (isinstance(self.scope.get(value.name), FunctionDefinition)
and isinstance(value, FunctionDefinition)):
self.report(messages.RedefinedFunction,
lineno, value.name, self.scope[value.name].source.lineno)
if not isinstance(self.scope, ClassScope):
for scope in self.scopeStack[::-1]:
existing = scope.get(value.name)
if (isinstance(existing, Importation)
and not existing.used
and (not isinstance(value, Importation) or value.fullName == existing.fullName)
and reportRedef):
self.report(messages.RedefinedWhileUnused,
lineno, value.name, scope[value.name].source.lineno)
if isinstance(value, UnBinding):
try:
del self.scope[value.name]
except KeyError:
self.report(messages.UndefinedName, lineno, value.name)
else:
self.scope[value.name] = value
def WITH(self, node):
"""
Handle C{with} by checking the target of the statement (which can be an
identifier, a list or tuple of targets, an attribute, etc) for
undefined names and defining any it adds to the scope and by continuing
to process the suite within the statement.
"""
# Check the "foo" part of a "with foo as bar" statement. Do this no
# matter what, since there's always a "foo" part.
self.handleNode(node.expr, node)
if node.vars is not None:
self.handleNode(node.vars, node)
self.handleChildren(node.body)
def GLOBAL(self, node):
"""
Keep track of globals declarations.
"""
if isinstance(self.scope, FunctionScope):
self.scope.globals.update(dict.fromkeys(node.names))
def LISTCOMP(self, node):
for qual in node.quals:
self.handleNode(qual, node)
self.handleNode(node.expr, node)
GENEXPRINNER = LISTCOMP
def FOR(self, node):
"""
Process bindings for loop variables.
"""
vars = []
def collectLoopVars(n):
if hasattr(n, 'name'):
vars.append(n.name)
else:
for c in n.getChildNodes():
collectLoopVars(c)
collectLoopVars(node.assign)
for varn in vars:
if (isinstance(self.scope.get(varn), Importation)
# unused ones will get an unused import warning
and self.scope[varn].used):
self.report(messages.ImportShadowedByLoopVar,
node.lineno, varn, self.scope[varn].source.lineno)
self.handleChildren(node)
def NAME(self, node):
"""
Locate the name in locals / function / globals scopes.
"""
# try local scope
importStarred = self.scope.importStarred
try:
self.scope[node.name].used = (self.scope, node.lineno)
except KeyError:
pass
else:
return
# try enclosing function scopes
for scope in self.scopeStack[-2:0:-1]:
importStarred = importStarred or scope.importStarred
if not isinstance(scope, FunctionScope):
continue
try:
scope[node.name].used = (self.scope, node.lineno)
except KeyError:
pass
else:
return
# try global scope
importStarred = importStarred or self.scopeStack[0].importStarred
try:
self.scopeStack[0][node.name].used = (self.scope, node.lineno)
except KeyError:
if ((not hasattr(__builtin__, node.name))
and node.name not in _MAGIC_GLOBALS
and not importStarred):
if (os.path.basename(self.filename) == '__init__.py' and
node.name == '__path__'):
# the special name __path__ is valid only in packages
pass
else:
self.report(messages.UndefinedName, node.lineno, node.name)
def FUNCTION(self, node):
if getattr(node, "decorators", None) is not None:
self.handleChildren(node.decorators)
self.addBinding(node.lineno, FunctionDefinition(node.name, node))
self.LAMBDA(node)
def LAMBDA(self, node):
for default in node.defaults:
self.handleNode(default, node)
def runFunction():
args = []
def addArgs(arglist):
for arg in arglist:
if isinstance(arg, tuple):
addArgs(arg)
else:
if arg in args:
self.report(messages.DuplicateArgument, node.lineno, arg)
args.append(arg)
self.pushFunctionScope()
addArgs(node.argnames)
for name in args:
self.addBinding(node.lineno, Argument(name, node), reportRedef=False)
self.handleNode(node.code, node)
def checkUnusedAssignments():
"""
Check to see if any assignments have not been used.
"""
for name, binding in self.scope.iteritems():
if (not binding.used and not name in self.scope.globals
and isinstance(binding, Assignment)):
self.report(messages.UnusedVariable,
binding.source.lineno, name)
self.deferAssignment(checkUnusedAssignments)
self.popScope()
self.deferFunction(runFunction)
def CLASS(self, node):
"""
Check names used in a class definition, including its decorators, base
classes, and the body of its definition. Additionally, add its name to
the current scope.
"""
if getattr(node, "decorators", None) is not None:
self.handleChildren(node.decorators)
for baseNode in node.bases:
self.handleNode(baseNode, node)
self.addBinding(node.lineno, Binding(node.name, node))
self.pushClassScope()
self.handleChildren(node.code)
self.popScope()
def ASSNAME(self, node):
if node.flags == 'OP_DELETE':
if isinstance(self.scope, FunctionScope) and node.name in self.scope.globals:
del self.scope.globals[node.name]
else:
self.addBinding(node.lineno, UnBinding(node.name, node))
else:
# if the name hasn't already been defined in the current scope
if isinstance(self.scope, FunctionScope) and node.name not in self.scope:
# for each function or module scope above us
for scope in self.scopeStack[:-1]:
if not isinstance(scope, (FunctionScope, ModuleScope)):
continue
# if the name was defined in that scope, and the name has
# been accessed already in the current scope, and hasn't
# been declared global
if (node.name in scope
and scope[node.name].used
and scope[node.name].used[0] is self.scope
and node.name not in self.scope.globals):
# then it's probably a mistake
self.report(messages.UndefinedLocal,
scope[node.name].used[1],
node.name,
scope[node.name].source.lineno)
break
if isinstance(node.parent,
(ast.For, ast.ListCompFor, ast.GenExprFor,
ast.AssTuple, ast.AssList)):
binding = Binding(node.name, node)
elif (node.name == '__all__' and
isinstance(self.scope, ModuleScope) and
isinstance(node.parent, ast.Assign)):
binding = ExportBinding(node.name, node.parent.expr)
else:
binding = Assignment(node.name, node)
if node.name in self.scope:
binding.used = self.scope[node.name].used
self.addBinding(node.lineno, binding)
def ASSIGN(self, node):
self.handleNode(node.expr, node)
for subnode in node.nodes[::-1]:
self.handleNode(subnode, node)
def IMPORT(self, node):
for name, alias in node.names:
name = alias or name
importation = Importation(name, node)
self.addBinding(node.lineno, importation)
def FROM(self, node):
if node.modname == '__future__':
if not self.futuresAllowed:
self.report(messages.LateFutureImport, node.lineno, [n[0] for n in node.names])
else:
self.futuresAllowed = False
for name, alias in node.names:
if name == '*':
self.scope.importStarred = True
self.report(messages.ImportStarUsed, node.lineno, node.modname)
continue
name = alias or name
importation = Importation(name, node)
if node.modname == '__future__':
importation.used = (self.scope, node.lineno)
self.addBinding(node.lineno, importation)
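# Minimal usage sketch (illustrative; Python 2 only, since this module relies
# on the long-removed stdlib `compiler` package):
#
#   from compiler import parse
#   tree = parse(open('some_module.py').read())
#   for message in Checker(tree, 'some_module.py').messages:
#       print message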
|
|
from numpy import (
vstack, where, intersect1d, in1d, unique,
cross, abs, arccos, sign,
dot, array, cov, nan_to_num, inf, pi,
hstack, repeat, bincount, arange
)
from numpy.linalg import norm, solve
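# `orthogonal_vector` is used by the bounding-box constructors below but is
# not defined in this excerpt; presumably it lives elsewhere in the original
# module. The following is a minimal sketch of such a helper, assuming all it
# must do is return some vector orthogonal to `v` for 2-D or 3-D input (the
# callers normalize the result themselves).
def orthogonal_vector(v):
    if len(v) == 2:
        # rotate 90 degrees counter-clockwise
        return array([-v[1], v[0]])
    # 3-D: cross v with the coordinate axis it is least aligned with
    axis = array([0.0, 0.0, 0.0])
    axis[abs(v).argmin()] = 1.0
    return cross(v, axis)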
class Box2D:
def __init__(self, *args, **kwargs):
if len(args) <= 5:
self._compute_bounding_box(*args, **kwargs)
else:
self._set_variables(*args)
def _compute_bounding_box(self, points, point_ids, vectors, labels=None, level=None):
center = points.mean(0)
centered_points = points - center
orientation = vectors.sum(0)
orientation /= norm(orientation)
orthogonal_direction = orthogonal_vector(orientation)
orthogonal_direction /= norm(orthogonal_direction)
points_orthogonal = dot(
orthogonal_direction,
centered_points.T
)
points_orientation = dot(orientation, centered_points.T)
max_main = points_orientation.max()
min_main = points_orientation.min()
max_orthogonal = points_orthogonal.max()
min_orthogonal = points_orthogonal.min()
bounding_box_corners = (vstack((
orientation * max_main + orthogonal_direction * max_orthogonal,
orientation * max_main + orthogonal_direction * min_orthogonal,
orientation * min_main + orthogonal_direction * min_orthogonal,
orientation * min_main + orthogonal_direction * max_orthogonal,
)) + center)
center = bounding_box_corners.mean(0)
volume = (max_main - min_main) * (max_orthogonal - min_orthogonal)
self.orthogonal = orthogonal_direction
self.points_orientation = points_orientation
self.points_orthogonal = points_orthogonal
self._set_variables(
bounding_box_corners, center, orientation,
labels, points, point_ids, vectors, volume,
None, None, None, level
)
def _set_variables(self,
box,
center,
orientation,
labels,
points,
point_ids,
vectors,
volume,
parent,
left,
right,
level
):
self.box = box
self.center = center
self.orientation = orientation
self.labels = labels
self.points = points
self.point_ids = point_ids
self.vectors = vectors
self.volume = volume
self.parent = parent
self.left = left
self.right = right
self.level = level
self._calculate_orientation_limits()
self._calculate_orthogonal_limits()
def _calculate_orientation_limits(self):
projections = [dot(self.orientation, point) for point in self.box]
self.orientation_limits = (min(projections), max(projections))
def _calculate_orthogonal_limits(self):
projections = [dot(self.orthogonal, point) for point in self.box]
self.orthogonal_limits = (min(projections), max(projections))
def siblings(self, generations_up=0, generations_down=0):
if generations_up == 0 and generations_down == 1:
left = [self.left] if self.left is not None else []
right = [self.right] if self.right is not None else []
return left + right
elif generations_up > 0:
if self.parent is None:
return []
return self.parent.siblings(generations_up - 1, generations_down + 1)
elif generations_down > 1:
if self.left is not None:
left = self.left.siblings(0, generations_down - 1)
else:
left = []
if self.right is not None:
right = self.right.siblings(0, generations_down - 1)
else:
right = []
return left + right
def swap_direction(self):
self.orientation *= -1
self._calculate_orientation_limits()
    def overlap_main(self, box):
        projections = [dot(self.orientation, point) for point in box.box]
        orientation_limits = (min(projections), max(projections))
        return (
            self.orientation_limits[0] <= orientation_limits[0] <= self.orientation_limits[1] or
            self.orientation_limits[0] <= orientation_limits[1] <= self.orientation_limits[1] or
            orientation_limits[0] <= self.orientation_limits[0] <= orientation_limits[1] or
            orientation_limits[0] <= self.orientation_limits[1] <= orientation_limits[1]
        )
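    # Note: the four-way comparison in overlap_main is the standard 1-D
    # interval overlap test; for well-ordered (min, max) pairs it is
    # equivalent to
    #   orientation_limits[0] <= self.orientation_limits[1] and
    #   self.orientation_limits[0] <= orientation_limits[1]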
def center_signed_orientational_distance(self, box):
return dot(self.orientation, self.center - box.center)
def center_distance(self, box):
return norm(box.center - self.center)
def __repr__(self):
return self.box.__repr__() + '\n' +\
'level:' + repr(self.level)
def __str__(self):
return self.box.__str__() + '\n' +\
'level:' + str(self.level)
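# Minimal usage sketch (illustrative, assuming the orthogonal_vector sketch
# above): a 2x1 rectangle whose normals all point along +x yields a box
# oriented along x.
#
#   pts = array([[0., 0.], [2., 0.], [2., 1.], [0., 1.]])
#   vecs = array([[1., 0.]] * 4)
#   b = Box2D(pts, arange(4), vecs)
#   b.volume  # -> 2.0 (an area, despite the attribute name)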
class Box3D(Box2D):
def _compute_bounding_box(self, points, point_ids, vectors, labels=None, level=None):
original_points = points
original_point_ids = point_ids
original_labels = labels
original_vectors = vectors
orientation = vectors.mean(0)
orientation /= norm(orientation)
orthogonal_direction1 = orthogonal_vector(orientation)
orthogonal_direction2 = cross(orientation, orthogonal_direction1)
orthogonal_direction1 /= norm(orthogonal_direction1)
orthogonal_direction2 /= norm(orthogonal_direction2)
center = points.mean(0)
centered_points = points - center
points_orientation = dot(orientation, centered_points.T)
points_orthogonal1 = dot(
orthogonal_direction1,
centered_points.T
)
points_orthogonal2 = dot(
orthogonal_direction2,
centered_points.T
)
max_main, min_main = points_orientation.max(), points_orientation.min()
max_orthogonal1, min_orthogonal1 = (
points_orthogonal1.max(),
points_orthogonal1.min()
)
max_orthogonal2, min_orthogonal2 = (
points_orthogonal2.max(),
points_orthogonal2.min()
)
bounding_box_corners = (vstack((
orientation * max_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
orientation * max_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * max_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * max_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
orientation * min_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
orientation * min_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * min_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * min_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
)) + center)
center = bounding_box_corners.mean(0)
volume = (
(max_main - min_main) *
(max_orthogonal1 - min_orthogonal1) *
(max_orthogonal2 - min_orthogonal2)
)
self.orthogonal1 = orthogonal_direction1
self.orthogonal2 = orthogonal_direction2
self.points_orientation = points_orientation
self.points_orthogonal1 = points_orthogonal1
self.points_orthogonal2 = points_orthogonal2
self._set_variables(
bounding_box_corners, center, orientation,
original_labels, original_points, original_point_ids, original_vectors, volume,
None, None, None, level
)
def _calculate_orthogonal_limits(self):
projections = dot(self.orthogonal1, self.box.T).T
self.orthogonal1_limits = (min(projections), max(projections))
projections = dot(self.orthogonal2, self.box.T).T
self.orthogonal2_limits = (min(projections), max(projections))
def overlap_main(self, box):
projections = dot(self.orientation, box.box.T).T
orientation_limits = (min(projections), max(projections))
return (
self.orientation_limits[0] <= orientation_limits[0] <= self.orientation_limits[1] or
self.orientation_limits[0] <= orientation_limits[1] <= self.orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[0] <= orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[1] <= orientation_limits[1]
)
def overlap_orthogonal(self, box):
projections = dot(self.orthogonal1, box.box.T).T
orthogonal1_limits = (min(projections), max(projections))
        overlap_orthogonal1 = (
            self.orthogonal1_limits[0] <= orthogonal1_limits[0] <= self.orthogonal1_limits[1] or
            self.orthogonal1_limits[0] <= orthogonal1_limits[1] <= self.orthogonal1_limits[1] or
            orthogonal1_limits[0] <= self.orthogonal1_limits[0] <= orthogonal1_limits[1] or
            orthogonal1_limits[0] <= self.orthogonal1_limits[1] <= orthogonal1_limits[1]
        )
        if not overlap_orthogonal1:
            return False
        projections = dot(self.orthogonal2, box.box.T).T
        orthogonal2_limits = (min(projections), max(projections))
        overlap_orthogonal2 = (
            self.orthogonal2_limits[0] <= orthogonal2_limits[0] <= self.orthogonal2_limits[1] or
            self.orthogonal2_limits[0] <= orthogonal2_limits[1] <= self.orthogonal2_limits[1] or
            orthogonal2_limits[0] <= self.orthogonal2_limits[0] <= orthogonal2_limits[1] or
            orthogonal2_limits[0] <= self.orthogonal2_limits[1] <= orthogonal2_limits[1]
        )
        return overlap_orthogonal2
def overlap(self, box):
return self.overlap_main(box) and self.overlap_orthogonal(box)
def overlap_volume(self, box):
projections = dot(self.orientation, box.box.T).T
orientation_limits = (min(projections), max(projections))
if not (
self.orientation_limits[0] <= orientation_limits[0] <= self.orientation_limits[1] or
self.orientation_limits[0] <= orientation_limits[1] <= self.orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[0] <= orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[1] <= orientation_limits[1]
):
return 0
overlap_main_length =\
min(orientation_limits[1], self.orientation_limits[1]) -\
max(orientation_limits[0], self.orientation_limits[0])
projections = dot(self.orthogonal1, box.box.T).T
orthogonal1_limits = (min(projections), max(projections))
if not\
(self.orthogonal1_limits[0] <= orthogonal1_limits[0] <= self.orthogonal1_limits[1] or
self.orthogonal1_limits[0] <= orthogonal1_limits[1] <= self.orthogonal1_limits[1] or
orthogonal1_limits[0] <= self.orthogonal1_limits[0] <= orthogonal1_limits[1] or
orthogonal1_limits[0] <= self.orthogonal1_limits[1] <= orthogonal1_limits[1]):
return 0
overlap_orthogonal1_length = \
min(orthogonal1_limits[1], self.orthogonal1_limits[1]) -\
max(orthogonal1_limits[0], self.orthogonal1_limits[0])
        projections = dot(self.orthogonal2, box.box.T).T
orthogonal2_limits = (min(projections), max(projections))
if not\
(self.orthogonal2_limits[0] <= orthogonal2_limits[0] <= self.orthogonal2_limits[1] or
self.orthogonal2_limits[0] <= orthogonal2_limits[1] <= self.orthogonal2_limits[1] or
orthogonal2_limits[0] <= self.orthogonal2_limits[0] <= orthogonal2_limits[1] or
orthogonal2_limits[0] <= self.orthogonal2_limits[1] <= orthogonal2_limits[1]):
return 0
overlap_orthogonal2_length = \
min(orthogonal2_limits[1], self.orthogonal2_limits[1]) -\
max(orthogonal2_limits[0], self.orthogonal2_limits[0])
return overlap_main_length * overlap_orthogonal1_length * overlap_orthogonal2_length
class Box3DRich(Box2D):
def _compute_bounding_box(self, points, point_ids, vectors, labels=None, level=None, robustify=None):
original_points = points
original_point_ids = point_ids
original_labels = labels
original_vectors = vectors
if robustify == 'points' and len(points) > 4:
p_mean = points.mean(0)
p_cov = cov(points.T)
c_points = points - p_mean
z = (solve(p_cov, c_points.T) * c_points.T).sum(0)
cutoff = 9.3484036044961485 # chi2.ppf(.975, 3)
points = points[z < cutoff]
point_ids = point_ids[z < cutoff]
print 'Discarded', (len(original_points) - len(points)) * 1. / len(points)
vectors = vectors[z < cutoff]
if labels is not None:
labels = labels[z < cutoff]
orientation = vectors.mean(0)
orientation /= norm(orientation)
orthogonal_direction1 = orthogonal_vector(orientation)
orthogonal_direction2 = cross(orientation, orthogonal_direction1)
orthogonal_direction1 /= norm(orthogonal_direction1)
orthogonal_direction2 /= norm(orthogonal_direction2)
center = points.mean(0)
centered_points = points - center
points_orientation = dot(orientation, centered_points.T)
points_orthogonal1 = dot(
orthogonal_direction1, centered_points.T)
points_orthogonal2 = dot(
orthogonal_direction2, centered_points.T)
max_main, min_main = points_orientation.max(), points_orientation.min()
max_orthogonal1, min_orthogonal1 = points_orthogonal1.max(
), points_orthogonal1.min()
max_orthogonal2, min_orthogonal2 = points_orthogonal2.max(
), points_orthogonal2.min()
bounding_box_corners = (vstack((
orientation * max_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
orientation * max_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * max_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * max_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
orientation * min_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
orientation * min_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * min_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * min_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
)) + center)
center = bounding_box_corners.mean(0)
volume = (
(max_main - min_main) *
(max_orthogonal1 - min_orthogonal1) *
(max_orthogonal2 - min_orthogonal2)
)
self.orthogonal1 = orthogonal_direction1
self.orthogonal2 = orthogonal_direction2
self.points_orientation = points_orientation
self.points_orthogonal1 = points_orthogonal1
self.points_orthogonal2 = points_orthogonal2
self._set_variables(
bounding_box_corners, center, orientation,
original_labels, original_points, original_point_ids, original_vectors, volume,
None, None, None, level
)
def _calculate_orthogonal_limits(self):
projections = dot(self.orthogonal1, self.box.T).T
self.orthogonal1_limits = (min(projections), max(projections))
projections = dot(self.orthogonal2, self.box.T).T
self.orthogonal2_limits = (min(projections), max(projections))
def overlap_main(self, box):
projections = dot(self.orientation, box.box.T).T
orientation_limits = (min(projections), max(projections))
return (
self.orientation_limits[0] <= orientation_limits[0] <= self.orientation_limits[1] or
self.orientation_limits[0] <= orientation_limits[1] <= self.orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[0] <= orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[1] <= orientation_limits[1]
)
def overlap_orthogonal(self, box):
projections = dot(self.orthogonal1, box.box.T).T
orthogonal1_limits = (min(projections), max(projections))
        overlap_orthogonal1 = (
            self.orthogonal1_limits[0] <= orthogonal1_limits[0] <= self.orthogonal1_limits[1] or
            self.orthogonal1_limits[0] <= orthogonal1_limits[1] <= self.orthogonal1_limits[1] or
            orthogonal1_limits[0] <= self.orthogonal1_limits[0] <= orthogonal1_limits[1] or
            orthogonal1_limits[0] <= self.orthogonal1_limits[1] <= orthogonal1_limits[1]
        )
        if not overlap_orthogonal1:
            return False
        projections = dot(self.orthogonal2, box.box.T).T
        orthogonal2_limits = (min(projections), max(projections))
        overlap_orthogonal2 = (
            self.orthogonal2_limits[0] <= orthogonal2_limits[0] <= self.orthogonal2_limits[1] or
            self.orthogonal2_limits[0] <= orthogonal2_limits[1] <= self.orthogonal2_limits[1] or
            orthogonal2_limits[0] <= self.orthogonal2_limits[0] <= orthogonal2_limits[1] or
            orthogonal2_limits[0] <= self.orthogonal2_limits[1] <= orthogonal2_limits[1]
        )
        return overlap_orthogonal2
def overlap(self, box):
return self.overlap_main(box) and self.overlap_orthogonal(box)
def overlap_volume(self, box):
projections = dot(self.orientation, box.box.T).T
orientation_limits = (min(projections), max(projections))
if not (
self.orientation_limits[0] <= orientation_limits[0] <= self.orientation_limits[1] or
self.orientation_limits[0] <= orientation_limits[1] <= self.orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[0] <= orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[1] <= orientation_limits[1]
):
return 0
overlap_main_length =\
min(orientation_limits[1], self.orientation_limits[1]) -\
max(orientation_limits[0], self.orientation_limits[0])
projections = dot(self.orthogonal1, box.box.T).T
orthogonal1_limits = (min(projections), max(projections))
if not\
(self.orthogonal1_limits[0] <= orthogonal1_limits[0] <= self.orthogonal1_limits[1] or
self.orthogonal1_limits[0] <= orthogonal1_limits[1] <= self.orthogonal1_limits[1] or
orthogonal1_limits[0] <= self.orthogonal1_limits[0] <= orthogonal1_limits[1] or
orthogonal1_limits[0] <= self.orthogonal1_limits[1] <= orthogonal1_limits[1]):
return 0
overlap_orthogonal1_length = \
min(orthogonal1_limits[1], self.orthogonal1_limits[1]) -\
max(orthogonal1_limits[0], self.orthogonal1_limits[0])
        projections = dot(self.orthogonal2, box.box.T).T
orthogonal2_limits = (min(projections), max(projections))
if not\
(self.orthogonal2_limits[0] <= orthogonal2_limits[0] <= self.orthogonal2_limits[1] or
self.orthogonal2_limits[0] <= orthogonal2_limits[1] <= self.orthogonal2_limits[1] or
orthogonal2_limits[0] <= self.orthogonal2_limits[0] <= orthogonal2_limits[1] or
orthogonal2_limits[0] <= self.orthogonal2_limits[1] <= orthogonal2_limits[1]):
return 0
overlap_orthogonal2_length = \
min(orthogonal2_limits[1], self.orthogonal2_limits[1]) -\
max(orthogonal2_limits[0], self.orthogonal2_limits[0])
return overlap_main_length * overlap_orthogonal1_length * overlap_orthogonal2_length
def orthogonal_vector(vector, tol=1e-8):
a_vector = abs(vector)
if len(vector) == 3:
if a_vector[0] > tol:
orthogonal = vector[::-1] * (1, 0, -1)
elif a_vector[2] > tol:
orthogonal = vector[::-1] * (-1, 0, 1)
        elif a_vector[1] > tol:
            # the vector is (numerically) aligned with the y axis, so any
            # vector in the xz plane is orthogonal to it
            orthogonal = array([1., 0., 0.])
else:
raise ValueError('vector must have non-null norm')
else:
if a_vector[0] > tol:
orthogonal = vector[::-1] * (-1, 1)
elif a_vector[1] > tol:
orthogonal = vector[::-1] * (1, -1)
else:
raise ValueError('vector must have non-null norm')
orthogonal /= norm(orthogonal)
return orthogonal
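# Illustrative sketch, not part of the original module: orthogonal_vector
# returns a unit vector perpendicular to its input, in both 2-D and 3-D.
def _demo_orthogonal_vector():
    from numpy import array, dot
    v = array([1., 2., 2.])
    o = orthogonal_vector(v)
    assert abs(dot(v, o)) < 1e-8  # perpendicular to the input
    assert abs(dot(o, o) - 1.) < 1e-8  # unit length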
def box_cut(points, direction, mapped_points=None, max_main=None, min_main=None):
if mapped_points is None:
mapped_points = dot(direction, points.T)
if max_main is None:
max_main = mapped_points.max()
if min_main is None:
min_main = mapped_points.min()
mid_main = (max_main + min_main) / 2.
split1 = where(mapped_points <= mid_main)
split2 = where(mapped_points > mid_main)
return split1, split2
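# Illustrative sketch, not part of the original module: box_cut splits point
# indices at the midpoint of their projections onto `direction`.
def _demo_box_cut():
    from numpy import array
    points = array([[0., 0.], [1., 0.], [4., 0.]])
    split1, split2 = box_cut(points, array([1., 0.]))
    # projections are 0, 1 and 4, so the midpoint is 2: indices 0 and 1
    # fall into the first half, index 2 into the second
    assert list(split1[0]) == [0, 1] and list(split2[0]) == [2]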
def all_obb_2d(points, vectors, labels, tol=1e-8, level=0, intersection_threshold=.8, split_threshold=.2, box=None):
if (box is not None) and (points is box.points) and (vectors is box.vectors) and (labels is box.labels):
box_center = box
box.level = level
else:
box_center = Box2D(points, vectors, labels, level)
level += 1
if len(unique(labels)) == 1:
return [box_center]
# First compute the splitting across the fibers
split_along_fiber = True
left, right = box_cut(points, box_center.orthogonal,
mapped_points=box_center.points_orthogonal)
labels_left = labels[left]
labels_right = labels[right]
if len(intersect1d(labels_left, labels_right)) >= len(unique(labels)) * intersection_threshold:
split_along_fiber = True
else:
points_left = points[left]
vectors_left = vectors[left]
box_left = Box2D(points_left, vectors_left, labels_left)
points_right = points[right]
vectors_right = vectors[right]
        box_right = Box2D(points_right, vectors_right, labels_right)
if (box_left.volume + box_right.volume) < (1 - split_threshold) * box_center.volume:
split_along_fiber = False
left = all_obb_2d(
points_left, vectors_left, labels_left, tol=tol, level=level,
intersection_threshold=intersection_threshold, box=box_left)
right = all_obb_2d(
points_right, vectors_right, labels_right, tol=tol, level=level,
intersection_threshold=intersection_threshold, box=box_right)
else:
split_along_fiber = True
if split_along_fiber: # If we could not split across we split along
left, right = box_cut(
points, box_center.orientation, mapped_points=box_center.points_orientation)
labels_left = labels[left]
labels_right = labels[right]
if len(intersect1d(labels_left, labels_right)) <= len(unique(labels)) * intersection_threshold:
return [box_center]
points_left = points[left]
vectors_left = vectors[left]
left = all_obb_2d(
points_left, vectors_left, labels_left, tol=tol, level=level,
intersection_threshold=intersection_threshold)
points_right = points[right]
vectors_right = vectors[right]
right = all_obb_2d(
points_right, vectors_right, labels_right, tol=tol, level=level,
intersection_threshold=intersection_threshold)
box_center.left = left[0]
box_center.right = right[0]
left[0].parent = box_center
right[0].parent = box_center
return [box_center] + left + right
def all_obb_3d_along_tract(
points, vectors, labels, tol=1e-8, level=0,
intersection_threshold=.8, split_threshold=.2,
box=None, clean=False, point_ids=None
):
if point_ids is None:
point_ids = arange(len(points))
if (
(box is not None) and (points is box.points) and
(vectors is box.vectors) and (labels is box.labels)
):
box_center = box
box.level = level
else:
box_center = Box3D(points, point_ids, vectors, labels, level)
level += 1
if len(points) == 1:
return [box_center]
unique_labels = unique(labels)
left, right = box_cut(
points, box_center.orientation,
mapped_points=box_center.points_orientation
)
masks = {
'left': left,
'right': right
}
split_labels = {
'left': labels[left],
'right': labels[right]
}
labels_both = intersect1d(split_labels['left'], split_labels['right'])
    if clean:
        total_labels_count = bincount(labels)
        labels_count = {
            side: bincount(split_labels[side])
            for side in split_labels
        }
        labels_ratio = {
            side: nan_to_num(
                labels_count[side] * 1. / total_labels_count[:len(labels_count[side])]
            ) for side in labels_count
        }
new_results = [box_center]
if (
(len(labels_both) <= len(unique_labels) * intersection_threshold) and
(box_center.points_orientation.ptp() / 2. < min((norm(v) for v in vectors)))
):
return new_results
for side in ('left', 'right'):
mask = masks[side]
new_labels = split_labels[side]
if len(new_labels) > 0:
new_points = points[mask]
new_point_ids = point_ids[mask]
new_vectors = vectors[mask]
if clean:
                clean_labels = in1d(
                    new_labels,
                    intersect1d(labels_both, (labels_ratio[side] > .2).nonzero()[0]),
                )
new_points = new_points[clean_labels]
new_point_ids = new_point_ids[clean_labels]
new_vectors = new_vectors[clean_labels]
new_labels = new_labels[clean_labels]
if len(new_points) > 1:
new_tree = all_obb_3d_along_tract(
new_points, new_vectors, new_labels,
tol=tol, level=level, point_ids=new_point_ids,
intersection_threshold=intersection_threshold, clean=clean
)
setattr(box_center, side, new_tree[0])
getattr(box_center, side).parent = box_center
new_results += new_tree
return new_results
def all_obb_3d(points, vectors, labels, tol=1e-8, level=0, intersection_threshold=.8, split_threshold=.2, box=None, clean=False, point_ids=None):
if point_ids is None:
point_ids = arange(len(points))
if (
(box is not None) and (points is box.points) and
(vectors is box.vectors) and (labels is box.labels)
):
box_center = box
box.level = level
else:
box_center = Box3D(
points, point_ids, vectors,
labels, level
)
level += 1
if len(points) == 1:
return [box_center]
unique_labels = unique(labels)
for orientation in ('orthogonal1', 'orthogonal2', 'orientation'):
left, right = box_cut(
points, getattr(box_center, orientation),
mapped_points=getattr(box_center, 'points_' + orientation)
)
masks = {
'left': left,
'right': right
}
split_labels = {
'left': labels[left],
'right': labels[right]
}
labels_both = intersect1d(split_labels['left'], split_labels['right'])
if len(labels_both) == 0:
break
    if clean:
        total_labels_count = bincount(labels)
        labels_count = {
            side: bincount(split_labels[side])
            for side in split_labels
        }
        labels_ratio = {
            side: nan_to_num(
                labels_count[side] * 1. / total_labels_count[:len(labels_count[side])]
            ) for side in labels_count
        }
new_results = [box_center]
print level
if (
orientation == 'orientation' and
(len(labels_both) <= len(unique_labels) * intersection_threshold) # and
#(box_center.points_orientation.ptp() / 2. > min((norm(v) for v in vectors)))
):
return new_results
for side in ('left', 'right'):
mask = masks[side]
new_labels = split_labels[side]
if len(new_labels) > 0:
new_points = points[mask]
new_point_ids = point_ids[mask]
new_vectors = vectors[mask]
if clean:
                clean_labels = in1d(
                    new_labels,
                    intersect1d(labels_both, (labels_ratio[side] > .2).nonzero()[0]),
                )
new_points = new_points[clean_labels]
new_point_ids = new_point_ids[clean_labels]
new_vectors = new_vectors[clean_labels]
new_labels = new_labels[clean_labels]
if len(new_points) > 1:
new_tree = all_obb_3d(
new_points, new_vectors, new_labels, tol=tol, level=level, point_ids=new_point_ids,
intersection_threshold=intersection_threshold, clean=clean)
setattr(box_center, side, new_tree[0])
getattr(box_center, side).parent = box_center
new_results += new_tree
return new_results
def all_obb_3d_nr(points_, vectors_, labels_, tol=1e-8, level_=0, intersection_threshold=.8, split_threshold=.2, robustify=None, point_ids_=None):
if point_ids_ is None:
point_ids_ = arange(len(points_))
    # Box3DRich is the variant that accepts the robustify option
    root = Box3DRich(points_, point_ids_, vectors_, labels_, level_, robustify=robustify)
stack = [root]
total_points = len(points_)
points_done = 0
while len(stack):
box = stack.pop()
level = box.level + 1
if len(box.points) == 1:
continue
unique_labels = unique(box.labels)
for orientation in ('orthogonal1', 'orthogonal2', 'orientation'):
left, right = box_cut(
box.points, getattr(box, orientation),
mapped_points=getattr(box, 'points_' + orientation)
)
masks = {
'left': left,
'right': right
}
split_labels = {
'left': box.labels[left],
'right': box.labels[right]
}
labels_both = intersect1d(split_labels['left'], split_labels['right'])
if len(labels_both) == 0:
break
print level, len(unique_labels), len(box.points), total_points - points_done
if (
orientation == 'orientation' and
(len(labels_both) <= len(unique_labels) * intersection_threshold) # and
#(box_center.points_orientation.ptp() / 2. > min((norm(v) for v in vectors)))
):
points_done += len(box.points)
continue
for side in ('left', 'right'):
mask = masks[side]
new_labels = split_labels[side]
if len(new_labels) > 0:
new_points = box.points[mask]
new_point_ids = box.point_ids[mask]
new_vectors = box.vectors[mask]
if len(new_points) > 1 and len(new_points) < len(box.points):
                    new_box = Box3DRich(new_points, new_point_ids, new_vectors, new_labels, level, robustify=robustify)
setattr(box, side, new_box)
getattr(box, side).parent = box
print "\tAdded to stack ", side
stack.append(new_box)
else:
points_done += len(new_points)
return root
def all_obb_3d_old(points, vectors, labels, tol=1e-8, level=0, intersection_threshold=.8, split_threshold=.2, box=None, point_ids=None):
if point_ids is None:
point_ids = arange(len(points))
if (box is not None) and (points is box.points) and (vectors is box.vectors) and (labels is box.labels):
box_center = box
box.level = level
else:
box_center = Box3D(points, point_ids, vectors, labels, level)
level += 1
if len(points) == 1:
return [box_center]
# First compute the splitting across the fibers
split_along_fiber = True
o1_left, o1_right = box_cut(
points, box_center.orthogonal1, mapped_points=box_center.points_orthogonal1)
o2_left, o2_right = box_cut(
points, box_center.orthogonal2, mapped_points=box_center.points_orthogonal2)
o1_labels_left = labels[o1_left]
o1_labels_right = labels[o1_right]
o2_labels_left = labels[o2_left]
o2_labels_right = labels[o2_right]
unique_labels = unique(labels)
if (
len(intersect1d(o1_labels_left, o1_labels_right)) > 0 and
len(intersect1d(o2_labels_left, o2_labels_right)) > 0
):
split_along_fiber = True
else:
        o1_box_left = Box3D(
            points[o1_left], point_ids[o1_left], vectors[o1_left], o1_labels_left)
        o1_box_right = Box3D(
            points[o1_right], point_ids[o1_right], vectors[o1_right], o1_labels_right)
        o2_box_left = Box3D(
            points[o2_left], point_ids[o2_left], vectors[o2_left], o2_labels_left)
        o2_box_right = Box3D(
            points[o2_right], point_ids[o2_right], vectors[o2_right], o2_labels_right)
        if (o1_box_left.volume + o1_box_right.volume) < (o2_box_left.volume + o2_box_right.volume):
            box_left = o1_box_left
            box_right = o1_box_right
        else:
            box_left = o2_box_left
            box_right = o2_box_right
if (box_left.volume + box_right.volume) < (1 - split_threshold) * box_center.volume:
split_along_fiber = False
            left = all_obb_3d_old(
                box_left.points, box_left.vectors, box_left.labels, tol=tol, level=level,
                intersection_threshold=intersection_threshold, box=box_left)
            right = all_obb_3d_old(
                box_right.points, box_right.vectors, box_right.labels, tol=tol, level=level,
                intersection_threshold=intersection_threshold, box=box_right)
else:
split_along_fiber = True
if split_along_fiber: # If we could not split across we split along
left, right = box_cut(
points, box_center.orientation, mapped_points=box_center.points_orientation)
labels_left = labels[left]
labels_right = labels[right]
if len(intersect1d(labels_left, labels_right)) <= len(unique_labels) * intersection_threshold:
return [box_center]
        points_left = points[left]
        point_ids_left = point_ids[left]
        vectors_left = vectors[left]
        left = all_obb_3d_old(
            points_left, vectors_left, labels_left, tol=tol, level=level,
            point_ids=point_ids_left, intersection_threshold=intersection_threshold)
        points_right = points[right]
        point_ids_right = point_ids[right]
        vectors_right = vectors[right]
        right = all_obb_3d_old(
            points_right, vectors_right, labels_right, tol=tol, level=level,
            point_ids=point_ids_right, intersection_threshold=intersection_threshold)
box_center.left = left[0]
box_center.right = right[0]
left[0].parent = box_center
right[0].parent = box_center
return [box_center] + left + right
def point_coverage_by_level(obbs, points):
level = 0
points_level = [obb.points for obb in obbs if obb.level == level]
level_coverage = []
while len(points_level) > 0:
        level_coverage.append(
            sum(len(p) for p in points_level) * 1. / len(points))
level += 1
points_level = [obb.points for obb in obbs if obb.level == level if len(
obb.points) > 0]
return array(level_coverage)
def draw_boxes_2d(obbs, level, color=None, **args):
from pylab import plot, cm
for i, obb in enumerate(obbs):
if obb.level != level:
continue
box = vstack([obb.box, obb.box[0]])
if color is None:
plot(box.T[0], box.T[1], lw=5, hold=True, **args)
else:
plot(box.T[0], box.T[
1], lw=5, hold=True, c=cm.jet(color[i]), **args)
def draw_box_2d(obbs, **args):
from pylab import plot, quiver
if isinstance(obbs, Box2D):
obbs = [obbs]
for obb in obbs:
box = vstack([obb.box, obb.box[0]])
plot(box.T[0], box.T[1], lw=5, hold=True, **args)
quiver([obb.center[0]], [obb.center[1]], [obb.orientation[
0]], [obb.orientation[1]], pivot='middle', hold=True, **args)
def draw_box_3d(obbs, tube_radius=1, color=None, **kwargs):
from mayavi.mlab import plot3d
from numpy.random import rand
if isinstance(obbs, Box2D):
obbs = [obbs]
for obb in obbs:
if color is None:
color_ = tuple(rand(3))
else:
color_ = color
box = obb.box
b1 = vstack([box[:4], box[0]]).T
b2 = vstack([box[4:], box[4]]).T
es = [vstack([b1.T[i], b2.T[i]]).T for i in xrange(4)]
plot3d(b1[0], b1[1], b1[
2], tube_radius=tube_radius, color=color_, **kwargs)
plot3d(b2[0], b2[1], b2[
2], tube_radius=tube_radius, color=color_, **kwargs)
[plot3d(e[0], e[1], e[
2], tube_radius=tube_radius, color=color_, **kwargs) for e in es]
def oriented_trace(obb, positive=True, generations=2, angle_threshold=pi / 4):
tract = [obb]
center = obb
candidates = center.siblings(generations)
if positive:
sg = 1
else:
sg = -1
while len(candidates) > 0:
next_candidate_distance = inf
for c in candidates:
signed_distance = sg *\
sign(center.center_signed_orientational_distance(c)) *\
center.center_distance(c)
if (signed_distance <= 0) or\
not center.overlap_orthogonal(c) or\
arccos(dot(center.orientation, c.orientation)) > angle_threshold:
continue
if signed_distance < next_candidate_distance:
next_candidate_distance = signed_distance
next_candidate = c
if next_candidate_distance < inf:
if next_candidate in tract:
break
tract.append(next_candidate)
if dot(center.orientation, next_candidate.orientation) < 0:
next_candidate.swap_direction()
center = next_candidate
candidates = center.siblings(generations)
else:
break
return tract
def trace(obb, generations=2, angle_threshold=pi / 4):
trace_positive = oriented_trace(obb, True, generations, angle_threshold)
trace_negative = oriented_trace(obb, False, generations, angle_threshold)
return trace_negative[::-1] + trace_positive
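# Illustrative sketch, not part of the original module (assumes an OBB list
# built with obb_from_tractography, defined below): trace a chain of
# overlapping, similarly oriented boxes through the tree in both directions.
#
#   obbs = obb_from_tractography(tractography, along_tract=True)
#   chain = trace(obbs[0], generations=2)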
def get_most_probable_trace(obbs, generations=2, angle_threshold=pi / 4, return_all=True):
traces_list = [trace(obb, generations=generations,
angle_threshold=angle_threshold) for obb in obbs]
traces_w_set = [(t, set(t)) for t in traces_list]
n = 1. * len(traces_w_set)
traces_with_frequency = []
while len(traces_w_set) > 0:
trace_ = traces_w_set.pop()
traces_w_set_new = []
count = 1
for t in traces_w_set:
if t[1] == trace_[1]:
count += 1
else:
traces_w_set_new.append(t)
traces_with_frequency.append((count / n, trace_[0]))
traces_w_set = traces_w_set_new
    traces_with_frequency.sort(key=lambda x: x[0], reverse=True)
return traces_with_frequency
def get_level(tree, level):
if tree is None or tree.level > level:
return []
elif tree.level == level:
return [tree]
else:
return get_level(tree.left, level) + get_level(tree.right, level)
def overlapping_boxes(tree, box, levels=None, threshold=0.):
if tree is None:
return []
overlap = tree.overlap_volume(box)
if overlap < threshold:
return []
else:
left = overlapping_boxes(
tree.left, box, levels=levels, threshold=threshold)
right = overlapping_boxes(
tree.right, box, levels=levels, threshold=threshold)
if levels is None or tree.level in levels:
return [tree] + left + right
else:
return left + right
def containing_boxes(tree, box, levels=None, threshold=1.):
    if tree is None or (levels is not None and tree.level > max(levels)):
        return []
    normalized_overlap = tree.overlap_volume(box) / box.volume
    if normalized_overlap < threshold:
        return []
    else:
        left = containing_boxes(
            tree.left, box, levels=levels, threshold=threshold)
        right = containing_boxes(
            tree.right, box, levels=levels, threshold=threshold)
        if levels is None or tree.level in levels:
            return [tree] + left + right
        else:
            return left + right
def min_max(vector, axis=None):
return array((vector.min(axis), vector.max(axis)))
def overlap_vtk(self, box):
    a = self
    b = box
    axes_a = vstack((a.orientation, a.orthogonal1, a.orthogonal2))
    axes_b = vstack((b.orientation, b.orthogonal1, b.orthogonal2))
    a2b = b.center - a.center
    a_a2b_limits = min_max(dot(a2b, a.box.T))
    b_a2b_limits = min_max(dot(a2b, b.box.T))
    # the boxes are disjoint along a2b if one interval ends before the
    # other begins
    if (
        a_a2b_limits[1] < b_a2b_limits[0] or
        b_a2b_limits[1] < a_a2b_limits[0]
    ):
        return False
    return True
def obb_tree_dfs(obb_tree):
for obb in obb_tree:
if obb.level == 0:
root = obb
break
else:
raise ValueError('No root in the tree')
return obb_tree_dfs_recursive(root)
def obb_tree_dfs_recursive(obb_node):
if obb_node is None:
return []
if obb_node.left is None and obb_node.right is None:
return [obb_node]
return obb_tree_dfs_recursive(obb_node.left) + obb_tree_dfs_recursive(obb_node.right)
def prototype_tract(
tracts, obb_tree=None, intersection_threshold=.01, minimum_level=0,
clean=False, return_obb_tree=False, return_leave_centers=False
):
if obb_tree is None:
points = vstack([t[:-1] for t in tracts])
vectors = vstack([t[1:] - t[:-1] for t in tracts])
labels = hstack([repeat(i, len(t) - 1) for i, t in enumerate(tracts)])
obb_tree = all_obb_3d_along_tract(
points, vectors, labels,
intersection_threshold=intersection_threshold, clean=clean
)
if minimum_level < 0:
max_level = max((obb.level for obb in obb_tree))
        # negative values count back from the deepest level of the tree
        minimum_level = max_level + 1 + minimum_level
leave_centers = array(
[obb.center for obb in obb_tree if obb.left is None and obb.right is None and obb.level >
minimum_level]
)
mse_tract = array([
((t[..., None] - leave_centers[..., None].T) ** 2).sum(1).min(0).sum()
for t in tracts
])
tract_index = mse_tract.argmin()
if return_obb_tree or return_leave_centers:
res = (tract_index,)
if return_obb_tree:
res += (obb_tree,)
if return_leave_centers:
res += (leave_centers,)
return res
else:
return tract_index
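# Illustrative sketch, not part of the original module (assumes `tracts` is a
# sequence of (N_i, 3) point arrays): pick the tract whose points lie closest
# to the leaf centers of the OBB tree.
#
#   index = prototype_tract(tracts, intersection_threshold=.01)
#   prototype = tracts[index]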
def obb_tree_level(obb_tree, level, include_superior_leaves=True):
if not isinstance(obb_tree, Box3D):
node = obb_tree[0]
for n in obb_tree:
if n.level < node.level:
node = n
else:
node = obb_tree
return obb_tree_level_dfs(node, level, include_superior_leaves=include_superior_leaves)
def obb_tree_level_dfs(obb_node, level, include_superior_leaves=True):
if obb_node is None or obb_node.level > level:
return []
if (
obb_node.level == level or
(
include_superior_leaves and
obb_node.level < level and
obb_node.left is None and obb_node.right is None
)
):
return [obb_node]
return (
obb_tree_level_dfs(obb_node.left, level, include_superior_leaves=include_superior_leaves) +
obb_tree_level_dfs(
obb_node.right, level, include_superior_leaves=include_superior_leaves)
)
def obb_from_tractography(tractography, *args, **kwargs):
along_tract = False
if 'along_tract' in kwargs and kwargs['along_tract']:
along_tract = True
fibers = tractography.tracts()
points = vstack([f[:-1] for f in fibers])
vectors = vstack([f[1:] - f[:-1] for f in fibers])
labels = hstack([repeat(i, len(f) - 1) for i, f in enumerate(fibers)])
if along_tract:
obbs3d = all_obb_3d_along_tract(
points, vectors, labels, **kwargs
)
else:
obbs3d = all_obb_3d_nr(
points, vectors, labels, **kwargs
)
return obbs3d
|
|
import random
import math
def gen_gradient(args, resource, inflow, radius, loc, common=True):
"""
Returns a line of text to add to an environment file, initializing a
gradient resource with the specified
name (string), inflow(int), radius(int), and location (tuple of ints)
"""
return "".join(["GRADIENT_RESOURCE ", str(resource), ":height=",
str(radius), ":plateau=", str(inflow), ":spread=",
str(radius-1), ":common=", str(int(common)),
":updatestep=1000000:peakx=", str(loc[0]), ":peaky=",
str(loc[1]), ":plateau_inflow=", str(inflow), ":initial=",
str(inflow) + "\n"])
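# Illustrative sketch, not part of the original script: gen_gradient only
# formats a config line, so `args` may be any object here (it is unused).
def _demo_gen_gradient():
    line = gen_gradient(None, "resAND0", 100, 5, (10, 20))
    assert line.startswith("GRADIENT_RESOURCE resAND0:height=5:plateau=100")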
def gen_res(args, resource, inflow, outflow):
"""
Returns a line of text to add to an environment file, initializing a
standard resource with the specified name (string), inflow(int), and
outflow(int)
"""
return "".join(["RESOURCE ", resource, ":inflow=", str(inflow),
":outflow=", str(outflow), "\n"])
def gen_cell(args, resource, cells):
    """
    Returns a line of text to add to an environment file, initializing a
    cell resource with the specified name (string) over the given list of
    cell indices, using the cell inflow/outflow settings from args.
    """
return "".join(["CELL ", resource, ":",
",".join([str(i) for i in cells]),
":inflow=", str(args.cellInflow), ":outflow=",
str(args.cellOutflow), ":initial=", str(args.inflow),
"\n"])
def gen_reaction(args, resource, depletable=0):
"""
Returns a line of text to add to an environment file, initializing a
reaction that uses the resource specified in the first
argument to perform the associated task (resource names are expected to
be of the form "resTASK#" where "TASK" corresponds
to the task the resource is associated with and # is an integer uniquely
identifying that specific gradient resource. For
example, the first AND resource would be named resAND0). An optional
second argument (int) specifies whether or not the reaction
should deplete the resource (by default it will not).
"""
task = resource.lower()
if task[:3] == "res":
task = task[3:]
while task[-1].isdigit():
task = task[:-1]
name = resource[3:]
return "".join(["REACTION ", name, " ", task, " process:resource=",
resource, ":value=", str(args.taskValDict[task]), ":type=",
args.rxnType, ":frac=", str(args.frac), ":max=",
str(args.resMax), ":depletable=", str(int(depletable)),
" requisite:max_count=", str(args.maxCount), "\n"])
def calcEvenAnchors(args):
"""
Calculates anchor points evenly spaced across the world, given
user-specified parameters.
Note: May not be exactly even if world size is not divisible by
patches+1.
    Note: Evenness is based on the bounded world, not the toroidal one.
"""
anchors = []
dist = int((args.worldSize)/(args.patchesPerSide+1))
for i in range(dist, args.worldSize, dist):
for j in range(dist, args.worldSize, dist):
anchors.append((i, j))
return anchors
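# Illustrative sketch, not part of the original script: a 9x9 world with 2
# patches per side yields anchors spaced 3 apart, skipping the border.
def _demo_calcEvenAnchors():
    class _Args(object):
        worldSize = 9
        patchesPerSide = 2
    assert calcEvenAnchors(_Args()) == [(3, 3), (3, 6), (6, 3), (6, 6)]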
def calcRandomAnchors(args, inworld=True):
"""
Generates a list of random anchor points such that all circles will fit
in the world, given the specified radius and worldsize.
The number of anchors to generate is given by nPatches
"""
anchors = []
rng = (args.patchRadius, args.worldSize - args.patchRadius)
if not inworld:
rng = (0, args.worldSize)
for i in range(args.nPatches):
anchors.append((random.randrange(rng[0], rng[1]),
random.randrange(rng[0], rng[1])))
return anchors
def pairwise_point_combinations(xs, ys, anchors):
"""
Does an in-place addition of the four points that can be composed by
combining coordinates from the two lists to the given list of anchors
"""
for i in xs:
anchors.append((i, max(ys)))
anchors.append((i, min(ys)))
for i in ys:
anchors.append((max(xs), i))
anchors.append((min(xs), i))
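# Illustrative sketch, not part of the original script: the four corners of
# the rectangle spanned by xs and ys are appended to the anchors in place.
def _demo_pairwise_point_combinations():
    anchors = []
    pairwise_point_combinations([1, 5], [2, 8], anchors)
    assert set(anchors) == set([(1, 2), (1, 8), (5, 2), (5, 8)])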
def get_surrounding_coord_even(coord, d, dsout):
return [coord + int(math.floor(d/2.0))+d*i for i in range(dsout)]\
+ [coord - (int(math.ceil(d/2.0))+d*i) for i in range(dsout)]
def get_surrounding_coord_odd(coord, d, dsout):
return [coord + d*i for i in range(dsout)] +\
[coord - d*i for i in range(dsout)]
def add_anchors(centerPoint, d, dsout, anchors, even=True):
    if even:
        xs = get_surrounding_coord_even(centerPoint[0], d, dsout)
        ys = get_surrounding_coord_even(centerPoint[1], d, dsout)
    else:
        xs = get_surrounding_coord_odd(centerPoint[0], d, dsout)
        ys = get_surrounding_coord_odd(centerPoint[1], d, dsout)
    pairwise_point_combinations(xs, ys, anchors)
def calcTightAnchors(args, d, patches):
"""
Recursively generates the number of anchor points specified in the
patches argument, such that all patches are d cells away
from their nearest neighbors.
"""
centerPoint = (int(args.worldSize/2), int(args.worldSize/2))
anchors = []
if patches == 0:
pass
elif patches == 1:
anchors.append(centerPoint)
elif patches % 2 == 0:
dsout = int((patches-2)//2) + 1
add_anchors(centerPoint, d, dsout, anchors, True)
if d != 0:
anchors = list(set(anchors))
anchors.sort()
if dsout != 1:
return (anchors +
calcTightAnchors(args, d, patches-2)
)[:patches*patches]
# to cut off the extras in the case where d=0
else:
# Note - an odd number of args.patchesPerSide requires that there be
# a patch at the centerpoint
dsout = int((patches-1)//2)
add_anchors(centerPoint, d, dsout, anchors, False)
if dsout != 1:
            return anchors + calcTightAnchors(args, d, patches-2)
return anchors
def random_patch(args, size):
start_point = [random.randrange(0, args.worldSize),
random.randrange(0, args.worldSize)]
curr_patch = [start_point]
perimeter = [start_point]
while len(curr_patch) < size:
to_expand = random.choice(perimeter)
neighbors = get_moore_neighbors_toroidal(args, to_expand)
neighbors = [n for n in neighbors if n not in curr_patch]
if len(neighbors) == 0:
perimeter.remove(to_expand)
continue
elif len(neighbors) == 1:
perimeter.remove(to_expand)
next_cell = random.choice(neighbors)
perimeter.append(next_cell)
curr_patch.append(next_cell)
index_cells = []
for cell in curr_patch:
index_cells.append(cell[0] % args.worldSize + cell[1]*args.worldSize)
return index_cells
def get_moore_neighbors_toroidal(args, cell):
neighbors = []
for x in range(cell[0]-1, cell[0]+2):
for y in range(cell[1]-1, cell[1]+2):
neighbors.append([x, y])
for j in range(2):
if neighbors[-1][j] < 0:
neighbors[-1][j] += args.worldSize
elif neighbors[-1][j] >= args.worldSize:
neighbors[-1][j] -= args.worldSize
neighbors.remove(cell)
return neighbors
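# Illustrative sketch, not part of the original script: on a toroidal world a
# corner cell still has all eight Moore neighbors, wrapped around the edges.
def _demo_get_moore_neighbors_toroidal():
    class _Args(object):
        worldSize = 10
    neighbors = get_moore_neighbors_toroidal(_Args(), [0, 0])
    assert len(neighbors) == 8
    assert [9, 9] in neighbors  # the wrapped diagonal neighbor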
def genRandResources(args, resources):
"""
Generates a list of the appropriate length containing a roughly equal
number of all resources in a random order
"""
randResources = []
nEach = int(args.nPatches // len(resources))
extras = int(args.nPatches % len(resources))
for i in range(nEach):
for res in resources:
randResources.append(res + str(i))
additional = random.sample(resources, extras)
for res in additional:
randResources.append(res + str(nEach))
random.shuffle(randResources)
return randResources
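# Illustrative sketch, not part of the original script: five patches over two
# resources gives each base resource two or three numbered instances.
def _demo_genRandResources():
    class _Args(object):
        nPatches = 5
    resources = genRandResources(_Args(), ["resAND", "resOR"])
    assert len(resources) == 5
    assert sum(1 for r in resources if r.startswith("resAND")) in (2, 3)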
def place_even_squares(args, side):
cells = []
for y in range(0, args.worldSize, side*2):
for x in range(0, args.worldSize, side*2):
for z in range(side):
for z2 in range(side):
cells.append((y+z)*args.worldSize + x + z2)
cells.sort()
return cells
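# Illustrative sketch, not part of the original script: 2x2 squares tiled
# with a one-square gap, returned as flat indices (row * worldSize + column).
def _demo_place_even_squares():
    class _Args(object):
        worldSize = 8
    cells = place_even_squares(_Args(), 2)
    assert cells[:4] == [0, 1, 4, 5]  # first row of the first two squares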
|
|
#(C) Copyright Syd Logan 2017-2020
#(C) Copyright Thousand Smiles Foundation 2017-2020
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from rest_framework.views import APIView
from rest_framework.exceptions import APIException, NotFound
from rest_framework.response import Response
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from register.models import *
from clinic.models import *
from patient.models import *
from datetime import *
from django.core import serializers
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseBadRequest, HttpResponseServerError, HttpResponseNotFound
from common.decorators import *
import sys
import numbers
import json
class RegisterView(APIView):
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def serialize(self, entry):
m = {}
m["id"] = entry.id
m["clinic"] = entry.clinic_id
m["patient"] = entry.patient_id
m["timein"] = entry.timein
m["timeout"] = entry.timeout
if entry.state == 'i':
m["state"] = "Checked In"
else:
m["state"] = "Checked Out"
return m
@log_request
def get(self, request, register_id=None, format=None):
register = None
badRequest = False
aPatient = None
aClinic = None
kwargs = {}
if register_id:
try:
register = Register.objects.get(id = register_id)
except:
register = None
else:
# look for optional arguments
patientid = request.GET.get("patient", '')
if patientid and patientid != '':
try:
aPatient = Patient.objects.get(id=patientid)
if not aPatient:
badRequest = True
else:
kwargs["patient"] = aPatient
except:
badRequest = True
clinicid = request.GET.get("clinic", '')
if clinicid and clinicid != '':
try:
aClinic = Clinic.objects.get(id=clinicid)
if not aClinic:
badRequest = True
else:
kwargs["clinic"] = aClinic
except:
badRequest = True
            state = request.GET.get("state", '')
            if state != '':
                if state != "Checked In" and state != "Checked Out":
                    badRequest = True
                elif state == "Checked In":
                    kwargs["state"] = 'i'
                else:
                    kwargs["state"] = 'o'
if not badRequest:
try:
register = Register.objects.filter(**kwargs)
except:
register = None
if not register and not badRequest:
raise NotFound
elif not badRequest:
if register_id:
ret = self.serialize(register)
else:
ret = []
for x in register:
ret.append(self.serialize(x))
if badRequest:
return HttpResponseBadRequest()
else:
return Response(ret)
def validatePostArgs(self, data):
valid = True
kwargs = data
if "state" in data:
if not (data["state"] == "Checked In" or data["state"] == "Checked Out"):
valid = False
else:
if data["state"] == "Checked In":
kwargs["state"] = "i"
else:
kwargs["state"] = "o"
return valid, kwargs
def validatePutArgs(self, data, register):
valid = True
try:
if "state" in data:
if not (data["state"] == "Checked In" or data["state"] == "Checked Out"):
valid = False
else:
if data["state"] == "Checked In":
register.state = 'i'
register.timein = datetime.now()
else:
register.state = 'o'
register.timeout = datetime.now()
else:
valid = False
except:
valid = False
return valid, register
@log_request
def post(self, request, format=None):
badRequest = False
implError = False
data = json.loads(request.body)
try:
patientid = int(data["patient"])
except:
badRequest = True
try:
clinicid = int(data["clinic"])
except:
badRequest = True
# validate the post data, and get a kwargs dict for
# creating the object
valid, kwargs = self.validatePostArgs(data)
if not valid:
badRequest = True
if not badRequest:
# get the instances
try:
aPatient = Patient.objects.get(id=patientid)
except:
aPatient = None
try:
aClinic = Clinic.objects.get(id=clinicid)
except:
aClinic = None
if not aPatient or not aClinic:
raise NotFound
if not badRequest:
try:
kwargs["clinic"] = aClinic
kwargs["patient"] = aPatient
register = Register(**kwargs)
                if register:
                    register.save()
                else:
                    implError = True
                    implMsg = "unable to create register object"
            except Exception as e:
                implError = True
                implMsg = str(e)
if badRequest:
return HttpResponseBadRequest()
if implError:
return HttpResponseServerError(implMsg)
else:
return Response({'id': register.id})
@log_request
def put(self, request, register_id=None, format=None):
badRequest = False
implError = False
notFound = False
if not register_id:
badRequest = True
if not badRequest:
register = None
try:
register = Register.objects.get(id=register_id)
except:
pass
if not register:
notFound = True
else:
try:
data = json.loads(request.body)
valid, register = self.validatePutArgs(data, register)
if valid:
register.save()
else:
badRequest = True
except:
implError = True
implMsg = sys.exc_info()[0]
if badRequest:
return HttpResponseBadRequest()
if notFound:
return HttpResponseNotFound()
if implError:
return HttpResponseServerError(implMsg)
else:
return Response({})
@log_request
def delete(self, request, register_id=None, format=None):
register = None
# see if the state change object exists
if not register_id:
return HttpResponseBadRequest()
try:
register = Register.objects.get(id=register_id)
except:
register = None
if not register:
raise NotFound
else:
register.delete()
return Response({})
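# Illustrative request sketch, not part of the original module (the URL
# routing lives in urls.py, not here, so the paths below are hypothetical):
#
#   POST .../register/    body: {"patient": 5, "clinic": 3,
#                                "state": "Checked In"}
#   PUT  .../register/7/  body: {"state": "Checked Out"}
#   GET  .../register/?clinic=3&state=Checked In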
|
|
# -*- coding: utf-8 -*-
"""A searcher to find file entries within a file system."""
import re
import sre_constants
from dfvfs.lib import definitions
from dfvfs.lib import errors
from dfvfs.path import factory as path_spec_factory
class FindSpec(object):
"""Find specification object."""
def __init__(
self, file_entry_types=None, is_allocated=True, location=None,
location_regex=None, case_sensitive=True):
"""Initializes the find specification object.
Args:
file_entry_types: Optional file entry types list or None to indicate
no preference. The default is None.
is_allocated: Optional boolean value to indicate the file entry should
be allocated, where None represents no preference.
The default is True.
location: Optional location string or list of location segments, or None
to indicate no preference. The location should be defined
relative to the root of the file system. The default is None.
Note that the string will be split into segments based on the
file system specific path segment separator.
location_regex: Optional location regular expression string or list of
location regular expression segments, or None to indicate
no preference. The location regular expression should be
defined relative to the root of the file system. The
default is None. Note that the string will be split into
segments based on the file system specific path segment
separator.
      case_sensitive: Optional boolean value to indicate string matches should
                      be case sensitive. The default is True.
Raises:
TypeError: if the location or location_regex type is not supported.
ValueError: if both location and location_regex are set.
"""
if location is not None and location_regex is not None:
raise ValueError(
u'The location and location_regex arguments cannot be used at same '
u'time.')
super(FindSpec, self).__init__()
self._file_entry_types = file_entry_types
self._is_allocated = is_allocated
self._is_case_sensitive = case_sensitive
self._is_regex = None
self._location = None
self._location_regex = None
self._location_segments = None
self._number_of_location_segments = 0
# TODO: add support for globbing?
if location is not None:
if isinstance(location, basestring):
self._location = location
elif isinstance(location, list):
self._location_segments = location
else:
raise TypeError(u'Unsupported location type: {0:s}.'.format(
type(location)))
self._is_regex = False
elif location_regex is not None:
if isinstance(location_regex, basestring):
self._location_regex = location_regex
elif isinstance(location_regex, list):
self._location_segments = location_regex
else:
raise TypeError(u'Unsupported location_regex type: {0:s}.'.format(
type(location_regex)))
self._is_regex = True
# TODO: add support for name
# TODO: add support for owner (user, group)
# TODO: add support for permissions (mode)
# TODO: add support for size
# TODO: add support for time values
# TODO: add support for expression e.g.
# attribute['$FILE_NAME'].creation_type == 'x'
def _CheckFileEntryType(self, file_entry):
"""Checks the file entry type find specifications.
Args:
file_entry: the file entry (instance of vfs.FileEntry).
Returns:
True if the file entry matches the find specification, False if not or
None if no file entry type specification is defined.
"""
if self._file_entry_types is None:
return
return (self._CheckIsDevice(file_entry) or
self._CheckIsDirectory(file_entry) or
self._CheckIsFile(file_entry) or
self._CheckIsLink(file_entry) or
self._CheckIsPipe(file_entry) or
self._CheckIsSocket(file_entry))
def _CheckIsAllocated(self, file_entry):
"""Checks the is_allocated find specification.
Args:
file_entry: the file entry (instance of vfs.FileEntry).
Returns:
True if the file entry matches the find specification, False if not or
None if no allocation specification is defined.
"""
if self._is_allocated is None:
return
return self._is_allocated == file_entry.IsAllocated()
def _CheckIsDevice(self, file_entry):
"""Checks the is_device find specification.
Args:
file_entry: the file entry (instance of vfs.FileEntry).
Returns:
True if the file entry matches the find specification, False if not.
"""
if definitions.FILE_ENTRY_TYPE_DEVICE not in self._file_entry_types:
return False
return file_entry.IsDevice()
def _CheckIsDirectory(self, file_entry):
"""Checks the is_directory find specification.
Args:
file_entry: the file entry (instance of vfs.FileEntry).
Returns:
True if the file entry matches the find specification, False if not.
"""
if definitions.FILE_ENTRY_TYPE_DIRECTORY not in self._file_entry_types:
return False
return file_entry.IsDirectory()
def _CheckIsFile(self, file_entry):
"""Checks the is_file find specification.
Args:
file_entry: the file entry (instance of vfs.FileEntry).
Returns:
True if the file entry matches the find specification, False if not.
"""
if definitions.FILE_ENTRY_TYPE_FILE not in self._file_entry_types:
return False
return file_entry.IsFile()
def _CheckIsLink(self, file_entry):
"""Checks the is_link find specification.
Args:
file_entry: the file entry (instance of vfs.FileEntry).
Returns:
True if the file entry matches the find specification, False if not.
"""
if definitions.FILE_ENTRY_TYPE_LINK not in self._file_entry_types:
return False
return file_entry.IsLink()
def _CheckIsPipe(self, file_entry):
"""Checks the is_pipe find specification.
Args:
file_entry: the file entry (instance of vfs.FileEntry).
Returns:
True if the file entry matches the find specification, False if not.
"""
if definitions.FILE_ENTRY_TYPE_PIPE not in self._file_entry_types:
return False
return file_entry.IsPipe()
def _CheckIsSocket(self, file_entry):
"""Checks the is_socket find specification.
Args:
file_entry: the file entry (instance of vfs.FileEntry).
Returns:
True if the file entry matches the find specification, False if not.
"""
if definitions.FILE_ENTRY_TYPE_SOCKET not in self._file_entry_types:
return False
return file_entry.IsSocket()
def _CheckLocation(self, file_entry, search_depth):
"""Checks the location find specification.
Args:
file_entry: the file entry (instance of vfs.FileEntry).
search_depth: the search depth.
Returns:
True if the file entry matches the find specification, False if not.
"""
if self._location_segments is None:
return False
if search_depth < 0 or search_depth > self._number_of_location_segments:
return False
# Note that the root has no entry in the location segments and
# no name to match.
if search_depth == 0:
segment_name = u''
else:
segment_name = self._location_segments[search_depth - 1]
if self._is_regex:
if isinstance(segment_name, basestring):
# Allow '\n' to be matched by '.' and make '\w', '\W', '\b', '\B',
# '\d', '\D', '\s' and '\S' Unicode safe.
flags = re.DOTALL | re.UNICODE
if not self._is_case_sensitive:
flags |= re.IGNORECASE
try:
segment_name = r'^{0:s}$'.format(segment_name)
segment_name = re.compile(segment_name, flags=flags)
except sre_constants.error:
# TODO: set self._location_segments[search_depth - 1] to None ?
return False
self._location_segments[search_depth - 1] = segment_name
elif not self._is_case_sensitive:
segment_name = segment_name.lower()
self._location_segments[search_depth - 1] = segment_name
if search_depth > 0:
if self._is_regex:
if not segment_name.match(file_entry.name):
return False
elif self._is_case_sensitive:
if segment_name != file_entry.name:
return False
elif segment_name != file_entry.name.lower():
return False
return True
def AtMaximumDepth(self, search_depth):
"""Determines if the find specification is at maximum depth.
Args:
search_depth: the search depth.
Returns:
True if at maximum depth, False if not.
"""
if self._location_segments is not None:
if search_depth == self._number_of_location_segments:
return True
return False
def Initialize(self, file_system):
"""Initializes find specification for matching.
Args:
file_system: the file system object (instance of vfs.FileSystem).
"""
if self._location is not None:
self._location_segments = file_system.SplitPath(self._location)
elif self._location_regex is not None:
self._location_segments = file_system.SplitPath(self._location_regex)
if self._location_segments is not None:
self._number_of_location_segments = len(self._location_segments)
def Matches(self, file_entry, search_depth):
"""Determines if the file entry matches the find specification.
Args:
file_entry: the file entry (instance of vfs.FileEntry).
search_depth: the search depth.
Returns:
A tuple containing:
True if the file entry matches the find specification, False otherwise.
True if the location matches, False if not or None if no location
specified.
"""
if self._location_segments is None:
location_match = None
else:
location_match = self._CheckLocation(file_entry, search_depth)
if not location_match:
return False, location_match
if search_depth != self._number_of_location_segments:
return False, location_match
match = self._CheckFileEntryType(file_entry)
if match is not None and not match:
return False, location_match
match = self._CheckIsAllocated(file_entry)
if match is not None and not match:
return False, location_match
return True, location_match
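# Illustrative sketch, not part of the original module: a find specification
# matching regular files by location, relative to the file system root.
#
#   find_spec = FindSpec(
#       location=u'/var/log/syslog',
#       file_entry_types=[definitions.FILE_ENTRY_TYPE_FILE])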
class FileSystemSearcher(object):
"""Searcher object to find file entries within a file system."""
def __init__(self, file_system, mount_point):
"""Initializes the file system searcher.
Args:
file_system: the file system object (instance of vfs.FileSystem).
mount_point: the mount point path specification (instance of
path.PathSpec).
Raises:
PathSpecError: if the mount point path specification is incorrect.
ValueError: when file system or mount point is not set.
"""
if not file_system or not mount_point:
raise ValueError(u'Missing file system or mount point value.')
if path_spec_factory.Factory.IsSystemLevelTypeIndicator(
file_system.type_indicator):
if not hasattr(mount_point, u'location'):
raise errors.PathSpecError(
u'Mount point path specification missing location.')
super(FileSystemSearcher, self).__init__()
self._file_system = file_system
self._mount_point = mount_point
def _FindInFileEntry(self, file_entry, find_specs, search_depth):
"""Searches for matching file entries within the file entry.
Args:
file_entry: the file entry (instance of vfs.FileEntry).
find_specs: a list of find specifications (instances of FindSpec).
search_depth: the search depth.
Yields:
The path specification of the matching file entries (instances of
path.PathSpec).
"""
sub_find_specs = []
for find_spec in find_specs:
match, location_match = find_spec.Matches(file_entry, search_depth)
if match:
yield file_entry.path_spec
      if location_match is not False and not find_spec.AtMaximumDepth(
          search_depth):
sub_find_specs.append(find_spec)
if not sub_find_specs:
return
search_depth += 1
try:
for sub_file_entry in file_entry.sub_file_entries:
for matching_path_spec in self._FindInFileEntry(
sub_file_entry, sub_find_specs, search_depth):
yield matching_path_spec
except errors.AccessError:
pass
def Find(self, find_specs=None):
"""Searches for matching file entries within the file system.
Args:
find_specs: a list of find specifications (instances of FindSpec).
The default is None, which will return all allocated
file entries.
Yields:
The path specification of the matching file entries (instances of
path.PathSpec).
"""
    if not find_specs:
      find_specs = [FindSpec()]
for find_spec in find_specs:
find_spec.Initialize(self._file_system)
if path_spec_factory.Factory.IsSystemLevelTypeIndicator(
self._file_system.type_indicator):
file_entry = self._file_system.GetFileEntryByPathSpec(self._mount_point)
else:
file_entry = self._file_system.GetRootFileEntry()
for matching_path_spec in self._FindInFileEntry(file_entry, find_specs, 0):
yield matching_path_spec
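# Illustrative usage sketch, not part of the original module (assumes
# `file_system` and `mount_point` were resolved elsewhere, e.g. via the
# dfvfs resolver helpers):
#
#   searcher = FileSystemSearcher(file_system, mount_point)
#   find_spec = FindSpec(location=u'/var/log', case_sensitive=False)
#   for path_spec in searcher.Find(find_specs=[find_spec]):
#     print(searcher.GetRelativePath(path_spec))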
def GetFileEntryByPathSpec(self, path_spec):
"""Retrieves a file entry for a path specification.
Args:
path_spec: a path specification (instance of path.PathSpec).
Returns:
A file entry (instance of vfs.FileEntry) or None.
"""
return self._file_system.GetFileEntryByPathSpec(path_spec)
def GetRelativePath(self, path_spec):
"""Returns the relative path based on a resolved path specification.
The relative path is the location of the upper most path specification.
    The location of the mount point is stripped off, if relevant.
Args:
path_spec: the path specification (instance of path.PathSpec).
Returns:
The corresponding relative path or None if the relative path could not
be determined.
Raises:
PathSpecError: if the path specification is incorrect.
"""
location = getattr(path_spec, u'location', None)
if location is None:
raise errors.PathSpecError(u'Path specification missing location.')
if path_spec_factory.Factory.IsSystemLevelTypeIndicator(
self._file_system.type_indicator):
if not location.startswith(self._mount_point.location):
raise errors.PathSpecError(
u'Path specification does not contain mount point.')
else:
if not hasattr(path_spec, u'parent'):
raise errors.PathSpecError(u'Path specification missing parent.')
if path_spec.parent != self._mount_point:
raise errors.PathSpecError(
u'Path specification does not contain mount point.')
path_segments = self._file_system.SplitPath(location)
if path_spec_factory.Factory.IsSystemLevelTypeIndicator(
self._file_system.type_indicator):
mount_point_path_segments = self._file_system.SplitPath(
self._mount_point.location)
path_segments = path_segments[len(mount_point_path_segments):]
return u'{0:s}{1:s}'.format(
self._file_system.PATH_SEPARATOR,
self._file_system.PATH_SEPARATOR.join(path_segments))
def SplitPath(self, path):
"""Splits the path into path segments.
Args:
path: a string containing the path.
Returns:
      A list of path segments without the root path segment, which is an
empty string.
"""
return self._file_system.SplitPath(path)
|
|
# encoding: utf8
from django.test import TestCase
from django.db.migrations.autodetector import MigrationAutodetector, MigrationQuestioner
from django.db.migrations.state import ProjectState, ModelState
from django.db.migrations.graph import MigrationGraph
from django.db import models
class AutodetectorTests(TestCase):
"""
Tests the migration autodetector.
"""
author_empty = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True))])
author_name = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200))])
author_name_longer = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=400))])
author_name_renamed = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("names", models.CharField(max_length=200))])
author_with_book = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("book", models.ForeignKey("otherapp.Book"))])
author_proxy = ModelState("testapp", "AuthorProxy", [], {"proxy": True}, ("testapp.author", ))
author_proxy_notproxy = ModelState("testapp", "AuthorProxy", [], {}, ("testapp.author", ))
other_pony = ModelState("otherapp", "Pony", [("id", models.AutoField(primary_key=True))])
other_stable = ModelState("otherapp", "Stable", [("id", models.AutoField(primary_key=True))])
third_thing = ModelState("thirdapp", "Thing", [("id", models.AutoField(primary_key=True))])
book = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("title", models.CharField(max_length=200))])
book_unique = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("title", models.CharField(max_length=200))], {"unique_together": [("author", "title")]})
book_unique_2 = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("title", models.CharField(max_length=200))], {"unique_together": [("title", "author")]})
edition = ModelState("thirdapp", "Edition", [("id", models.AutoField(primary_key=True)), ("book", models.ForeignKey("otherapp.Book"))])
def make_project_state(self, model_states):
"Shortcut to make ProjectStates from lists of predefined models"
project_state = ProjectState()
for model_state in model_states:
project_state.add_model_state(model_state.clone())
return project_state
def test_arrange_for_graph(self):
"Tests auto-naming of migrations for graph matching."
# Make a fake graph
graph = MigrationGraph()
graph.add_node(("testapp", "0001_initial"), None)
graph.add_node(("testapp", "0002_foobar"), None)
graph.add_node(("otherapp", "0001_initial"), None)
graph.add_dependency(("testapp", "0002_foobar"), ("testapp", "0001_initial"))
graph.add_dependency(("testapp", "0002_foobar"), ("otherapp", "0001_initial"))
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Run through arrange_for_graph
changes = autodetector._arrange_for_graph(changes, graph)
# Make sure there's a new name, deps match, etc.
self.assertEqual(changes["testapp"][0].name, "0003_author")
self.assertEqual(changes["testapp"][0].dependencies, [("testapp", "0002_foobar")])
self.assertEqual(changes["otherapp"][0].name, "0002_pony_stable")
self.assertEqual(changes["otherapp"][0].dependencies, [("otherapp", "0001_initial")])
def test_trim_apps(self):
"Tests that trim does not remove dependencies but does remove unwanted apps"
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable, self.third_thing])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_initial": True}))
changes = autodetector._detect_changes()
# Run through arrange_for_graph
graph = MigrationGraph()
changes = autodetector._arrange_for_graph(changes, graph)
changes["testapp"][0].dependencies.append(("otherapp", "0001_initial"))
changes = autodetector._trim_to_apps(changes, set(["testapp"]))
# Make sure there's the right set of migrations
self.assertEqual(changes["testapp"][0].name, "0001_initial")
self.assertEqual(changes["otherapp"][0].name, "0001_initial")
self.assertNotIn("thirdapp", changes)
def test_new_model(self):
"Tests autodetection of new models"
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_empty])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
self.assertEqual(action.name, "Author")
def test_old_model(self):
"Tests deletion of old models"
# Make state
before = self.make_project_state([self.author_empty])
after = self.make_project_state([])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "DeleteModel")
self.assertEqual(action.name, "Author")
def test_add_field(self):
"Tests autodetection of new fields"
# Make state
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AddField")
self.assertEqual(action.name, "name")
def test_remove_field(self):
"Tests autodetection of removed fields"
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.author_empty])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "RemoveField")
self.assertEqual(action.name, "name")
def test_alter_field(self):
"Tests autodetection of new fields"
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.author_name_longer])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterField")
self.assertEqual(action.name, "name")
def test_rename_field(self):
"Tests autodetection of renamed fields"
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.author_name_renamed])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_rename": True}))
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "RenameField")
self.assertEqual(action.old_name, "name")
self.assertEqual(action.new_name, "names")
def test_fk_dependency(self):
"Tests that having a ForeignKey automatically adds a dependency"
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_name, self.book, self.edition])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
self.assertEqual(len(changes['otherapp']), 1)
self.assertEqual(len(changes['thirdapp']), 1)
# Right number of actions?
migration1 = changes['testapp'][0]
self.assertEqual(len(migration1.operations), 1)
migration2 = changes['otherapp'][0]
self.assertEqual(len(migration2.operations), 1)
migration3 = changes['thirdapp'][0]
self.assertEqual(len(migration3.operations), 1)
# Right actions?
action = migration1.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
action = migration2.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
action = migration3.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
# Right dependencies?
self.assertEqual(migration1.dependencies, [])
self.assertEqual(migration2.dependencies, [("testapp", "auto_1")])
self.assertEqual(migration3.dependencies, [("otherapp", "auto_1")])
def test_circular_fk_dependency(self):
"""
Tests that having a circular ForeignKey dependency automatically
resolves the situation into 2 migrations on one side and 1 on the other.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_with_book, self.book])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
self.assertEqual(len(changes['otherapp']), 2)
# Right number of actions?
migration1 = changes['testapp'][0]
self.assertEqual(len(migration1.operations), 1)
migration2 = changes['otherapp'][0]
self.assertEqual(len(migration2.operations), 1)
migration3 = changes['otherapp'][1]
self.assertEqual(len(migration3.operations), 1)
# Right actions?
action = migration1.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
action = migration2.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
self.assertEqual(len(action.fields), 2)
action = migration3.operations[0]
self.assertEqual(action.__class__.__name__, "AddField")
self.assertEqual(action.name, "author")
# Right dependencies?
self.assertEqual(migration1.dependencies, [("otherapp", "auto_1")])
self.assertEqual(migration2.dependencies, [])
self.assertEqual(set(migration3.dependencies), set([("otherapp", "auto_1"), ("testapp", "auto_1")]))
def test_unique_together(self):
"Tests unique_together detection"
# Make state
before = self.make_project_state([self.author_empty, self.book])
after = self.make_project_state([self.author_empty, self.book_unique])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['otherapp']), 1)
# Right number of actions?
migration = changes['otherapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterUniqueTogether")
self.assertEqual(action.name, "book")
self.assertEqual(action.unique_together, set([("author", "title")]))
def test_unique_together_ordering(self):
"Tests that unique_together also triggers on ordering changes"
# Make state
before = self.make_project_state([self.author_empty, self.book_unique])
after = self.make_project_state([self.author_empty, self.book_unique_2])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['otherapp']), 1)
# Right number of actions?
migration = changes['otherapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterUniqueTogether")
self.assertEqual(action.name, "book")
self.assertEqual(action.unique_together, set([("title", "author")]))
def test_proxy_ignorance(self):
"Tests that the autodetector correctly ignores proxy models"
# First, we test adding a proxy model
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_empty, self.author_proxy])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes), 0)
# Now, we test turning a proxy model into a non-proxy model
before = self.make_project_state([self.author_empty, self.author_proxy])
after = self.make_project_state([self.author_empty, self.author_proxy_notproxy])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
self.assertEqual(action.name, "AuthorProxy")
|
|
# coding: utf-8
"""
This module implements an interface to enumlib, Gus Hart's excellent Fortran
code for enumerating derivative structures.
This module depends on a compiled enumlib with the executables multienum.x and
makestr.x available in the path. Please download the library at
http://enum.sourceforge.net/ and follow the instructions in the README to
compile these two executables accordingly.
If you use this module, please cite the following:
Gus L. W. Hart and Rodney W. Forcade, "Algorithm for generating derivative
structures," Phys. Rev. B 77 224115 (26 June 2008)
Gus L. W. Hart and Rodney W. Forcade, "Generating derivative structures from
multilattices: Application to hcp alloys," Phys. Rev. B 80 014120 (July 2009)
Gus L. W. Hart, Lance J. Nelson, and Rodney W. Forcade, "Generating
derivative structures at a fixed concentration," Comp. Mat. Sci. 59
101-107 (March 2012)
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jul 16, 2012"
import re
import math
import subprocess
import itertools
import logging
import numpy as np
from monty.fractions import lcm
import fractions
from six.moves import reduce
from pymatgen.io.vaspio.vasp_input import Poscar
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.core.periodic_table import DummySpecie
from monty.os.path import which
from monty.dev import requires
from monty.tempfile import ScratchDir
logger = logging.getLogger(__name__)
# Favor the use of the newer "enum.x" by Gus Hart instead of the older
# "multienum.x"
enum_cmd = which('enum.x') or which('multienum.x')
@requires(enum_cmd and which('makestr.x'),
"EnumlibAdaptor requires the executables 'enum.x' or 'multienum.x' "
"and 'makestr.x' to be in the path. Please download the library at"
"http://enum.sourceforge.net/ and follow the instructions in "
"the README to compile these two executables accordingly.")
class EnumlibAdaptor(object):
"""
An adaptor for enumlib.
.. attribute:: structures
List of all enumerated structures.
"""
amount_tol = 1e-5
def __init__(self, structure, min_cell_size=1, max_cell_size=1,
symm_prec=0.1, enum_precision_parameter=0.001,
refine_structure=False, check_ordered_symmetry=True):
"""
Initializes the adapter with a structure and some parameters.
Args:
structure: An input structure.
min_cell_size (int): The minimum cell size wanted. Defaults to 1.
max_cell_size (int): The maximum cell size wanted. Defaults to 1.
symm_prec (float): Symmetry precision. Defaults to 0.1.
enum_precision_parameter (float): Finite precision parameter for
enumlib. Default of 0.001 is usually ok, but you might need to
tweak it for certain cells.
refine_structure (bool): If you are starting from a structure that
has been relaxed via some electronic structure code,
it is usually much better to start with symmetry determination
and then obtain a refined structure. The refined structure has
cell parameters and atomic positions shifted to the expected
symmetry positions, which makes it much less sensitive to precision
issues in enumlib. If you are already starting from an
experimental cif, refinement should have already been done and
it is not necessary. Defaults to False.
check_ordered_symmetry (bool): Whether to check the symmetry of
the ordered sites. If the symmetry of the ordered sites is
lower, the lowest symmetry ordered sites are included in the
enumeration. This is important if the ordered sites break
symmetry in a way that matters for generating the possible
structures. But sometimes including ordered sites
slows down enumeration to the point that it cannot be
completed. Switch to False in those cases. Defaults to True.
"""
if refine_structure:
finder = SpacegroupAnalyzer(structure, symm_prec)
self.structure = finder.get_refined_structure()
else:
self.structure = structure
self.min_cell_size = min_cell_size
self.max_cell_size = max_cell_size
self.symm_prec = symm_prec
self.enum_precision_parameter = enum_precision_parameter
self.check_ordered_symmetry = check_ordered_symmetry
def run(self):
"""
Run the enumeration.
"""
#Create a temporary directory for working.
with ScratchDir(".") as d:
logger.debug("Temp dir : {}".format(d))
try:
#Generate input files
self._gen_input_file()
#Perform the actual enumeration
num_structs = self._run_multienum()
#Read in the enumeration output as structures.
if num_structs > 0:
self.structures = self._get_structures(num_structs)
else:
raise ValueError("Unable to enumerate structure.")
except Exception:
import sys
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=10, file=sys.stdout)
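# Illustrative usage sketch (assumes the enumlib executables are on the PATH
# and that `structure` is a partially disordered pymatgen Structure):
#
#   adaptor = EnumlibAdaptor(structure, min_cell_size=1, max_cell_size=2)
#   adaptor.run()
#   for s in adaptor.structures:
#       print(s.formula)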
def _gen_input_file(self):
"""
Generate the necessary struct_enum.in file for enumlib. See enumlib
documentation for details.
"""
coord_format = "{:.6f} {:.6f} {:.6f}"
# Using symmetry finder, get the symmetrically distinct sites.
fitter = SpacegroupAnalyzer(self.structure, self.symm_prec)
symmetrized_structure = fitter.get_symmetrized_structure()
logger.debug("Spacegroup {} ({}) with {} distinct sites".format(
fitter.get_spacegroup_symbol(),
fitter.get_spacegroup_number(),
len(symmetrized_structure.equivalent_sites))
)
"""
Enumlib doesn"t work when the number of species get too large. To
simplify matters, we generate the input file only with disordered sites
and exclude the ordered sites from the enumeration. The fact that
different disordered sites with the exact same species may belong to
different equivalent sites is dealt with by having determined the
spacegroup earlier and labelling the species differently.
"""
# index_species and index_amounts store mappings between the indices
# used in the enum input file, and the actual species and amounts.
index_species = []
index_amounts = []
#Stores the ordered sites, which are not enumerated.
ordered_sites = []
disordered_sites = []
coord_str = []
for sites in symmetrized_structure.equivalent_sites:
if sites[0].is_ordered:
ordered_sites.append(sites)
else:
sp_label = []
species = {k: v for k, v in sites[0].species_and_occu.items()}
if sum(species.values()) < 1 - EnumlibAdaptor.amount_tol:
#Let us first add a dummy element for every single
#site whose total occupancies don't sum to 1.
species[DummySpecie("X")] = 1 - sum(species.values())
for sp in species.keys():
if sp not in index_species:
index_species.append(sp)
sp_label.append(len(index_species) - 1)
index_amounts.append(species[sp] * len(sites))
else:
ind = index_species.index(sp)
sp_label.append(ind)
index_amounts[ind] += species[sp] * len(sites)
sp_label = "/".join(["{}".format(i) for i in sorted(sp_label)])
for site in sites:
coord_str.append("{} {}".format(
coord_format.format(*site.coords),
sp_label))
disordered_sites.append(sites)
def get_sg_info(ss):
finder = SpacegroupAnalyzer(Structure.from_sites(ss), self.symm_prec)
sgnum = finder.get_spacegroup_number()
return sgnum
curr_sites = list(itertools.chain.from_iterable(disordered_sites))
min_sgnum = get_sg_info(curr_sites)
logger.debug("Disorderd sites has sgnum %d" % (
min_sgnum))
#It could be that some of the ordered sites have a lower symmetry than
#the disordered sites. So we consider the lowest symmetry sites as
#disordered in our enumeration.
self.ordered_sites = []
to_add = []
if self.check_ordered_symmetry:
for sites in ordered_sites:
temp_sites = list(curr_sites) + sites
sgnum = get_sg_info(temp_sites)
if sgnum < min_sgnum:
logger.debug("Adding {} to sites to be ordered. "
"New sgnum {}"
.format(sites, sgnum))
to_add = sites
min_sgnum = sgnum
for sites in ordered_sites:
if sites == to_add:
index_species.append(sites[0].specie)
index_amounts.append(len(sites))
sp_label = len(index_species) - 1
logger.debug("Lowest symmetry {} sites are included in enum."
.format(sites[0].specie))
for site in sites:
coord_str.append("{} {}".format(
coord_format.format(*site.coords),
sp_label))
disordered_sites.append(sites)
else:
self.ordered_sites.extend(sites)
self.index_species = index_species
lattice = self.structure.lattice
output = [self.structure.formula, "bulk"]
for vec in lattice.matrix:
output.append(coord_format.format(*vec))
output.append("{}".format(len(index_species)))
output.append("{}".format(len(coord_str)))
output.extend(coord_str)
output.append("{} {}".format(self.min_cell_size, self.max_cell_size))
output.append(str(self.enum_precision_parameter))
output.append("partial")
ndisordered = sum([len(s) for s in disordered_sites])
base = int(ndisordered*reduce(lcm,
[f.limit_denominator(
ndisordered *
self.max_cell_size).denominator
for f in map(fractions.Fraction,
index_amounts)]))
#base = ndisordered #10 ** int(math.ceil(math.log10(ndisordered)))
#To get a reasonable number of structures, we fix concentrations to the
#range expected in the original structure.
total_amounts = sum(index_amounts)
for amt in index_amounts:
conc = amt / total_amounts
if abs(conc * base - round(conc * base)) < 1e-5:
output.append("{} {} {}".format(int(round(conc * base)),
int(round(conc * base)),
base))
else:
min_conc = int(math.floor(conc * base))
output.append("{} {} {}".format(min_conc - 1, min_conc + 1,
base))
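# Worked example of the arithmetic above (illustrative, not executed):
# with index_amounts = [1.5, 2.5] over ndisordered = 4 sites and
# max_cell_size = 1, Fraction(1.5) and Fraction(2.5) both have
# denominator 2 after limit_denominator(4), so base = 4 * lcm(2, 2) = 8.
# The first amount gives conc = 1.5 / 4 = 0.375 and conc * base = 3
# exactly, producing the concentration line "3 3 8"; a non-integral
# product would instead produce the relaxed range "min-1 min+1 base".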
output.append("")
logger.debug("Generated input file:\n{}".format("\n".join(output)))
with open("struct_enum.in", "w") as f:
f.write("\n".join(output))
def _run_multienum(self):
p = subprocess.Popen([enum_cmd],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE, close_fds=True)
output = p.communicate()[0].decode("utf-8")
count = 0
start_count = False
for line in output.strip().split("\n"):
if line.strip().endswith("RunTot"):
start_count = True
elif start_count and re.match(r"\d+\s+.*", line.strip()):
count = int(line.split()[-1])
logger.debug("Enumeration resulted in {} structures".format(count))
return count
def _get_structures(self, num_structs):
structs = []
rs = subprocess.Popen(["makestr.x",
"struct_enum.out", str(0),
str(num_structs - 1)],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE, close_fds=True)
rs.communicate()
if len(self.ordered_sites) > 0:
original_latt = self.ordered_sites[0].lattice
# Need to strip sites of site_properties, which would otherwise
# result in an index error. Hence Structure is reconstructed in
# the next step.
ordered_structure = Structure(
original_latt,
[site.species_and_occu for site in self.ordered_sites],
[site.frac_coords for site in self.ordered_sites])
inv_org_latt = np.linalg.inv(original_latt.matrix)
for n in range(1, num_structs + 1):
with open("vasp.{:06d}".format(n)) as f:
data = f.read()
data = re.sub("scale factor", "1", data)
data = re.sub("(\d+)-(\d+)", r"\1 -\2", data)
poscar = Poscar.from_string(data, self.index_species)
sub_structure = poscar.structure
#Enumeration may have resulted in a super lattice. We need to
#find the mapping from the new lattice to the old lattice, and
#perform supercell construction if necessary.
new_latt = sub_structure.lattice
sites = []
if len(self.ordered_sites) > 0:
transformation = np.dot(new_latt.matrix, inv_org_latt)
transformation = [[int(round(cell)) for cell in row]
for row in transformation]
logger.debug("Supercell matrix: {}".format(transformation))
s = Structure.from_sites(ordered_structure)
s.make_supercell(transformation)
sites.extend([site.to_unit_cell for site in s])
super_latt = sites[-1].lattice
else:
super_latt = new_latt
for site in sub_structure:
if site.specie.symbol != "X": # We exclude vacancies.
sites.append(PeriodicSite(site.species_and_occu,
site.frac_coords,
super_latt).to_unit_cell)
structs.append(Structure.from_sites(sorted(sites)))
logger.debug("Read in a total of {} structures.".format(num_structs))
return structs
|
|
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test addr relay
"""
from test_framework.messages import (
CAddress,
NODE_NETWORK,
NODE_WITNESS,
msg_addr,
msg_getaddr
)
from test_framework.p2p import (
P2PInterface,
p2p_lock,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
import random
import time
class AddrReceiver(P2PInterface):
num_ipv4_received = 0
test_addr_contents = False
_tokens = 1
def __init__(self, test_addr_contents=False):
super().__init__()
self.test_addr_contents = test_addr_contents
def on_addr(self, message):
for addr in message.addrs:
self.num_ipv4_received += 1
if self.test_addr_contents:
# relay_tests checks the content of the addr messages match
# expectations based on the message creation in setup_addr_msg
assert_equal(addr.nServices, 9)
if not 8333 <= addr.port < 8343:
raise AssertionError("Invalid addr.port of {} (8333-8342 expected)".format(addr.port))
assert addr.ip.startswith('123.123.123.')
def on_getaddr(self, message):
# When the node sends us a getaddr, it increments the addr relay tokens for the connection by 1000
self._tokens += 1000
@property
def tokens(self):
with p2p_lock:
return self._tokens
def increment_tokens(self, n):
# When we move mocktime forward, the node increments the addr relay tokens for its peers
with p2p_lock:
self._tokens += n
def addr_received(self):
return self.num_ipv4_received != 0
def getaddr_received(self):
return self.message_count['getaddr'] > 0
class AddrTest(BitcoinTestFramework):
counter = 0
mocktime = int(time.time())
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [["-whitelist=addr@127.0.0.1"]]
def run_test(self):
self.oversized_addr_test()
self.relay_tests()
self.getaddr_tests()
self.blocksonly_mode_tests()
self.rate_limit_tests()
def setup_addr_msg(self, num):
addrs = []
for i in range(num):
addr = CAddress()
addr.time = self.mocktime + i
addr.nServices = NODE_NETWORK | NODE_WITNESS
addr.ip = f"123.123.123.{self.counter % 256}"
addr.port = 8333 + i
addrs.append(addr)
self.counter += 1
msg = msg_addr()
msg.addrs = addrs
return msg
def setup_rand_addr_msg(self, num):
addrs = []
for i in range(num):
addr = CAddress()
addr.time = self.mocktime + i
addr.nServices = NODE_NETWORK | NODE_WITNESS
addr.ip = f"{random.randrange(128,169)}.{random.randrange(1,255)}.{random.randrange(1,255)}.{random.randrange(1,255)}"
addr.port = 8333
addrs.append(addr)
msg = msg_addr()
msg.addrs = addrs
return msg
def send_addr_msg(self, source, msg, receivers):
source.send_and_ping(msg)
# pop m_next_addr_send timer
self.mocktime += 10 * 60
self.nodes[0].setmocktime(self.mocktime)
for peer in receivers:
peer.sync_send_with_ping()
def oversized_addr_test(self):
self.log.info('Send an addr message that is too large')
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
msg = self.setup_addr_msg(1010)
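# The P2P protocol caps addr messages at 1000 addresses, so this
# 1010-entry message is expected to be logged and dropped by the node.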
with self.nodes[0].assert_debug_log(['addr message size = 1010']):
addr_source.send_and_ping(msg)
self.nodes[0].disconnect_p2ps()
def relay_tests(self):
self.log.info('Test address relay')
self.log.info('Check that addr message content is relayed and added to addrman')
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
num_receivers = 7
receivers = []
for _ in range(num_receivers):
receivers.append(self.nodes[0].add_p2p_connection(AddrReceiver(test_addr_contents=True)))
# Keep this with length <= 10. Addresses from larger messages are not
# relayed.
num_ipv4_addrs = 10
msg = self.setup_addr_msg(num_ipv4_addrs)
with self.nodes[0].assert_debug_log(
[
'Added {} addresses from 127.0.0.1: 0 tried'.format(num_ipv4_addrs),
'received: addr (301 bytes) peer=1',
]
):
self.send_addr_msg(addr_source, msg, receivers)
total_ipv4_received = sum(r.num_ipv4_received for r in receivers)
# Every IPv4 address must be relayed to two peers, other than the
# originating node (addr_source).
ipv4_branching_factor = 2
assert_equal(total_ipv4_received, num_ipv4_addrs * ipv4_branching_factor)
self.nodes[0].disconnect_p2ps()
self.log.info('Check relay of addresses received from outbound peers')
inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver(test_addr_contents=True))
full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
msg = self.setup_addr_msg(2)
self.send_addr_msg(full_outbound_peer, msg, [inbound_peer])
self.log.info('Check that the first addr message received from an outbound peer is not relayed')
# Currently, there is a flag that prevents the first addr message received
# from a new outbound peer from being relayed to others. Originally meant to prevent
# large GETADDR responses from being relayed, it now typically affects the self-announcement
# of the outbound peer which is often sent before the GETADDR response.
assert_equal(inbound_peer.num_ipv4_received, 0)
self.log.info('Check that subsequent addr messages sent from an outbound peer are relayed')
msg2 = self.setup_addr_msg(2)
self.send_addr_msg(full_outbound_peer, msg2, [inbound_peer])
assert_equal(inbound_peer.num_ipv4_received, 2)
self.log.info('Check address relay to outbound peers')
block_relay_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=1, connection_type="block-relay-only")
msg3 = self.setup_addr_msg(2)
self.send_addr_msg(inbound_peer, msg3, [full_outbound_peer, block_relay_peer])
self.log.info('Check that addresses are relayed to full outbound peers')
assert_equal(full_outbound_peer.num_ipv4_received, 2)
self.log.info('Check that addresses are not relayed to block-relay-only outbound peers')
assert_equal(block_relay_peer.num_ipv4_received, 0)
self.nodes[0].disconnect_p2ps()
def getaddr_tests(self):
self.log.info('Test getaddr behavior')
self.log.info('Check that we send a getaddr message upon connecting to an outbound-full-relay peer')
full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
full_outbound_peer.sync_with_ping()
assert full_outbound_peer.getaddr_received()
self.log.info('Check that we do not send a getaddr message upon connecting to a block-relay-only peer')
block_relay_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=1, connection_type="block-relay-only")
block_relay_peer.sync_with_ping()
assert_equal(block_relay_peer.getaddr_received(), False)
self.log.info('Check that we answer getaddr messages only from inbound peers')
inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver())
inbound_peer.sync_with_ping()
# Add some addresses to addrman
for i in range(1000):
first_octet = i >> 8
second_octet = i % 256
a = f"{first_octet}.{second_octet}.1.1"
self.nodes[0].addpeeraddress(a, 8333)
full_outbound_peer.send_and_ping(msg_getaddr())
block_relay_peer.send_and_ping(msg_getaddr())
inbound_peer.send_and_ping(msg_getaddr())
self.mocktime += 5 * 60
self.nodes[0].setmocktime(self.mocktime)
inbound_peer.wait_until(lambda: inbound_peer.addr_received() is True)
assert_equal(full_outbound_peer.num_ipv4_received, 0)
assert_equal(block_relay_peer.num_ipv4_received, 0)
assert inbound_peer.num_ipv4_received > 100
self.nodes[0].disconnect_p2ps()
def blocksonly_mode_tests(self):
self.log.info('Test addr relay in -blocksonly mode')
self.restart_node(0, ["-blocksonly", "-whitelist=addr@127.0.0.1"])
self.mocktime = int(time.time())
self.log.info('Check that we send getaddr messages')
full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
full_outbound_peer.sync_with_ping()
assert full_outbound_peer.getaddr_received()
self.log.info('Check that we relay address messages')
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
msg = self.setup_addr_msg(2)
self.send_addr_msg(addr_source, msg, [full_outbound_peer])
assert_equal(full_outbound_peer.num_ipv4_received, 2)
self.nodes[0].disconnect_p2ps()
def send_addrs_and_test_rate_limiting(self, peer, no_relay, new_addrs, total_addrs):
"""Send an addr message and check that the number of addresses processed and rate-limited is as expected"""
peer.send_and_ping(self.setup_rand_addr_msg(new_addrs))
peerinfo = self.nodes[0].getpeerinfo()[0]
addrs_processed = peerinfo['addr_processed']
addrs_rate_limited = peerinfo['addr_rate_limited']
self.log.debug(f"addrs_processed = {addrs_processed}, addrs_rate_limited = {addrs_rate_limited}")
if no_relay:
assert_equal(addrs_processed, 0)
assert_equal(addrs_rate_limited, 0)
else:
assert_equal(addrs_processed, min(total_addrs, peer.tokens))
assert_equal(addrs_rate_limited, max(0, total_addrs - peer.tokens))
def rate_limit_tests(self):
self.mocktime = int(time.time())
self.restart_node(0, [])
self.nodes[0].setmocktime(self.mocktime)
for contype, no_relay in [("outbound-full-relay", False), ("block-relay-only", True), ("inbound", False)]:
self.log.info(f'Test rate limiting of addr processing for {contype} peers')
if contype == "inbound":
peer = self.nodes[0].add_p2p_connection(AddrReceiver())
else:
peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type=contype)
# Send 600 addresses. For all but the block-relay-only peer this should result in addresses being processed.
self.send_addrs_and_test_rate_limiting(peer, no_relay, 600, 600)
# Send 600 more addresses. For the outbound-full-relay peer (to which we send a GETADDR, and which thus will
# process up to 1001 incoming addresses), this means more addresses will be processed.
self.send_addrs_and_test_rate_limiting(peer, no_relay, 600, 1200)
# Send 10 more. As we reached the processing limit for all nodes, no more addresses should be processed.
self.send_addrs_and_test_rate_limiting(peer, no_relay, 10, 1210)
# Advance the time by 100 seconds, permitting the processing of 10 more addresses.
# Send 200 and verify that 10 are processed.
self.mocktime += 100
self.nodes[0].setmocktime(self.mocktime)
peer.increment_tokens(10)
self.send_addrs_and_test_rate_limiting(peer, no_relay, 200, 1410)
# Advance the time by 1000 seconds, permitting the processing of 100 more addresses.
# Send 200 and verify that 100 are processed.
self.mocktime += 1000
self.nodes[0].setmocktime(self.mocktime)
peer.increment_tokens(100)
self.send_addrs_and_test_rate_limiting(peer, no_relay, 200, 1610)
self.nodes[0].disconnect_p2ps()
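# Summary of the token accounting exercised above (illustrative): each peer
# starts with 1 token, a GETADDR sent by the node adds 1000 tokens, and
# tokens accrue with mocktime at roughly 0.1 per second (10 tokens per
# 100 s, 100 tokens per 1000 s), each token permitting one address to be
# processed.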
if __name__ == '__main__':
AddrTest().main()
|
|
from collections import namedtuple
from itertools import chain
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql import JSON, JSONB
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.sql.elements import Label
from sqlalchemy.sql.expression import union
from sqlalchemy_utils import get_hybrid_properties
from sqlalchemy_utils.functions import cast_if, get_mapper
from sqlalchemy_utils.functions.orm import get_all_descriptors
from sqlalchemy_utils.relationships import (
path_to_relationships,
select_correlated_expression
)
from .exc import (
IdPropertyNotFound,
InvalidField,
UnknownField,
UnknownFieldKey,
UnknownModel
)
from .hybrids import CompositeId
from .utils import (
adapt,
chain_if,
get_attrs,
get_descriptor_columns,
get_selectable,
s,
subpaths
)
Parameters = namedtuple(
'Parameters',
['fields', 'include', 'sort', 'offset', 'limit']
)
json_array = sa.cast(
postgresql.array([], type_=JSON), postgresql.ARRAY(JSON)
)
jsonb_array = sa.cast(
postgresql.array([], type_=JSONB), postgresql.ARRAY(JSONB)
)
RESERVED_KEYWORDS = (
'id',
'type',
)
class ResourceRegistry(object):
def __init__(self, model_mapping):
self.by_type = model_mapping
self.by_model_class = dict(
(value, key) for key, value in model_mapping.items()
)
class QueryBuilder(object):
"""
1. Simple example
::
query_builder = QueryBuilder({
'articles': Article,
'users': User,
'comments': Comment
})
2. Example using type formatters::
def isoformat(date):
return sa.func.to_char(
date,
sa.text('\'YYYY-MM-DD"T"HH24:MI:SS.US"Z"\'')
).label(date.name)
query_builder = QueryBuilder(
{
'articles': Article,
'users': User,
'comments': Comment
},
type_formatters={sa.DateTime: isoformat}
)
:param model_mapping:
A mapping with keys representing JSON API resource identifier type
names and values as SQLAlchemy models.
It is recommended to use lowercased, pluralized and hyphenated names for
resource identifier types. So, for example, a model such as
LeagueInvitation should have an equivalent key of
'league-invitations'.
:param base_url:
Base url to be used for building JSON API compatible links objects. By
default this is `None` indicating that no link objects will be built.
:param type_formatters:
A dictionary of type formatters
:param sort_included:
Whether or not to sort included objects by type and id.
"""
def __init__(
self,
model_mapping,
base_url=None,
type_formatters=None,
sort_included=True
):
self.validate_model_mapping(model_mapping)
self.resource_registry = ResourceRegistry(model_mapping)
self.base_url = base_url
self.type_formatters = (
{} if type_formatters is None else type_formatters
)
self.sort_included = sort_included
def validate_model_mapping(self, model_mapping):
for model in model_mapping.values():
if 'id' not in get_all_descriptors(model).keys():
raise IdPropertyNotFound(
"Couldn't find 'id' property for model {0}.".format(
model
)
)
def get_resource_type(self, model):
if isinstance(model, sa.orm.util.AliasedClass):
model = sa.inspect(model).mapper.class_
try:
return self.resource_registry.by_model_class[model]
except KeyError:
raise UnknownModel(
'Unknown model given. Could not find model %r from given '
'model mapping.' % model
)
def get_id(self, from_obj):
return cast_if(get_attrs(from_obj).id, sa.String)
def build_resource_identifier(self, model, from_obj):
model_alias = self.get_resource_type(model)
return [
s('id'),
cast_if(
AttributesExpression(
self,
model,
from_obj
).adapt_attribute('id'),
sa.String
),
s('type'),
s(model_alias),
]
def select_related(self, obj, relationship_key, **kwargs):
"""
Builds a query for selecting related resource(s). This method can be
used for building select queries for JSON requests such as::
GET articles/1/author
Usage::
article = session.query(Article).get(1)
query = query_builder.select_related(
article,
'category'
)
:param obj:
The root object to select the related resources from.
:param fields:
A mapping of fields. Keys representing model keys and values as
lists of model descriptor names.
:param include:
List of dot-separated relationship paths.
:param links:
A dictionary of links to apply as top level links in the built
query. Keys representing json keys and values as valid urls or
dictionaries.
:param sort:
List of attributes to apply as an order by for the root model.
:param from_obj:
A SQLAlchemy selectable (for example a Query object) to select the
query results from.
:param as_text:
Whether or not to build a query that returns the results as text
(raw json).
.. versionadded:: 0.2
"""
return self._select_related(obj, relationship_key, **kwargs)
def select_relationship(self, obj, relationship_key, **kwargs):
"""
Builds a query for selecting relationship resource(s)::
article = session.query(Article).get(1)
query = query_builder.select_relationship(
article,
'category'
)
:param obj:
The root object to select the related resources from.
:param sort:
List of attributes to apply as an order by for the root model.
:param links:
A dictionary of links to apply as top level links in the built
query. Keys representing json keys and values as valid urls or
dictionaries.
:param from_obj:
A SQLAlchemy selectable (for example a Query object) to select the
query results from.
:param as_text:
Whether or not to build a query that returns the results as text
(raw json).
.. versionadded:: 0.2
"""
kwargs['ids_only'] = True
return self._select_related(obj, relationship_key, **kwargs)
def _select_related(self, obj, relationship_key, **kwargs):
mapper = sa.inspect(obj.__class__)
prop = mapper.relationships[relationship_key]
model = prop.mapper.class_
from_obj = kwargs.pop('from_obj', None)
if from_obj is None:
from_obj = sa.orm.query.Query(model)
# SQLAlchemy Query.with_parent throws warning if the primary object
# foreign key is NULL. Thus we need this ugly magic to return empty
# data in that scenario.
if (
prop.direction.name == 'MANYTOONE' and
not prop.secondary and
getattr(obj, prop.local_remote_pairs[0][0].key) is None
):
expr = sa.cast({'data': None}, JSONB)
if kwargs.get('as_text'):
expr = sa.cast(expr, sa.Text)
return sa.select([expr])
from_obj = from_obj.with_parent(obj, prop)
if prop.order_by:
from_obj = from_obj.order_by(*prop.order_by)
from_obj = from_obj.subquery()
return SelectExpression(self, model, from_obj).build_select(
multiple=prop.uselist,
**kwargs
)
def select(self, model, **kwargs):
"""
Builds a query for selecting multiple resource instances::
query = query_builder.select(
Article,
fields={'articles': ['name', 'author', 'comments']},
include=['author', 'comments.author'],
from_obj=session.query(Article).filter(
Article.id.in_([1, 2, 3, 4])
)
)
Results can be sorted::
# Sort by id in descending order
query = query_builder.select(
Article,
sort=['-id']
)
# Sort by name and id in ascending order
query = query_builder.select(
Article,
sort=['name', 'id']
)
:param model:
The root model to build the select query from.
:param fields:
A mapping of fields. Keys representing model keys and values as
lists of model descriptor names.
:param include:
List of dot-separated relationship paths.
:param sort:
List of attributes to apply as an order by for the root model.
:param limit:
Applies an SQL LIMIT to the generated query.
:param offset:
Applies an SQL OFFSET to the generated query.
:param links:
A dictionary of links to apply as top level links in the built
query. Keys representing json keys and values as valid urls or
dictionaries.
:param from_obj:
A SQLAlchemy selectable (for example a Query object) to select the
query results from.
:param as_text:
Whether or not to build a query that returns the results as text
(raw json).
"""
from_obj = kwargs.pop('from_obj', None)
if from_obj is None:
from_obj = sa.orm.query.Query(model)
if kwargs.get('sort') is not None:
from_obj = apply_sort(
from_obj.statement,
from_obj,
kwargs.get('sort')
)
if kwargs.get('limit') is not None:
from_obj = from_obj.limit(kwargs.get('limit'))
if kwargs.get('offset') is not None:
from_obj = from_obj.offset(kwargs.get('offset'))
from_obj = from_obj.cte('main_query')
return SelectExpression(self, model, from_obj).build_select(**kwargs)
def select_one(self, model, id, **kwargs):
"""
Builds a query for selecting single resource instance.
::
query = query_builder.select_one(
Article,
1,
fields={'articles': ['name', 'author', 'comments']},
include=['author', 'comments.author'],
)
:param model:
The root model to build the select query from.
:param id:
The id of the resource to select.
:param fields:
A mapping of fields. Keys representing model keys and values as
lists of model descriptor names.
:param include:
List of dot-separated relationship paths.
:param links:
A dictionary of links to apply as top level links in the built
query. Keys representing json keys and values as valid urls or
dictionaries.
:param from_obj:
A SQLAlchemy selectable (for example a Query object) to select the
query results from.
:param as_text:
Whether or not to build a query that returns the results as text
(raw json).
"""
from_obj = kwargs.pop('from_obj', None)
if from_obj is None:
from_obj = sa.orm.query.Query(model)
from_obj = from_obj.filter(model.id == id).subquery()
query = SelectExpression(self, model, from_obj).build_select(
multiple=False,
**kwargs
)
query = query.where(query._froms[0].c.data.isnot(None))
return query
class Expression(object):
def __init__(self, query_builder, model, from_obj):
self.query_builder = query_builder
self.model = model
self.from_obj = from_obj
@property
def args(self):
return [self.query_builder, self.model, self.from_obj]
class SelectExpression(Expression):
def validate_field_keys(self, fields):
if fields:
unknown_keys = (
set(fields) -
set(self.query_builder.resource_registry.by_type.keys())
)
if unknown_keys:
raise UnknownFieldKey(
'Unknown field keys given. Could not find {0} {1} from '
'given model mapping.'.format(
'keys' if len(unknown_keys) > 1 else 'key',
','.join("'{0}'".format(key) for key in unknown_keys)
)
)
def build_select(
self,
fields=None,
include=None,
sort=None,
limit=None,
offset=None,
links=None,
multiple=True,
ids_only=False,
as_text=False
):
self.validate_field_keys(fields)
if fields is None:
fields = {}
params = Parameters(
fields=fields,
include=include,
sort=sort,
limit=limit,
offset=offset
)
from_args = self._get_from_args(
params,
multiple,
ids_only,
links
)
main_json_query = sa.select(from_args).alias('main_json_query')
expr = sa.func.row_to_json(sa.text('main_json_query.*'))
if as_text:
expr = sa.cast(expr, sa.Text)
query = sa.select(
[expr],
from_obj=main_json_query
)
return query
def _get_from_args(
self,
params,
multiple,
ids_only,
links
):
data_expr = DataExpression(*self.args)
data_query = (
data_expr.build_data_array(params, ids_only=ids_only)
if multiple else
data_expr.build_data(params, ids_only=ids_only)
)
from_args = [data_query.as_scalar().label('data')]
if params.include:
selectable = self.from_obj
include_expr = IncludeExpression(
self.query_builder,
self.model,
selectable
)
included_query = include_expr.build_included(params)
from_args.append(included_query.as_scalar().label('included'))
if links:
from_args.append(
sa.func.json_build_object(
*chain(*links.items())
).label('links')
)
return from_args
def apply_sort(from_obj, query, sort):
for param in sort:
query = query.order_by(
sa.desc(getattr(from_obj.c, param[1:]))
if param[0] == '-' else
getattr(from_obj.c, param)
)
return query
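# Illustrative sketch: with sort=['-id', 'name'] the loop above emits
# ORDER BY id DESC, name ASC, reading a leading '-' as descending order.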
class AttributesExpression(Expression):
@property
def all_fields(self):
return [
field
for field, descriptor
in self.adapted_descriptors
if (
field != '__mapper__' and
field not in RESERVED_KEYWORDS and
not self.is_relationship_descriptor(descriptor) and
not self.should_skip_columnar_descriptor(descriptor)
)
]
def should_skip_columnar_descriptor(self, descriptor):
columns = get_descriptor_columns(self.from_obj, descriptor)
return (len(columns) == 1 and columns[0].foreign_keys)
@property
def adapted_descriptors(self):
return (
list(get_all_descriptors(self.from_obj).items()) +
[
(
key,
adapt(self.from_obj, getattr(self.model, key))
)
for key in get_hybrid_properties(self.model).keys()
]
)
def adapt_attribute(self, attr_name):
cols = get_attrs(self.from_obj)
hybrids = get_hybrid_properties(self.model).keys()
if (
attr_name in hybrids or
attr_name in self.column_property_expressions
):
column = adapt(self.from_obj, getattr(self.model, attr_name))
else:
column = getattr(cols, attr_name)
return self.format_column(column)
def format_column(self, column):
for type_, formatter in self.query_builder.type_formatters.items():
if isinstance(column.type, type_):
return formatter(column)
return column
def is_relationship_field(self, field):
return field in get_mapper(self.model).relationships.keys()
def is_relationship_descriptor(self, descriptor):
return (
isinstance(descriptor, InstrumentedAttribute) and
isinstance(descriptor.property, sa.orm.RelationshipProperty)
)
def validate_column(self, field, column):
# Check that the given column is an actual Column object and not,
# for example, a select expression
if isinstance(column, sa.Column):
if column.foreign_keys:
raise InvalidField(
"Field '{0}' is invalid. The underlying column "
"'{1}' has foreign key. You can't include foreign key "
"attributes. Consider including relationship "
"attributes.".format(
field, column.key
)
)
def validate_field(self, field, descriptors):
if field in RESERVED_KEYWORDS:
raise InvalidField(
"Given field '{0}' is reserved keyword.".format(field)
)
if field not in descriptors.keys():
raise UnknownField(
"Unknown field '{0}'. Given selectable does not have "
"descriptor named '{0}'.".format(field)
)
columns = get_descriptor_columns(self.model, descriptors[field])
for column in columns:
self.validate_column(field, column)
def validate_fields(self, fields):
descriptors = get_all_descriptors(self.from_obj)
hybrids = get_hybrid_properties(self.model)
expressions = self.column_property_expressions
for field in fields:
if field in hybrids or field in expressions:
continue
self.validate_field(field, descriptors)
@property
def column_property_expressions(self):
return dict([
(key, attr)
for key, attr
in get_mapper(self.model).attrs.items()
if (
isinstance(attr, sa.orm.ColumnProperty) and
not isinstance(attr.columns[0], sa.Column)
)
])
def get_model_fields(self, fields):
model_key = self.query_builder.get_resource_type(self.model)
if not fields or model_key not in fields:
model_fields = self.all_fields
else:
model_fields = [
field for field in fields[model_key]
if not self.is_relationship_field(field)
]
self.validate_fields(model_fields)
return model_fields
def build_attributes(self, fields):
return chain_if(
*(
[s(key), self.adapt_attribute(key)]
for key in self.get_model_fields(fields)
)
)
class RelationshipsExpression(Expression):
def build_relationships(self, fields):
return chain_if(
*(
self.build_relationship(relationship)
for relationship
in self.get_relationship_properties(fields)
)
)
def build_relationship_data(self, relationship, alias):
identifier = self.query_builder.build_resource_identifier(
alias,
alias
)
expr = sa.func.json_build_object(*identifier).label('json_object')
query = select_correlated_expression(
self.model,
expr,
relationship.key,
alias,
get_selectable(self.from_obj),
order_by=self.build_order_by(relationship, alias)
).alias('relationships')
return query
def build_order_by(self, relationship, alias):
if relationship.order_by is not False:
return relationship.order_by
if (
(
hasattr(alias.id, 'expression') and
isinstance(alias.id.expression, Label)
) or
isinstance(alias.id, Label)
):
return alias.id.expression.get_children()
return [alias.id]
def build_relationship_data_array(self, relationship, alias):
query = self.build_relationship_data(relationship, alias)
return sa.select([
sa.func.coalesce(
sa.func.array_agg(query.c.json_object),
json_array
)
]).select_from(query)
def build_relationship(self, relationship):
cls = relationship.mapper.class_
alias = sa.orm.aliased(cls)
query = (
self.build_relationship_data_array(relationship, alias)
if relationship.uselist else
self.build_relationship_data(relationship, alias)
)
args = [s('data'), query.as_scalar()]
if self.query_builder.base_url:
links = LinksExpression(*self.args).build_relationship_links(
relationship.key
)
args.extend([
s('links'),
sa.func.json_build_object(*links)
])
return [
s(relationship.key),
sa.func.json_build_object(*args)
]
def get_relationship_properties(self, fields):
model_alias = self.query_builder.get_resource_type(self.model)
mapper = get_mapper(self.model)
if model_alias not in fields:
return list(mapper.relationships.values())
else:
return [
mapper.relationships[field]
for field in fields[model_alias]
if field in mapper.relationships.keys()
]
class LinksExpression(Expression):
def build_link(self, postfix=None):
args = [
s(self.query_builder.base_url),
s(self.query_builder.get_resource_type(self.model)),
s('/'),
self.query_builder.get_id(self.from_obj),
]
if postfix is not None:
args.append(postfix)
return sa.func.concat(*args)
def build_links(self):
if self.query_builder.base_url:
return [s('self'), self.build_link()]
def build_relationship_links(self, key):
if self.query_builder.base_url:
return [
s('self'),
self.build_link(s('/relationships/{0}'.format(key))),
s('related'),
self.build_link(s('/{0}'.format(key)))
]
class DataExpression(Expression):
def build_attrs_relationships_and_links(self, fields):
args = (self.query_builder, self.model, self.from_obj)
parts = {
'attributes': AttributesExpression(*args).build_attributes(
fields
),
'relationships': RelationshipsExpression(
*args
).build_relationships(fields),
'links': LinksExpression(*args).build_links()
}
return chain_if(
*(
[s(key), sa.func.json_build_object(*values)]
for key, values in parts.items()
if values
)
)
def build_data_expr(self, params, ids_only=False):
json_fields = self.query_builder.build_resource_identifier(
self.model,
self.from_obj
)
if not ids_only:
json_fields.extend(
self.build_attrs_relationships_and_links(params.fields)
)
return sa.func.json_build_object(*json_fields).label('data')
def build_data(self, params, ids_only=False):
expr = self.build_data_expr(params, ids_only=ids_only)
query = sa.select([expr], from_obj=self.from_obj)
return query
def build_data_array(self, params, ids_only=False):
data_query = self.build_data(params, ids_only=ids_only).alias()
return sa.select(
[sa.func.coalesce(
sa.func.array_agg(data_query.c.data),
json_array
)],
from_obj=data_query
).correlate(self.from_obj)
class IncludeExpression(Expression):
def build_included_union(self, params):
selects = [
self.build_single_included(params.fields, subpath)
for path in params.include
for subpath in subpaths(path)
]
union_select = union(*selects).alias()
query = sa.select(
[union_select.c.included.label('included')],
from_obj=union_select
)
if self.query_builder.sort_included:
query = query.order_by(
union_select.c.included[s('type')],
union_select.c.included[s('id')]
)
return query
def build_included(self, params):
included_union = self.build_included_union(params).alias()
return sa.select(
[sa.func.coalesce(
sa.func.array_agg(included_union.c.included),
jsonb_array
).label('included')],
from_obj=included_union
)
def build_single_included_fields(self, alias, fields):
json_fields = self.query_builder.build_resource_identifier(
alias,
alias
)
data_expr = DataExpression(
self.query_builder,
alias,
sa.inspect(alias).selectable
)
json_fields.extend(
data_expr.build_attrs_relationships_and_links(fields)
)
return json_fields
def build_included_json_object(self, alias, fields):
return sa.cast(
sa.func.json_build_object(
*self.build_single_included_fields(alias, fields)
),
JSONB
).label('included')
def build_single_included(self, fields, path):
relationships = path_to_relationships(path, self.model)
cls = relationships[-1].mapper.class_
subalias = sa.orm.aliased(cls)
subquery = select_correlated_expression(
self.model,
subalias.id,
path,
subalias,
self.from_obj,
correlate=False
).with_only_columns(split_if_composite(subalias.id)).distinct()
alias = sa.orm.aliased(cls)
expr = self.build_included_json_object(alias, fields)
query = sa.select(
[expr],
from_obj=alias
).where(alias.id.in_(subquery)).distinct()
if cls is self.model:
query = query.where(
alias.id.notin_(
sa.select(
split_if_composite(get_attrs(self.from_obj).id),
from_obj=self.from_obj
)
)
)
return query
def split_if_composite(column):
if (
hasattr(column.comparator, 'expression') and
isinstance(column.comparator.expression, CompositeId)
):
return column.comparator.expression.keys
return [column]
|
|
import socket
import struct
import threading
from _pydev_comm.pydev_io import PipeIO, readall
from _shaded_thriftpy.thrift import TClient
from _shaded_thriftpy.transport import TTransportBase
REQUEST = 0
RESPONSE = 1
class MultiplexedSocketReader(object):
def __init__(self, s):
self._socket = s
self._request_pipe = PipeIO()
self._response_pipe = PipeIO()
self._read_socket_lock = threading.RLock()
def read_request(self, sz):
"""
Invoked from the server side of the bidirectional transport.
"""
return self._request_pipe.read(sz)
def read_response(self, sz):
"""
Invoked from the client side of the bidirectional transport.
"""
return self._response_pipe.read(sz)
def _read_and_dispatch_next_frame(self):
with self._read_socket_lock:
direction, frame = self._read_frame()
if direction == REQUEST:
self._request_pipe.write(frame)
elif direction == RESPONSE:
self._response_pipe.write(frame)
def _read_frame(self):
buff = readall(self._socket.recv, 4)
sz, = struct.unpack('!i', buff)
if sz == 0:
# an empty frame carries no direction byte
return None, None
else:
buff = readall(self._socket.recv, 1)
direction, = struct.unpack('!b', buff)
frame = readall(self._socket.recv, sz - 1)
return direction, frame
def start_reading(self):
t = threading.Thread(target=self._read_forever)
t.setDaemon(True)
t.start()
def _read_forever(self):
try:
while True:
self._read_and_dispatch_next_frame()
except EOFError:
# normal Python Console termination
pass
finally:
self._close_pipes()
def _close_pipes(self):
self._request_pipe.close()
self._response_pipe.close()
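# Illustrative sketch (not part of the original module): each frame on the
# wire is a 4-byte big-endian length, a 1-byte direction (REQUEST or
# RESPONSE), and the payload, where the length counts the direction byte plus
# the payload. The helper below is an assumption for illustration only, but
# it matches the layout produced by FramedWriter.flush() and consumed by
# MultiplexedSocketReader._read_frame().
def _example_pack_frame(direction, payload):
    # 4-byte length prefix, then the direction byte, then the payload bytes
    return struct.pack('!i', len(payload) + 1) + struct.pack('b', direction) + payload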
class SocketWriter(object):
def __init__(self, sock):
self._socket = sock
self._send_lock = threading.RLock()
def write(self, buf):
with self._send_lock:
self._socket.sendall(buf)
class FramedWriter(object):
MAX_BUFFER_SIZE = 4096
def __init__(self):
self._buffer = bytearray()
def _get_writer(self):
raise NotImplementedError
def _get_write_direction(self):
raise NotImplementedError
def write(self, buf):
buf_len = len(buf)
bytes_written = 0
while bytes_written < buf_len:
# buffer_size will be updated on self.flush()
buffer_size = len(self._buffer)
bytes_to_write = buf_len - bytes_written
if buffer_size + bytes_to_write > self.MAX_BUFFER_SIZE:
write_till_byte = bytes_written + (self.MAX_BUFFER_SIZE - buffer_size)
self._buffer.extend(buf[bytes_written:write_till_byte])
self.flush()
bytes_written = write_till_byte
else:
# the remaining bytes fit into the buffer
self._buffer.extend(buf[bytes_written:])
bytes_written = buf_len
def flush(self):
# snapshot and reset the buffer before writing, so state is preserved if the underlying write fails
out = bytes(self._buffer)
# prepend the message with the direction byte
out = struct.pack("b", self._get_write_direction()) + out
self._buffer = bytearray()
# N.B.: Doing this string concatenation is WAY cheaper than making
# two separate calls to the underlying socket object. Socket writes in
# Python turn out to be REALLY expensive, but it seems to do a pretty
# good job of managing string buffer operations without excessive
# copies
self._get_writer().write(struct.pack("!i", len(out)) + out)
def close(self):
self._buffer = bytearray()
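# Buffering sketch (hypothetical numbers): with MAX_BUFFER_SIZE = 4096, a
# single write() of 10000 bytes flushes two full 4096-byte frames and leaves
# the remaining 1808 bytes buffered until the next explicit flush(); each
# flush() emits one frame tagged with this transport's direction byte.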
class TBidirectionalClientTransport(FramedWriter, TTransportBase):
def __init__(self, client_socket, reader, writer):
super(TBidirectionalClientTransport, self).__init__()
# collaborators and state for the client side of the multiplexed connection
self._client_socket = client_socket
self._reader = reader
self._writer = writer
self._is_closed = False
def _get_writer(self):
return self._writer
def _get_write_direction(self):
return REQUEST
def _read(self, sz):
"""
Reads a response from the multiplexed reader.
"""
return self._reader.read_response(sz)
def is_open(self):
return not self._is_closed
def close(self):
self._is_closed = True
self._client_socket.shutdown(socket.SHUT_RDWR)
self._client_socket.close()
class TServerTransportBase(object):
"""Base class for Thrift server transports."""
def listen(self):
pass
def accept(self):
raise NotImplementedError
def close(self):
pass
class TReversedServerTransport(TServerTransportBase):
def __init__(self, read_fn, writer):
self._read_fn = read_fn
self._writer = writer
def accept(self):
return TReversedServerAcceptedTransport(self._read_fn, self._writer)
class TReversedServerAcceptedTransport(FramedWriter):
def __init__(self, read_fn, writer):
"""
:param read_fn: high-level read function (reads only requests from the input stream)
:param writer: low-level writer (it is expected to know only bytes)
"""
super(TReversedServerAcceptedTransport, self).__init__()
self._read_fn = read_fn
self._writer = writer
def _get_writer(self):
return self._writer
def _get_write_direction(self):
# this side acts as a server and writes responses back to the client
return RESPONSE
def read(self, sz):
return self._read_fn(sz)
class TSyncClient(TClient):
def __init__(self, service, iprot, oprot=None):
super(TSyncClient, self).__init__(service, iprot, oprot)
self._lock = threading.RLock()
def _req(self, _api, *args, **kwargs):
with self._lock:
return super(TSyncClient, self)._req(_api, *args, **kwargs)
def open_transports_as_client(addr):
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(addr)
return _create_client_server_transports(client_socket)
def open_transports_as_server(addr):
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(addr)
server_socket.listen(1)
client_socket, address = server_socket.accept()
return _create_client_server_transports(client_socket)
def _create_client_server_transports(sock):
reader = MultiplexedSocketReader(sock)
reader.start_reading()
writer = SocketWriter(sock)
client_transport = TBidirectionalClientTransport(sock, reader, writer)
server_transport = TReversedServerTransport(client_transport._reader.read_request, client_transport._writer)
return client_transport, server_transport
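# Illustrative usage sketch (not part of the original module). The `service`
# module and `proto_factory` below are assumptions: any thriftpy-style
# service definition and a protocol factory exposing get_protocol(transport)
# would do.
def _example_connect(service, proto_factory, addr=('127.0.0.1', 9090)):
    # One TCP connection carries both directions: the client transport sends
    # REQUEST frames while the server transport answers with RESPONSE frames.
    client_transport, server_transport = open_transports_as_client(addr)
    client = TSyncClient(service, proto_factory.get_protocol(client_transport))
    return client, server_transport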
|
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
import argparse
import textwrap
from datetime import datetime
import json
import numpy as np
import os
import pycurl
import pandas as pd
from pandas.io.json import json_normalize
from utils.utils import check_create_folder, exit, timer, list_files
from charts import settings, chartdata
import glob
DESCRIPTION = """A tool that processes OCDS record packages and generates
JSON files that can be used by the Procurement Dashboards project.
Commands:
procurement-charts.py [sourceFolder]
positional arguments:
sourceFolder Folder that contains JSON files with OCDS record
packages
"""
def args_options():
""" Generates an argument parser.
:returns:
Parser object
"""
parser = argparse.ArgumentParser(prog='python procurement-charts.py ./data/ocds',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(DESCRIPTION))
parser.add_argument('source',
help='Provide the path to the folder containing the source data.')
return parser
def slice_df(df, col, field):
"""
Slice a dataframe
:param df:
Pandas dataframe
:type df:
Dataframe
:param col:
The column name to slice on
:type col:
String
:param field:
String to slice on
:type field:
String
:returns:
A sliced dataframe
"""
try:
sliced_df = df.groupby(col).get_group(field)
except KeyError:
print 'The column "%s" doesn\'t contain any "%s"' % (col, field)
sliced_df = pd.DataFrame()
return sliced_df
def flatten_object(o):
out = {}
def flatten(x, name=''):
if type(x) is dict:
for a in x:
flatten(x[a], name + a + '_')
elif type(x) is list:
i = 0
for a in x:
flatten(a, name + str(i) + '_')
i += 1
else:
out[str(name[:-1])] = x
flatten(o)
return out
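def _example_flatten_object():
    # Illustrative sketch (hypothetical input): nested keys are joined with
    # '_' and list items are indexed by position.
    assert flatten_object({'a': {'b': 1}, 'c': [2, 3]}) == \
        {'a_b': 1, 'c_0': 2, 'c_1': 3}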
def flatten_contracts(f, df):
"""
Read an OCDS record package and flatten each contract so it can be added to
a Pandas dataframe
:param f:
Path to a file containing a record package
:type f:
String
:param df:
The dataframe the flattened contracts will be added to
:type df:
Pandas DataFrame
:returns:
DataFrame with the data
"""
with open(f, 'rb') as infile:
package = json.load(infile)
contracts = []
# De-normalize each contract by merging data about the
# tender, buyer and related award.
for r in package['records']:
for c in r['contracts']:
final = {}
final.update({'contract': c})
final.update({'tender': r['tender']})
final.update({'buyer': r['buyer']})
# Every contract is related to an award. Merge the related award
# object in the contract object
for a in r['awards']:
if a['id'] == c['awardID']:
final.update({'award': a})
break
contracts.append(flatten_object(final))
# Caveat:
# doesn't handle multiple suppliers well
flattened_contracts = json_normalize(contracts)
df = df.append(flattened_contracts, ignore_index=True)
return df
def main(args):
"""
Main function - launches the program.
"""
if args:
check_create_folder(settings.folder_charts)
df = pd.DataFrame()
# Read in the JSON files, flatten the contracts and add them to a DataFrame
for f in list_files(args.source + '*'):
df = flatten_contracts(f, df)
# Coerce the date columns to datetime; unparseable values become NaT
df['contract_period_startDate'] = df['contract_period_startDate'].convert_objects(convert_dates='coerce')
df['tender_publicationDate'] = df['tender_publicationDate'].convert_objects(convert_dates='coerce')
df['tender_tenderPeriod_startDate'] = df['tender_tenderPeriod_startDate'].convert_objects(convert_dates='coerce')
df['award_date'] = df['award_date'].convert_objects(convert_dates='coerce')
# Cut every contract that's before a starting date
start_date = datetime.strptime(settings.start_date_charts,'%Y-%m-%d')
end_date = datetime.strptime(settings.end_date_charts,'%Y-%m-%d')
df = df[(df[settings.main_date_contract] >= start_date) & (df[settings.main_date_contract] <= end_date)]
# Generate the summary statistics, independent of comparison or slice
overview_data = chartdata.generate_overview(df)
with open(os.path.join(settings.folder_charts, 'general.json'), 'w') as outfile:
json.dump(overview_data, outfile)
for dimension in settings.dimensions:
for comparison in settings.comparisons:
# Each unique combination of dimension + comparison is a 'lense'
lense_id = dimension + '--' + comparison['id']
lense = {
'metadata': {
'id': lense_id
},
'charts': []
}
for chart in settings.charts:
if chart['dimension'] == dimension:
if chart['function']:
chart['meta']['data'] = []
previous_slice = False
d = { }
# Generate the chart data
for sl in comparison['slices']:
sliced_chart = { 'id': sl['id'], 'label': sl['label'] }
# Prep the dataframe, slice it or serve it full
if comparison['compare']:
sliced_df = slice_df(df, comparison['compare'], sl['field'])
else:
sliced_df = df
if not sliced_df.empty:
current_slice = chart['function'](sliced_df)
# Append the slice's data & meta-data
sliced_chart['data'] = current_slice['data']
chart['meta']['data'].append(sliced_chart)
# Update the domain based on the slice
for axis, func in chart['domain'].items():
if previous_slice:
d[axis] = func(d[axis], current_slice['domain'][axis])
else:
d[axis] = current_slice['domain'][axis]
previous_slice = True
# Add the domain to the chart
for axis, func in chart['domain'].items():
chart['meta'][axis]['domain'] = d[axis]
# Append the chart data
lense['charts'].append(chart['meta'])
file_name = os.path.join(settings.folder_charts,lense_id + '.json')
with open(file_name, 'w') as outfile:
json.dump(lense, outfile)
def __main__():
global parser
parser = args_options()
args = parser.parse_args()
with timer():
    result = main(args)
    if result:
        exit(*result)
if __name__ == "__main__":
try:
__main__()
except (KeyboardInterrupt, pycurl.error):
exit('Received Ctrl + C... Exiting! Bye.', 1)
|
|
# -*- coding: utf-8 -*-
"""
Django settings for nectR Tutoring project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (nectr/config/settings/base.py - 3 = nectr/)
APPS_DIR = ROOT_DIR.path('nectr')
# Load operating system environment variables and then prepare to use them
env = environ.Env()
# The .env file should be loaded only in development environments
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
# Operating system environment variables take precedence over variables
# defined in the .env file; that is, values from the .env file are used
# only when the corresponding environment variable is not set.
env_file = str(ROOT_DIR.path('.env'))
print('Loading : {}'.format(env_file))
env.read_env(env_file)
print('The .env file has been loaded. See base.py for more information')
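# Example .env entries this setup would read (hypothetical values):
# DJANGO_READ_DOT_ENV_FILE=True
# DATABASE_URL=postgres://user:password@localhost:5432/nectr
# CELERY_BROKER_URL=redis://localhost:6379/0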
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = [
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
'django.contrib.humanize',
# Admin
'django.contrib.admin',
]
THIRD_PARTY_APPS = [
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
# 'haystack', # search
'postman', # messaging
'channels' # chat
]
# Apps specific for this project go here.
LOCAL_APPS = [
# custom users app
'nectr.users.apps.UsersConfig',
# Your stuff: custom apps go here
'nectr.student.apps.StudentConfig',
'nectr.tutor.apps.TutorConfig',
'nectr.dashboard.apps.DashboardConfig',
'nectr.courses.apps.CoursesConfig',
'nectr.skills.apps.SkillsConfig',
'nectr.chat.apps.ChatConfig',
'nectr.schedule.apps.ScheduleConfig'
]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'nectr.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [
('Brandon', 'foxb@farmingdale.edu'),
]
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': env.db('DATABASE_URL', default='postgres:///nectr'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates'))
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
str(APPS_DIR.path('static')),
]
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# PASSWORD VALIDATION
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
# ------------------------------------------------------------------------------
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'nectr.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'nectr.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
########## CELERY
INSTALLED_APPS += ['nectr.taskapp.celery.CeleryConfig']
# If you are not using the Django database broker (e.g. you use RabbitMQ, Redis, or Memcached), you can remove the next line.
INSTALLED_APPS += ['kombu.transport.django']
BROKER_URL = env('CELERY_BROKER_URL', default='django://')
if BROKER_URL == 'django://':
CELERY_RESULT_BACKEND = 'redis://'
else:
CELERY_RESULT_BACKEND = BROKER_URL
########## END CELERY
# django-compressor
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['compressor']
STATICFILES_FINDERS += ['compressor.finders.CompressorFinder']
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
# Search Integration using Haystack
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
'URL': 'http://127.0.0.1:8983/solr'
# ...or for multicore...
# 'URL': 'http://127.0.0.1:8983/solr/mysite',
},
}
# Basic channels configuration
CHANNEL_LAYERS = {
"default": {
"BACKEND": "asgiref.inmemory.ChannelLayer",
"ROUTING": "config.routing.channel_routing",
},
}
|
|
from flask import abort, flash, render_template, redirect, url_for, request
from flask_login import login_required
from sqlalchemy.exc import IntegrityError
from wtforms.fields import SelectField
from wtforms.validators import InputRequired
from forms import (
AddDescriptorOptionValueForm,
EditDescriptorNameForm,
EditDescriptorOptionValueForm,
EditDescriptorSearchableForm,
FixAllResourceOptionValueForm,
NewDescriptorForm,
ChangeRequiredOptionDescriptorForm,
RequiredOptionDescriptorMissingForm
)
from . import descriptor
from .. import db
from ..models import (
Descriptor,
OptionAssociation,
Resource,
RequiredOptionDescriptor,
RequiredOptionDescriptorConstructor
)
@descriptor.route('/')
@login_required
def index():
"""View all resource descriptors."""
descriptors = Descriptor.query.all()
return render_template('descriptor/index.html',
descriptors=descriptors)
@descriptor.route('/new-descriptor', methods=['GET', 'POST'])
@login_required
def new_descriptor():
"""Create a new descriptor."""
form = NewDescriptorForm()
for i in range(10):
form.option_values.append_entry()
if form.validate_on_submit():
values = []
for v in form.option_values.data:
if v is not None and len(v) != 0:
values.append(v)
descriptor = Descriptor(
name=form.name.data,
values=values,
is_searchable=form.is_searchable.data
)
if Descriptor.query.filter(Descriptor.name == form.name.data).first() \
is not None:
flash('Descriptor \"{}\" already exists.'.format(descriptor.name),
'form-error')
else:
db.session.add(descriptor)
try:
db.session.commit()
flash('Descriptor \"{}\" successfully created'
.format(descriptor.name),
'form-success')
return redirect(url_for('descriptor.new_descriptor'))
except IntegrityError:
db.session.rollback()
flash('Database error occurred. Please try again.',
'form-error')
return render_template('descriptor/new_descriptor.html', form=form)
@descriptor.route('/<int:desc_id>', methods=['GET', 'POST'])
@login_required
def descriptor_info(desc_id):
"""Display the descriptor info."""
descriptor = Descriptor.query.get(desc_id)
if descriptor is None:
abort(404)
is_option = len(descriptor.values) != 0
return render_template('descriptor/manage_descriptor.html',
desc=descriptor, is_option=is_option)
@descriptor.route('/<int:desc_id>/name', methods=['GET', 'POST'])
@login_required
def edit_name(desc_id):
"""Edit a descriptor's name."""
descriptor = Descriptor.query.get(desc_id)
if descriptor is None:
abort(404)
is_option = len(descriptor.values) != 0
old_name = descriptor.name
form = EditDescriptorNameForm()
if form.validate_on_submit():
if Descriptor.query.filter(Descriptor.name == form.name.data).first() \
is not None:
if old_name == form.name.data:
flash('No change was made', 'form-error')
else:
flash('Descriptor \"{}\" already exists.'.format(form.name.data),
'form-error')
return render_template('descriptor/manage_descriptor.html',
desc=descriptor, form=form,
is_option=is_option)
descriptor.name = form.name.data
db.session.add(descriptor)
try:
db.session.commit()
flash('Name for descriptor \"{}\" successfully changed to \"{}\".'.format(old_name, descriptor.name),
'form-success')
except IntegrityError:
db.session.rollback()
flash('Database error occurred. Please try again.', 'form-error')
return render_template('descriptor/manage_descriptor.html',
desc=descriptor, is_option=is_option)
form.name.data = descriptor.name
return render_template('descriptor/manage_descriptor.html',
desc=descriptor, form=form, is_option=is_option)
@descriptor.route('/<int:desc_id>/searchable', methods=['GET', 'POST'])
@login_required
def edit_searchable(desc_id):
"""Edit a descriptor's searchability."""
descriptor = Descriptor.query.get(desc_id)
if descriptor is None:
abort(404)
is_option = len(descriptor.values) != 0
old_value = descriptor.is_searchable
form = EditDescriptorSearchableForm()
if form.validate_on_submit():
descriptor.is_searchable = form.is_searchable.data
db.session.add(descriptor)
try:
db.session.commit()
flash('Searchability successfully changed from {} to {}.'
.format(old_value, descriptor.is_searchable),
'form-success')
return render_template('descriptor/manage_descriptor.html',
desc=descriptor, is_option=is_option)
except IntegrityError:
db.session.rollback()
flash('Database error occurred. Please try again.', 'form-error')
form.is_searchable.data = old_value
return render_template('descriptor/manage_descriptor.html',
desc=descriptor, form=form, is_option=is_option)
@descriptor.route('/<int:desc_id>/option-values', methods=['GET', 'POST'])
@login_required
def change_option_values_index(desc_id):
"""Shows the page to add/edit/remove a descriptor's option values."""
descriptor = Descriptor.query.get(desc_id)
if descriptor is None:
abort(404)
is_option = len(descriptor.values) != 0
if not is_option:
abort(404)
form = AddDescriptorOptionValueForm()
if form.validate_on_submit():
values = descriptor.values[:]
if form.value.data in values:
flash('Value \"{}\" already exists'.format(form.value.data),
'form-error')
return render_template('descriptor/manage_descriptor.html',
desc=descriptor, is_option=is_option,
desc_id=desc_id, form=form)
values.append(form.value.data)
descriptor.values = values
db.session.add(descriptor)
try:
db.session.commit()
flash('Value \"{}\" successfully added.'.format(form.value.data),
'form-success')
form.value.data = ''
except IntegrityError:
db.session.rollback()
flash('Database error occurred. Please try again.', 'form-error')
return render_template('descriptor/manage_descriptor.html',
desc=descriptor, is_option=is_option,
desc_id=desc_id, form=form)
@descriptor.route('/<int:desc_id>/option-values/edit/<int:option_index>',
methods=['GET', 'POST'])
@login_required
def edit_option_value(desc_id, option_index):
"""Edit a descriptor's selected option value."""
descriptor = Descriptor.query.get(desc_id)
if descriptor is None:
abort(404)
is_option = len(descriptor.values) != 0
form = EditDescriptorOptionValueForm()
if not is_option:
abort(404)
if form.validate_on_submit():
old_value = descriptor.values[option_index]
values = descriptor.values[:]
values[option_index] = form.value.data
descriptor.values = values
db.session.add(descriptor)
try:
db.session.commit()
flash('Value \"{}\" for descriptor \"{}\" successfully changed to \"{}\".'
.format(old_value, descriptor.name,
descriptor.values[option_index]),
'form-success')
return redirect(url_for('descriptor.descriptor_info',
desc_id=desc_id))
except IntegrityError:
db.session.rollback()
flash('Database error occurred. Please try again.', 'form-error')
else:
form.value.data = descriptor.values[option_index]
return render_template('descriptor/manage_descriptor.html',
desc=descriptor, is_option=is_option,
desc_id=desc_id, form=form)
@descriptor.route('/<int:desc_id>/option-values/remove/<int:option_index>',
methods=['GET', 'POST'])
@login_required
def remove_option_value(desc_id, option_index):
"""Remove a descriptor's selected option value."""
descriptor = Descriptor.query.get(desc_id)
if descriptor is None or len(descriptor.values) == 0:
abort(404)
old_value = descriptor.values[option_index]
req_opt_desc = RequiredOptionDescriptor.query.all()
is_required = False
if req_opt_desc:
req_opt_desc = req_opt_desc[0]
is_required = req_opt_desc.descriptor_id == desc_id
# redirect to the dedicated removal flow for a required descriptor's option value
if is_required:
return redirect(url_for('descriptor.remove_option_value_req',
desc_id=desc_id, option_index=option_index))
if len(descriptor.values) == 1:
flash('Descriptor \"{}\" only has one value.'.format(descriptor.name),
'form-error')
return redirect(url_for('descriptor.change_option_values_index',
desc_id=desc_id))
option_assocs = OptionAssociation.query.filter(db.and_(
OptionAssociation.descriptor_id == desc_id,
OptionAssociation.option == option_index
)).all()
choice_names, choices = generate_option_choices(descriptor, option_index)
# If no resources are affected, just remove the option value.
if len(option_assocs) == 0:
# HACK: keep descriptor value indices by inserting an empty string in place of the descriptor value
choice_names.insert(option_index, '')
remove_value_from_db(descriptor, choice_names, old_value)
return redirect(url_for('descriptor.descriptor_info', desc_id=desc_id))
form = FixAllResourceOptionValueForm()
if form.validate_on_submit():
OptionAssociation.query.filter(db.and_(
OptionAssociation.descriptor_id == desc_id,
OptionAssociation.option == option_index
)).delete()
# HACK: keep descriptor value indices by inserting an empty string in place of the descriptor value
choice_names.insert(option_index, '')
if remove_value_from_db(descriptor, choice_names, old_value):
return redirect(url_for('descriptor.descriptor_info',
desc_id=desc_id))
else:
flash('Database error occurred. Please try again.', 'form-error')
return render_template('descriptor/confirm_resources.html',
option_assocs=option_assocs, desc_id=desc_id,
desc=descriptor, option_index=option_index,
form=form)
@descriptor.route('/<int:desc_id>/option-values/remove-req/<int:option_index>',
methods=['GET', 'POST'])
@login_required
def remove_option_value_req(desc_id, option_index):
"""Remove a REQUIRED descriptor's selected option value."""
descriptor = Descriptor.query.get(desc_id)
if descriptor is None or len(descriptor.values) == 0:
abort(404)
old_value = descriptor.values[option_index]
req_opt_desc = RequiredOptionDescriptor.query.all()
is_required = False
if req_opt_desc:
req_opt_desc = req_opt_desc[0]
is_required = req_opt_desc.descriptor_id == desc_id
if not is_required:
abort(404)
if len(descriptor.values) == 1:
flash('Descriptor \"{}\" only has one value.'.format(descriptor.name),
'form-error')
return redirect(url_for('descriptor.change_option_values_index',
desc_id=desc_id))
option_assocs = OptionAssociation.query.filter(db.and_(
OptionAssociation.descriptor_id == desc_id,
OptionAssociation.option == option_index
)).all()
choice_names, choices = generate_option_choices(descriptor, option_index)
# If no resources are affected, just remove the option value.
if len(option_assocs) == 0:
# HACK: keep descriptor value indices by inserting an empty string in place of the descriptor value
choice_names.insert(option_index, '')
remove_value_from_db(descriptor, choice_names, old_value)
return redirect(url_for('descriptor.descriptor_info', desc_id=desc_id))
# Find resources whose only value for this required descriptor is the selected option
missing_assocs = set()
for oa in option_assocs:
all_assocs_req = OptionAssociation.query.filter(db.and_(
OptionAssociation.descriptor_id == desc_id,
OptionAssociation.resource_id == oa.resource_id
)).all()
if len(all_assocs_req) < 2:
missing_assocs.add(oa)
option_assocs = set(option_assocs) - missing_assocs
form = FixAllResourceOptionValueForm()
if form.validate_on_submit():
# Check that, after removing the option value, every resource still has a
# value for the required option descriptor; otherwise force the user to
# uphold this constraint.
leftover = set()
for oa in missing_assocs:
all_assocs_req = OptionAssociation.query.filter(db.and_(
OptionAssociation.descriptor_id == desc_id,
OptionAssociation.resource_id == oa.resource_id,
OptionAssociation.option != option_index,
)).all()
if len(all_assocs_req) == 0:
leftover.add(oa)
if len(leftover) > 0:
flash('All resources must have an alternative value for the required option descriptor.',
'form-error')
else:
# delete the option value associations
OptionAssociation.query.filter(db.and_(
OptionAssociation.descriptor_id == desc_id,
OptionAssociation.option == option_index
)).delete()
# HACK: keep descriptor value indices by inserting an empty string in place of the descriptor value
choice_names.insert(option_index, '')
if remove_value_from_db(descriptor, choice_names, old_value):
return redirect(url_for('descriptor.descriptor_info',
desc_id=desc_id))
else:
flash('Database error occurred. Please try again.', 'form-error')
return render_template('descriptor/confirm_resources_req.html',
option_assocs=option_assocs, desc_id=desc_id,
desc=descriptor, option_index=option_index,
form=form, missing_assocs=missing_assocs)
def generate_option_choices(descriptor, removed_index):
"""Helper function to generate the new options + indices"""
choice_names = (descriptor.values[:removed_index] +
descriptor.values[removed_index + 1:])
choices = []
for i in range(len(choice_names)):
choices.append((i, choice_names[i]))
return choice_names, choices
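# Illustrative example (hypothetical values): for a descriptor with values
# ['a', 'b', 'c'] and removed_index=1, this returns
# (['a', 'c'], [(0, 'a'), (1, 'c')]) -- the remaining values, re-indexed.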
def remove_value_from_db(descriptor, values, old_value):
"""Helper function to update the values an option can take."""
descriptor.values = values
db.session.add(descriptor)
try:
db.session.commit()
flash('Value \"{}\" for descriptor \"{}\" successfully removed.'
.format(old_value, descriptor.name),
'form-success')
return True
except IntegrityError:
db.session.rollback()
flash('Database error occurred. Please try again.', 'form-error')
return False
@descriptor.route('/<int:desc_id>/delete_request')
@login_required
def delete_descriptor_request(desc_id):
"""Shows the page for deletion of a descriptor."""
descriptor = Descriptor.query.get(desc_id)
if descriptor is None:
abort(404)
is_option = len(descriptor.values) != 0
req_opt_desc = RequiredOptionDescriptor.query.all()
is_required = False
if req_opt_desc:
req_opt_desc = req_opt_desc[0]
is_required = req_opt_desc.descriptor_id == descriptor.id
return render_template('descriptor/manage_descriptor.html',
desc=descriptor, is_option=is_option,
is_required=is_required)
@descriptor.route('/<int:desc_id>/delete')
@login_required
def delete_descriptor(desc_id):
"""Deletes a descriptor."""
descriptor = Descriptor.query.get(desc_id)
if descriptor is None:
abort(404)
is_option = len(descriptor.values) != 0
db.session.delete(descriptor)
try:
db.session.commit()
flash('Successfully deleted descriptor %s.' % descriptor.name,
'success')
except IntegrityError:
db.session.rollback()
flash('Database error occurred. Please try again.', 'form-error')
return render_template('descriptor/manage_descriptor.html',
desc=descriptor, is_option=is_option)
return redirect(url_for('descriptor.index'))
@descriptor.route('/change-required-option-descriptor', methods=['GET', 'POST'])
@login_required
def change_required_option_descriptor():
descriptors = Descriptor.query.all()
choices = []
for d in descriptors:
if d.values:
choices.append((d.name, d.name))
req_opt_desc = RequiredOptionDescriptor.query.all()[0]
current_name = ""
if req_opt_desc.descriptor_id != -1:
descriptor = Descriptor.query.filter_by(
id=req_opt_desc.descriptor_id
).first()
if descriptor is not None:
current_name = descriptor.name
if current_name != "":
setattr(
ChangeRequiredOptionDescriptorForm,
'descriptor',
SelectField(
'Option Descriptor',
choices=choices,
validators=[InputRequired()],
default=current_name)
)
form = ChangeRequiredOptionDescriptorForm()
if form.validate_on_submit():
RequiredOptionDescriptorConstructor.query.delete()
db.session.commit()
desc = Descriptor.query.filter_by(
name=form.descriptor.data
).first()
if desc is not None:
req_opt_desc_const = RequiredOptionDescriptorConstructor(name=desc.name, values=desc.values)
db.session.add(req_opt_desc_const)
db.session.commit()
return redirect(url_for('descriptor.review_required_option_descriptor'))
else:
form = None
return render_template(
'descriptor/change_required_option_descriptor.html',
form=form
)
@descriptor.route('/review-required-option-descriptor', methods=['GET', 'POST'])
@login_required
def review_required_option_descriptor():
req_opt_desc_const = RequiredOptionDescriptorConstructor.query.all()[0]
form = RequiredOptionDescriptorMissingForm()
missing_resources = []
resources = Resource.query.all()
descriptor = Descriptor.query.filter_by(
name=req_opt_desc_const.name
).first()
for r in resources:
if descriptor is None:
missing_resources.append(r.name)
else:
option_association = OptionAssociation.query.filter_by(
resource_id=r.id,
descriptor_id=descriptor.id
).first()
if option_association is None:
missing_resources.append(r.name)
if request.method == 'POST':
if len(form.resources.data) < len(missing_resources):
flash('Error: You must choose an option for each resource. Please try again.', 'form-error')
else:
for j, r_name in enumerate(missing_resources):
resource = Resource.query.filter_by(
name=r_name
).first()
if resource is not None:
for val in form.resources.data[j]:
new_association = OptionAssociation(
resource_id=resource.id,
descriptor_id=descriptor.id,
option=descriptor.values.index(val),
resource=resource,
descriptor=descriptor)
db.session.add(new_association)
RequiredOptionDescriptor.query.delete()
req_opt_desc = RequiredOptionDescriptor(descriptor_id=descriptor.id)
db.session.add(req_opt_desc)
db.session.commit()
return redirect(url_for('descriptor.index'))
for j, r_name in enumerate(missing_resources):
form.resources.append_entry()
form.resources[j].label = r_name
form.resources[j].choices = [(v, v) for v in req_opt_desc_const.values]
return render_template('descriptor/review_required_option_descriptor.html', form=form)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import array
import ctypes
import datetime
import os
import pickle
import sys
import unittest
from pyspark.sql import Row
from pyspark.sql.functions import col, UserDefinedFunction
from pyspark.sql.types import *
from pyspark.sql.types import _array_signed_int_typecode_ctype_mappings, _array_type_mappings, \
_array_unsigned_int_typecode_ctype_mappings, _infer_type, _make_type_verifier, _merge_type
from pyspark.testing.sqlutils import ReusedSQLTestCase, ExamplePointUDT, PythonOnlyUDT, \
ExamplePoint, PythonOnlyPoint, MyObject
class TypesTests(ReusedSQLTestCase):
def test_apply_schema_to_row(self):
df = self.spark.read.json(self.sc.parallelize(["""{"a":2}"""]))
df2 = self.spark.createDataFrame(df.rdd.map(lambda x: x), df.schema)
self.assertEqual(df.collect(), df2.collect())
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_infer_schema_to_local(self):
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
df = self.spark.createDataFrame(input)
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_apply_schema_to_dict_and_rows(self):
schema = StructType().add("b", StringType()).add("a", IntegerType())
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
for verify in [False, True]:
df = self.spark.createDataFrame(input, schema, verifySchema=verify)
df2 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(10, df3.count())
input = [Row(a=x, b=str(x)) for x in range(10)]
df4 = self.spark.createDataFrame(input, schema, verifySchema=verify)
self.assertEqual(10, df4.count())
def test_create_dataframe_schema_mismatch(self):
input = [Row(a=1)]
rdd = self.sc.parallelize(range(3)).map(lambda i: Row(a=i))
schema = StructType([StructField("a", IntegerType()), StructField("b", StringType())])
df = self.spark.createDataFrame(rdd, schema)
self.assertRaises(Exception, lambda: df.show())
def test_infer_schema(self):
d = [Row(l=[], d={}, s=None),
Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")}, s="")]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
self.assertEqual([], df.rdd.map(lambda r: r.l).first())
self.assertEqual([None, ""], df.rdd.map(lambda r: r.s).collect())
with self.tempView("test"):
df.createOrReplaceTempView("test")
result = self.spark.sql("SELECT l[0].a from test where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
self.assertEqual({}, df2.rdd.map(lambda r: r.d).first())
self.assertEqual([None, ""], df2.rdd.map(lambda r: r.s).collect())
with self.tempView("test2"):
df2.createOrReplaceTempView("test2")
result = self.spark.sql("SELECT l[0].a from test2 where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
def test_infer_schema_specification(self):
from decimal import Decimal
class A(object):
def __init__(self):
self.a = 1
data = [
True,
1,
"a",
u"a",
datetime.date(1970, 1, 1),
datetime.datetime(1970, 1, 1, 0, 0),
1.0,
array.array("d", [1]),
[1],
(1, ),
{"a": 1},
bytearray(1),
Decimal(1),
Row(a=1),
Row("a")(1),
A(),
]
df = self.spark.createDataFrame([data])
actual = list(map(lambda x: x.dataType.simpleString(), df.schema))
expected = [
'boolean',
'bigint',
'string',
'string',
'date',
'timestamp',
'double',
'array<double>',
'array<bigint>',
'struct<_1:bigint>',
'map<string,bigint>',
'binary',
'decimal(38,18)',
'struct<a:bigint>',
'struct<a:bigint>',
'struct<a:bigint>',
]
self.assertEqual(actual, expected)
actual = list(df.first())
expected = [
True,
1,
'a',
u"a",
datetime.date(1970, 1, 1),
datetime.datetime(1970, 1, 1, 0, 0),
1.0,
[1.0],
[1],
Row(_1=1),
{"a": 1},
bytearray(b'\x00'),
Decimal('1.000000000000000000'),
Row(a=1),
Row(a=1),
Row(a=1),
]
self.assertEqual(actual, expected)
def test_infer_schema_not_enough_names(self):
df = self.spark.createDataFrame([["a", "b"]], ["col1"])
self.assertEqual(df.columns, ['col1', '_2'])
def test_infer_schema_fails(self):
with self.assertRaisesRegexp(TypeError, 'field a'):
self.spark.createDataFrame(self.spark.sparkContext.parallelize([[1, 1], ["x", 1]]),
schema=["a", "b"], samplingRatio=0.99)
def test_infer_nested_schema(self):
NestedRow = Row("f1", "f2")
nestedRdd1 = self.sc.parallelize([NestedRow([1, 2], {"row1": 1.0}),
NestedRow([2, 3], {"row2": 2.0})])
df = self.spark.createDataFrame(nestedRdd1)
self.assertEqual(Row(f1=[1, 2], f2={u'row1': 1.0}), df.collect()[0])
nestedRdd2 = self.sc.parallelize([NestedRow([[1, 2], [2, 3]], [1, 2]),
NestedRow([[2, 3], [3, 4]], [2, 3])])
df = self.spark.createDataFrame(nestedRdd2)
self.assertEqual(Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), df.collect()[0])
from collections import namedtuple
CustomRow = namedtuple('CustomRow', 'field1 field2')
rdd = self.sc.parallelize([CustomRow(field1=1, field2="row1"),
CustomRow(field1=2, field2="row2"),
CustomRow(field1=3, field2="row3")])
df = self.spark.createDataFrame(rdd)
self.assertEqual(Row(field1=1, field2=u'row1'), df.first())
def test_create_dataframe_from_dict_respects_schema(self):
df = self.spark.createDataFrame([{'a': 1}], ["b"])
self.assertEqual(df.columns, ['b'])
def test_negative_decimal(self):
df = self.spark.createDataFrame([(1, ), (11, )], ["value"])
ret = df.select(col("value").cast(DecimalType(1, -1))).collect()
actual = list(map(lambda r: int(r.value), ret))
self.assertEqual(actual, [0, 10])
def test_create_dataframe_from_objects(self):
data = [MyObject(1, "1"), MyObject(2, "2")]
df = self.spark.createDataFrame(data)
self.assertEqual(df.dtypes, [("key", "bigint"), ("value", "string")])
self.assertEqual(df.first(), Row(key=1, value="1"))
def test_apply_schema(self):
from datetime import date, datetime
rdd = self.sc.parallelize([(127, -128, -32768, 32767, 2147483647, 1.0,
date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3], None)])
schema = StructType([
StructField("byte1", ByteType(), False),
StructField("byte2", ByteType(), False),
StructField("short1", ShortType(), False),
StructField("short2", ShortType(), False),
StructField("int1", IntegerType(), False),
StructField("float1", FloatType(), False),
StructField("date1", DateType(), False),
StructField("time1", TimestampType(), False),
StructField("map1", MapType(StringType(), IntegerType(), False), False),
StructField("struct1", StructType([StructField("b", ShortType(), False)]), False),
StructField("list1", ArrayType(ByteType(), False), False),
StructField("null1", DoubleType(), True)])
df = self.spark.createDataFrame(rdd, schema)
results = df.rdd.map(lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int1, x.float1,
x.date1, x.time1, x.map1["a"], x.struct1.b, x.list1, x.null1))
r = (127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1),
datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
self.assertEqual(r, results.first())
with self.tempView("table2"):
df.createOrReplaceTempView("table2")
r = self.spark.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
"short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, " +
"float1 + 1.5 as float1 FROM table2").first()
self.assertEqual((126, -127, -32767, 32766, 2147483646, 2.5), tuple(r))
def test_convert_row_to_dict(self):
row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
self.assertEqual(1, row.asDict()['l'][0].a)
df = self.sc.parallelize([row]).toDF()
with self.tempView("test"):
df.createOrReplaceTempView("test")
row = self.spark.sql("select l, d from test").head()
self.assertEqual(1, row.asDict()["l"][0].a)
self.assertEqual(1.0, row.asDict()['d']['key'].c)
def test_udt(self):
from pyspark.sql.types import _parse_datatype_json_string, _infer_type, _make_type_verifier
def check_datatype(datatype):
pickled = pickle.loads(pickle.dumps(datatype))
assert datatype == pickled
scala_datatype = self.spark._jsparkSession.parseDataType(datatype.json())
python_datatype = _parse_datatype_json_string(scala_datatype.json())
assert datatype == python_datatype
check_datatype(ExamplePointUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
check_datatype(structtype_with_udt)
p = ExamplePoint(1.0, 2.0)
self.assertEqual(_infer_type(p), ExamplePointUDT())
_make_type_verifier(ExamplePointUDT())(ExamplePoint(1.0, 2.0))
self.assertRaises(ValueError, lambda: _make_type_verifier(ExamplePointUDT())([1.0, 2.0]))
check_datatype(PythonOnlyUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
check_datatype(structtype_with_udt)
p = PythonOnlyPoint(1.0, 2.0)
self.assertEqual(_infer_type(p), PythonOnlyUDT())
_make_type_verifier(PythonOnlyUDT())(PythonOnlyPoint(1.0, 2.0))
self.assertRaises(
ValueError,
lambda: _make_type_verifier(PythonOnlyUDT())([1.0, 2.0]))
def test_simple_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.collect()
def test_nested_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", ArrayType(PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, [PythonOnlyPoint(float(i), float(i))]) for i in range(10)],
schema=schema)
df.collect()
schema = StructType().add("key", LongType()).add("val",
MapType(LongType(), PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, {i % 3: PythonOnlyPoint(float(i + 1), float(i + 1))}) for i in range(10)],
schema=schema)
df.collect()
def test_complex_nested_udt_in_df(self):
from pyspark.sql.functions import udf
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.collect()
gd = df.groupby("key").agg({"val": "collect_list"})
gd.collect()
udf = udf(lambda k, v: [(k, v[0])], ArrayType(df.schema))
gd.select(udf(*gd)).collect()
def test_udt_with_none(self):
df = self.spark.range(0, 10, 1, 1)
def myudf(x):
if x > 0:
return PythonOnlyPoint(float(x), float(x))
self.spark.catalog.registerFunction("udf", myudf, PythonOnlyUDT())
rows = [r[0] for r in df.selectExpr("udf(id)").take(2)]
self.assertEqual(rows, [None, PythonOnlyPoint(1, 1)])
def test_infer_schema_with_udt(self):
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), ExamplePointUDT)
with self.tempView("labeled_point"):
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), PythonOnlyUDT)
with self.tempView("labeled_point"):
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_apply_schema_with_udt(self):
row = (1.0, ExamplePoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = (1.0, PythonOnlyPoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_udf_with_udt(self):
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: ExamplePoint(p.x + 1, p.y + 1), ExamplePointUDT())
self.assertEqual(ExamplePoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: PythonOnlyPoint(p.x + 1, p.y + 1), PythonOnlyUDT())
self.assertEqual(PythonOnlyPoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
def test_parquet_with_udt(self):
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
output_dir = os.path.join(self.tempdir.name, "labeled_point")
df0.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
df0.write.parquet(output_dir, mode='overwrite')
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_union_with_udt(self):
row1 = (1.0, ExamplePoint(1.0, 2.0))
row2 = (2.0, ExamplePoint(3.0, 4.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df1 = self.spark.createDataFrame([row1], schema)
df2 = self.spark.createDataFrame([row2], schema)
result = df1.union(df2).orderBy("label").collect()
self.assertEqual(
result,
[
Row(label=1.0, point=ExamplePoint(1.0, 2.0)),
Row(label=2.0, point=ExamplePoint(3.0, 4.0))
]
)
def test_cast_to_string_with_udt(self):
from pyspark.sql.functions import col
row = (ExamplePoint(1.0, 2.0), PythonOnlyPoint(3.0, 4.0))
schema = StructType([StructField("point", ExamplePointUDT(), False),
StructField("pypoint", PythonOnlyUDT(), False)])
df = self.spark.createDataFrame([row], schema)
result = df.select(col('point').cast('string'), col('pypoint').cast('string')).head()
self.assertEqual(result, Row(point=u'(1.0, 2.0)', pypoint=u'[3.0, 4.0]'))
def test_struct_type(self):
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
# Catch exception raised during improper construction
self.assertRaises(ValueError, lambda: StructType().add("name"))
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
for field in struct1:
self.assertIsInstance(field, StructField)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertEqual(len(struct1), 2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertIs(struct1["f1"], struct1.fields[0])
self.assertIs(struct1[0], struct1.fields[0])
self.assertEqual(struct1[0:1], StructType(struct1.fields[0:1]))
self.assertRaises(KeyError, lambda: struct1["f9"])
self.assertRaises(IndexError, lambda: struct1[9])
self.assertRaises(TypeError, lambda: struct1[9.9])
def test_parse_datatype_string(self):
from pyspark.sql.types import _all_atomic_types, _parse_datatype_string
for k, t in _all_atomic_types.items():
if t != NullType:
self.assertEqual(t(), _parse_datatype_string(k))
self.assertEqual(IntegerType(), _parse_datatype_string("int"))
self.assertEqual(DecimalType(1, 1), _parse_datatype_string("decimal(1 ,1)"))
self.assertEqual(DecimalType(10, 1), _parse_datatype_string("decimal( 10,1 )"))
self.assertEqual(DecimalType(11, 1), _parse_datatype_string("decimal(11,1)"))
self.assertEqual(
ArrayType(IntegerType()),
_parse_datatype_string("array<int >"))
self.assertEqual(
MapType(IntegerType(), DoubleType()),
_parse_datatype_string("map< int, double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("struct<a:int, c:double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a:int, c:double"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a INT, c DOUBLE"))
def test_metadata_null(self):
schema = StructType([StructField("f1", StringType(), True, None),
StructField("f2", StringType(), True, {'a': None})])
rdd = self.sc.parallelize([["a", "b"], ["c", "d"]])
self.spark.createDataFrame(rdd, schema)
def test_access_nested_types(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.l.getItem(0)).first()[0])
self.assertEqual(1, df.select(df.r.a).first()[0])
self.assertEqual("b", df.select(df.r.getField("b")).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
self.assertEqual("v", df.select(df.d.getItem("k")).first()[0])
def test_infer_long_type(self):
longrow = [Row(f1='a', f2=100000000000000)]
df = self.sc.parallelize(longrow).toDF()
self.assertEqual(df.schema.fields[1].dataType, LongType())
# this saving as Parquet caused issues as well.
output_dir = os.path.join(self.tempdir.name, "infer_long_type")
df.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
self.assertEqual('a', df1.first().f1)
self.assertEqual(100000000000000, df1.first().f2)
self.assertEqual(_infer_type(1), LongType())
self.assertEqual(_infer_type(2**10), LongType())
self.assertEqual(_infer_type(2**20), LongType())
self.assertEqual(_infer_type(2**31 - 1), LongType())
self.assertEqual(_infer_type(2**31), LongType())
self.assertEqual(_infer_type(2**61), LongType())
self.assertEqual(_infer_type(2**71), LongType())
def test_merge_type(self):
self.assertEqual(_merge_type(LongType(), NullType()), LongType())
self.assertEqual(_merge_type(NullType(), LongType()), LongType())
self.assertEqual(_merge_type(LongType(), LongType()), LongType())
self.assertEqual(_merge_type(
ArrayType(LongType()),
ArrayType(LongType())
), ArrayType(LongType()))
with self.assertRaisesRegexp(TypeError, 'element in array'):
_merge_type(ArrayType(LongType()), ArrayType(DoubleType()))
self.assertEqual(_merge_type(
MapType(StringType(), LongType()),
MapType(StringType(), LongType())
), MapType(StringType(), LongType()))
with self.assertRaisesRegexp(TypeError, 'key of map'):
_merge_type(
MapType(StringType(), LongType()),
MapType(DoubleType(), LongType()))
with self.assertRaisesRegexp(TypeError, 'value of map'):
_merge_type(
MapType(StringType(), LongType()),
MapType(StringType(), DoubleType()))
self.assertEqual(_merge_type(
StructType([StructField("f1", LongType()), StructField("f2", StringType())]),
StructType([StructField("f1", LongType()), StructField("f2", StringType())])
), StructType([StructField("f1", LongType()), StructField("f2", StringType())]))
with self.assertRaisesRegexp(TypeError, 'field f1'):
_merge_type(
StructType([StructField("f1", LongType()), StructField("f2", StringType())]),
StructType([StructField("f1", DoubleType()), StructField("f2", StringType())]))
self.assertEqual(_merge_type(
StructType([StructField("f1", StructType([StructField("f2", LongType())]))]),
StructType([StructField("f1", StructType([StructField("f2", LongType())]))])
), StructType([StructField("f1", StructType([StructField("f2", LongType())]))]))
with self.assertRaisesRegexp(TypeError, 'field f2 in field f1'):
_merge_type(
StructType([StructField("f1", StructType([StructField("f2", LongType())]))]),
StructType([StructField("f1", StructType([StructField("f2", StringType())]))]))
self.assertEqual(_merge_type(
StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())]),
StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())])
), StructType([StructField("f1", ArrayType(LongType())), StructField("f2", StringType())]))
with self.assertRaisesRegexp(TypeError, 'element in array field f1'):
_merge_type(
StructType([
StructField("f1", ArrayType(LongType())),
StructField("f2", StringType())]),
StructType([
StructField("f1", ArrayType(DoubleType())),
StructField("f2", StringType())]))
self.assertEqual(_merge_type(
StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())]),
StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())])
), StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())]))
with self.assertRaisesRegexp(TypeError, 'value of map field f1'):
_merge_type(
StructType([
StructField("f1", MapType(StringType(), LongType())),
StructField("f2", StringType())]),
StructType([
StructField("f1", MapType(StringType(), DoubleType())),
StructField("f2", StringType())]))
self.assertEqual(_merge_type(
StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]),
StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))])
), StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]))
with self.assertRaisesRegexp(TypeError, 'key of map element in array field f1'):
_merge_type(
StructType([StructField("f1", ArrayType(MapType(StringType(), LongType())))]),
StructType([StructField("f1", ArrayType(MapType(DoubleType(), LongType())))])
)
# test for SPARK-16542
def test_array_types(self):
        # This test needs to make sure that the Scala type selected is at
        # least as large as Python's types. This is necessary because
        # Python's array types depend on the C implementation on the
        # machine. Therefore there is no machine-independent correspondence
        # between Python's array types and Scala types.
        # See: https://docs.python.org/2/library/array.html
def assertCollectSuccess(typecode, value):
row = Row(myarray=array.array(typecode, [value]))
df = self.spark.createDataFrame([row])
self.assertEqual(df.first()["myarray"][0], value)
# supported string types
#
# String types in python's array are "u" for Py_UNICODE and "c" for char.
# "u" will be removed in python 4, and "c" is not supported in python 3.
supported_string_types = []
if sys.version_info[0] < 4:
supported_string_types += ['u']
# test unicode
assertCollectSuccess('u', u'a')
if sys.version_info[0] < 3:
supported_string_types += ['c']
# test string
assertCollectSuccess('c', 'a')
# supported float and double
#
# Test max, min, and precision for float and double, assuming IEEE 754
# floating-point format.
supported_fractional_types = ['f', 'd']
assertCollectSuccess('f', ctypes.c_float(1e+38).value)
assertCollectSuccess('f', ctypes.c_float(1e-38).value)
assertCollectSuccess('f', ctypes.c_float(1.123456).value)
assertCollectSuccess('d', sys.float_info.max)
assertCollectSuccess('d', sys.float_info.min)
assertCollectSuccess('d', sys.float_info.epsilon)
# supported signed int types
#
# The size of C types changes with implementation, we need to make sure
# that there is no overflow error on the platform running this test.
supported_signed_int_types = list(
set(_array_signed_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_signed_int_types:
ctype = _array_signed_int_typecode_ctype_mappings[t]
max_val = 2 ** (ctypes.sizeof(ctype) * 8 - 1)
assertCollectSuccess(t, max_val - 1)
assertCollectSuccess(t, -max_val)
# supported unsigned int types
#
# JVM does not have unsigned types. We need to be very careful to make
# sure that there is no overflow error.
supported_unsigned_int_types = list(
set(_array_unsigned_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_unsigned_int_types:
ctype = _array_unsigned_int_typecode_ctype_mappings[t]
assertCollectSuccess(t, 2 ** (ctypes.sizeof(ctype) * 8) - 1)
# all supported types
#
# Make sure the types tested above:
# 1. are all supported types
# 2. cover all supported types
supported_types = (supported_string_types +
supported_fractional_types +
supported_signed_int_types +
supported_unsigned_int_types)
self.assertEqual(set(supported_types), set(_array_type_mappings.keys()))
# all unsupported types
#
        # The keys in _array_type_mappings form a complete list of all
        # supported types, and types not in _array_type_mappings are
        # considered unsupported.
        # `array.typecodes` is not available in python 2.
if sys.version_info[0] < 3:
all_types = set(['c', 'b', 'B', 'u', 'h', 'H', 'i', 'I', 'l', 'L', 'f', 'd'])
else:
all_types = set(array.typecodes)
unsupported_types = all_types - set(supported_types)
# test unsupported types
for t in unsupported_types:
with self.assertRaises(TypeError):
a = array.array(t)
self.spark.createDataFrame([Row(myarray=a)]).collect()
class DataTypeTests(unittest.TestCase):
# regression test for SPARK-6055
def test_data_type_eq(self):
lt = LongType()
lt2 = pickle.loads(pickle.dumps(LongType()))
self.assertEqual(lt, lt2)
# regression test for SPARK-7978
def test_decimal_type(self):
t1 = DecimalType()
t2 = DecimalType(10, 2)
self.assertTrue(t2 is not t1)
self.assertNotEqual(t1, t2)
t3 = DecimalType(8)
self.assertNotEqual(t2, t3)
# regression test for SPARK-10392
def test_datetype_equal_zero(self):
dt = DateType()
self.assertEqual(dt.fromInternal(0), datetime.date(1970, 1, 1))
# regression test for SPARK-17035
def test_timestamp_microsecond(self):
tst = TimestampType()
self.assertEqual(tst.toInternal(datetime.datetime.max) % 1000000, 999999)
def test_empty_row(self):
row = Row()
self.assertEqual(len(row), 0)
def test_struct_field_type_name(self):
struct_field = StructField("a", IntegerType())
self.assertRaises(TypeError, struct_field.typeName)
def test_invalid_create_row(self):
row_class = Row("c1", "c2")
self.assertRaises(ValueError, lambda: row_class(1, 2, 3))
class DataTypeVerificationTests(unittest.TestCase):
def test_verify_type_exception_msg(self):
self.assertRaisesRegexp(
ValueError,
"test_name",
lambda: _make_type_verifier(StringType(), nullable=False, name="test_name")(None))
schema = StructType([StructField('a', StructType([StructField('b', IntegerType())]))])
self.assertRaisesRegexp(
TypeError,
"field b in field a",
lambda: _make_type_verifier(schema)([["data"]]))
def test_verify_type_ok_nullable(self):
obj = None
types = [IntegerType(), FloatType(), StringType(), StructType([])]
for data_type in types:
try:
_make_type_verifier(data_type, nullable=True)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=True)" % (obj, data_type))
def test_verify_type_not_nullable(self):
import array
import datetime
import decimal
schema = StructType([
StructField('s', StringType(), nullable=False),
StructField('i', IntegerType(), nullable=True)])
class MyObj:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
# obj, data_type
success_spec = [
# String
("", StringType()),
(u"", StringType()),
(1, StringType()),
(1.0, StringType()),
([], StringType()),
({}, StringType()),
# UDT
(ExamplePoint(1.0, 2.0), ExamplePointUDT()),
# Boolean
(True, BooleanType()),
# Byte
(-(2**7), ByteType()),
(2**7 - 1, ByteType()),
# Short
(-(2**15), ShortType()),
(2**15 - 1, ShortType()),
# Integer
(-(2**31), IntegerType()),
(2**31 - 1, IntegerType()),
# Long
(2**64, LongType()),
# Float & Double
(1.0, FloatType()),
(1.0, DoubleType()),
# Decimal
(decimal.Decimal("1.0"), DecimalType()),
# Binary
(bytearray([1, 2]), BinaryType()),
# Date/Timestamp
(datetime.date(2000, 1, 2), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), TimestampType()),
# Array
([], ArrayType(IntegerType())),
(["1", None], ArrayType(StringType(), containsNull=True)),
([1, 2], ArrayType(IntegerType())),
((1, 2), ArrayType(IntegerType())),
(array.array('h', [1, 2]), ArrayType(IntegerType())),
# Map
({}, MapType(StringType(), IntegerType())),
({"a": 1}, MapType(StringType(), IntegerType())),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=True)),
# Struct
({"s": "a", "i": 1}, schema),
({"s": "a", "i": None}, schema),
({"s": "a"}, schema),
({"s": "a", "f": 1.0}, schema),
(Row(s="a", i=1), schema),
(Row(s="a", i=None), schema),
(Row(s="a", i=1, f=1.0), schema),
(["a", 1], schema),
(["a", None], schema),
(("a", 1), schema),
(MyObj(s="a", i=1), schema),
(MyObj(s="a", i=None), schema),
(MyObj(s="a"), schema),
]
# obj, data_type, exception class
failure_spec = [
# String (match anything but None)
(None, StringType(), ValueError),
# UDT
(ExamplePoint(1.0, 2.0), PythonOnlyUDT(), ValueError),
# Boolean
(1, BooleanType(), TypeError),
("True", BooleanType(), TypeError),
([1], BooleanType(), TypeError),
# Byte
(-(2**7) - 1, ByteType(), ValueError),
(2**7, ByteType(), ValueError),
("1", ByteType(), TypeError),
(1.0, ByteType(), TypeError),
# Short
(-(2**15) - 1, ShortType(), ValueError),
(2**15, ShortType(), ValueError),
# Integer
(-(2**31) - 1, IntegerType(), ValueError),
(2**31, IntegerType(), ValueError),
# Float & Double
(1, FloatType(), TypeError),
(1, DoubleType(), TypeError),
# Decimal
(1.0, DecimalType(), TypeError),
(1, DecimalType(), TypeError),
("1.0", DecimalType(), TypeError),
# Binary
(1, BinaryType(), TypeError),
# Date/Timestamp
("2000-01-02", DateType(), TypeError),
(946811040, TimestampType(), TypeError),
# Array
(["1", None], ArrayType(StringType(), containsNull=False), ValueError),
([1, "2"], ArrayType(IntegerType()), TypeError),
# Map
({"a": 1}, MapType(IntegerType(), IntegerType()), TypeError),
({"a": "1"}, MapType(StringType(), IntegerType()), TypeError),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=False),
ValueError),
# Struct
({"s": "a", "i": "1"}, schema, TypeError),
(Row(s="a"), schema, ValueError), # Row can't have missing field
(Row(s="a", i="1"), schema, TypeError),
(["a"], schema, ValueError),
(["a", "1"], schema, TypeError),
(MyObj(s="a", i="1"), schema, TypeError),
(MyObj(s=None, i="1"), schema, ValueError),
]
# Check success cases
for obj, data_type in success_spec:
try:
_make_type_verifier(data_type, nullable=False)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=False)" % (obj, data_type))
# Check failure cases
for obj, data_type, exp in failure_spec:
msg = "verify_type(%s, %s, nullable=False) == %s" % (obj, data_type, exp)
with self.assertRaises(exp, msg=msg):
_make_type_verifier(data_type, nullable=False)(obj)
if __name__ == "__main__":
from pyspark.sql.tests.test_types import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
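# ---------------------------------------------------------------------------
# Standalone sketch (not part of the test suite above; unittest.main() exits
# before any trailing code would run): the StructType API exercised in
# test_struct_type needs no SparkContext, so it can be tried in a plain
# Python shell.
#
#   >>> from pyspark.sql.types import StructType, StructField, StringType
#   >>> s = StructType().add("f1", StringType(), True) \
#   ...                 .add(StructField("f2", StringType(), True))
#   >>> s.fieldNames()
#   ['f1', 'f2']
#   >>> len(s)
#   2
#   >>> s["f1"] is s.fields[0]
#   True
# ---------------------------------------------------------------------------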
|
|
#!/usr/bin/env python
## Copyright 2015 Novartis Institutes for BioMedical Research Inc.
## Licensed under the Apache License, Version 2.0 (the "License"); you
## may not use this file except in compliance with the License. You may
## obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing,
## software distributed under the License is distributed on an "AS IS"
## BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
## implied. See the License for the specific language governing
## permissions and limitations under the License.
################################################################################
##
## Script to extract the status of a job from the cluster queue and the
## log files
##
################################################################################
import sys
import argparse
import re
import os.path
import subprocess
## execfile(os.path.join(os.environ["HOME"], "ngs", "pipelines", "exon-pipeline", "bin", "util-lib", "getJobStatus.py"))
################################################################################
##
## Define command line options
##
################################################################################
parser = argparse.ArgumentParser(description='Extract pipeline meta information')
parser.add_argument('-s', dest="samplesDir", metavar="<Samples sub directory>", default="samples", help='Full path to sub directory of <project dir> containing the samples.')
parser.add_argument('-S', dest="sample", metavar="<Sample>", help='Name of the sample which is considered.')
parser.add_argument('-C', dest="chunk", default = "none", metavar="<Chunk>", help='Name of the chunk which is considered.')
parser.add_argument('-A', dest="aligner", default = "bowtie2", metavar="<Aligner>", help='Name of the aligner used [bowtie2].')
parser.add_argument('-J', dest="jobName", metavar="<Job name>", help='Name of the job.')
parser.add_argument('-O', dest="operation", metavar="<Operation>", help='Operation to be considered: one of align-genome, align-transcripts, align-junctions, compute-counts')
parser.add_argument('-E', dest="noError", action='store_true', help='Do not allow for any error messages in the log-files.')
parser.add_argument('-w', dest="warningsOn", action='store_true', help='Show warning messages.')
if len(sys.argv) > 1:
args = parser.parse_args(sys.argv[1:])
else:
inputArgs = []
inputArgs.append("-s")
inputArgs.append(os.path.join(os.environ["HOME"], "ngs", "RNA-seq", "2014", "WNT_YAPC_NG81", "samples"))
inputArgs.append("-S")
inputArgs.append("YAPC_DMSO_24h_15")
inputArgs.append("-C")
inputArgs.append("C004")
inputArgs.append("-A")
inputArgs.append("bowtie2")
inputArgs.append("-O")
inputArgs.append("compute-counts")
inputArgs.append("-J")
inputArgs.append("CC-YAPC_DMSO_24h_15-C004-bowtie2")
inputArgs.append("-E")
args = parser.parse_args(inputArgs)
samplesDir = args.samplesDir
sample = args.sample
chunk = args.chunk
aligner = args.aligner
operation = args.operation
jobName = args.jobName
noError = args.noError
warningsOn = args.warningsOn
if len(sys.argv) <= 1:
print "Using default parameter:"
print "samplesDir: " + samplesDir
print "sample : " + sample
print "chunk : " + chunk
print "operation : " + operation
print "jobName : " + jobName
print "noError : " + str(noError)
################################################################################
##
## Get the status for split jobs
##
################################################################################
def getSplitStatus (logFileList, fullSampleDir, status):
    ## Find the log file with the most recent date stamp
    latestLogDate = ""
    latestLogFile = ""
    for logFile in logFileList:
        if logFile != "":
            date = re.sub(".*-([0-9]*-[0-9]*)[.]log[~]*$", "\\1", logFile)
            if date != logFile:
                if date > latestLogDate:
                    latestLogDate = date
                    latestLogFile = logFile
            else:
                raise Exception ("Log file " + logFile + " does not have the right format to extract the date: .*-<yymmdd>-<hhmm>.log")
    splitComplete = (status == "running")
    try:
        logFilename = os.path.join(fullSampleDir, "log-files", latestLogFile)
        # print "Reading file: " + logFilename
        logFile = open(logFilename)
        fastqFileRead = False
        for line in logFile:
            if "Error" in line or "ERROR" in line or "exception" in line.lower() or "exiting" in line.lower() or line.strip() == "null":
                splitComplete = False
                break
            if line.strip() == "Fastq input complete.":
                fastqFileRead = True
            if fastqFileRead and line.strip() == "Done":
                splitComplete = True
        logFile.close()
    except IOError, e:
        print latestLogFile + " cannot be opened ... skipping"
        print "Error: " + str(e[0]) + " " + str(e[1])
    return splitComplete
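## Note on the date extraction used above and below: re.sub returns its input
## unchanged when the pattern does not match, which is how the callers detect
## a malformed name. Sketch with hypothetical file names:
##   re.sub(".*-([0-9]*-[0-9]*)[.]log[~]*$", "\\1", "split-140302-1511.log")
##     -> "140302-1511"
##   re.sub(".*-(C[0-9]*)-.*", "\\1", "align-genome-bowtie2-C004-140302-1511.log")
##     -> "C004"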
################################################################################
##
## Get the percent aligned reads and the alignment status
##
################################################################################
def getBowtie2AlignmentStatus (logFileList, fullSampleDir, chunks, status):
chunkLogFile = {}
chunkLogDate = {}
for logFile in logFileList:
if logFile != "":
chunk = re.sub(".*-(C[0-9]*)-.*", "\\1", logFile)
date = re.sub(".*-([0-9]*-[0-9]*)[.]log[~]*$", "\\1", logFile)
if chunk in chunkLogDate:
if date > chunkLogDate[chunk]:
chunkLogDate[chunk] = date
chunkLogFile[chunk] = logFile
else:
chunkLogDate[chunk] = date
chunkLogFile[chunk] = logFile
numReads = {}
percentAligned = {}
numAlignedReads = {}
alignmentComplete = {}
for chunk in chunks:
if chunk in chunkLogFile:
try:
logFilename = os.path.join(fullSampleDir, "log-files", chunkLogFile[chunk])
# print "Reading file: " + logFilename
logFile = open(logFilename)
if status[chunk] == "running":
alignmentComplete[chunk] = True
else:
alignmentComplete[chunk] = False
numLines = 0
for line in logFile:
# print "Line: " + line
if "error" in line.lower() or "exception" in line.lower() or "exiting" in line.lower() or line.strip() == "null":
alignmentComplete[chunk] = False
break
if "reads; of these:" in line:
numReads[chunk] = int(re.sub("reads; of these:", "", line).strip())
if "% overall alignment rate" in line:
percentAligned[chunk] = float (re.sub("% overall alignment rate", "", line).strip())
if "NUMBER_MAPPED_READS=" in line:
numAlignedReads[chunk] = int(re.sub(".*NUMBER_MAPPED_READS=", "", line).strip())
if line.strip()[:len("Alignment done")] == "Alignment done" or "successfully completed" in line:
alignmentComplete[chunk] = True
# print "alignmentComplete["+chunk+"]: " + str(alignmentComplete[chunk])
                    numLines += 1
if numLines == 0:
print >> sys.stderr, "Warning: File " + chunkLogFile[chunk] + " is empty."
logFile.close()
except IOError, e:
raise Exception (chunkLogFile[chunk] + " cannot be opened ... skipping\n" + \
"Unix error number: " + str(e[0]) + " and message: " + str(e[1]))
if chunk in status and status[chunk] != "running" and not chunk in alignmentComplete:
alignmentComplete[chunk] = False
if chunk in numReads and chunk in percentAligned and not chunk in numAlignedReads:
numAlignedReads[chunk] = int(numReads[chunk] * percentAligned[chunk])
if not chunk in numReads:
numReads[chunk] = ""
if not chunk in percentAligned:
percentAligned[chunk] = ""
if not chunk in numAlignedReads:
numAlignedReads[chunk] = ""
return numReads, percentAligned, alignmentComplete
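## Sketch of the bowtie2 log lines the parser above keys on (values are
## illustrative, modeled on a typical bowtie2 summary):
##   1000000 reads; of these:
##   ...
##   95.12% overall alignment rate
## plus the pipeline's own marker lines, e.g. "NUMBER_MAPPED_READS=951200"
## and a final "Alignment done" / "... successfully completed".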
################################################################################
##
## Get the Tophat2 alignment exit status counts
##
################################################################################
def getTophat2AlignmentStatus (logFileList, fullSampleDir, chunks, status):
chunkLogFile = {}
chunkLogDate = {}
for logFile in logFileList:
if logFile != "":
chunk = re.sub(".*-(C[0-9]*)-.*", "\\1", logFile)
date = re.sub(".*-([0-9]*-[0-9]*)[.]log[~]*$", "\\1", logFile)
if chunk in chunkLogDate:
if date > chunkLogDate[chunk]:
chunkLogDate[chunk] = date
chunkLogFile[chunk] = logFile
else:
chunkLogDate[chunk] = date
chunkLogFile[chunk] = logFile
alignmentComplete = {}
for chunk in chunks:
if chunk in chunkLogFile:
try:
logFilename = os.path.join(fullSampleDir, "log-files", chunkLogFile[chunk])
# print "Reading file: " + logFilename
logFile = open(logFilename)
if status[chunk] == "running":
alignmentComplete[chunk] = True
else:
alignmentComplete[chunk] = False
for line in logFile:
if "error" in line.lower() or "exception" in line.lower() or "exiting" in line.lower() or line.strip() == "null":
alignmentComplete[chunk] = False
break
if line.strip() == "Tophat2 alignments successfully completed." or line.strip() == "Tophat2 alignment sucessfully completed.":
alignmentComplete[chunk] = True
logFile.close()
except IOError, e:
raise Exception (chunkLogFile[chunk] + " cannot be opened ... skipping\n" + \
"Unix error number: " + str(e[0]) + " and message: " + str(e[1]))
if not chunk in alignmentComplete:
alignmentComplete[chunk] = False
return alignmentComplete
################################################################################
##
## Get the number of aligned and expressed reads as well as the compute counts
## exit status
##
################################################################################
def getComputeCountsStatus (logFileList, fullSampleDir, chunks, prefixList, noError, status):
chunkLogFile = {}
chunkLogDate = {}
for logFile in logFileList:
# print logFile
if logFile != "":
chunkFound = False
for prefix in prefixList:
chunk = re.sub("^" + prefix + ".*-(C[0-9]*)-.*", "\\1", logFile)
date = re.sub("^" + prefix + ".*-([0-9]*-[0-9]*)[.]log[~]*$", "\\1", logFile)
if chunk != logFile and date != logFile:
chunkFound = True
if chunk in chunkLogDate:
if date > chunkLogDate[chunk]:
chunkLogDate[chunk] = date
chunkLogFile[chunk] = logFile
else:
chunkLogDate[chunk] = date
chunkLogFile[chunk] = logFile
if not chunkFound:
raise Exception ("The filename " + logFile + " not of the form " + "^" + "|".join(prefixList) + "-(C[0-9]*).*-([0-9]*-[0-9]*)[.]log[~]*")
geneCountsStarted = {}
geneCountsCompleted = {}
junctionCountsStarted = {}
junctionCountsCompleted = {}
numAlignedReads = {}
numExpressedReads = {}
operationComplete = {}
errorOccured = {}
for chunk in chunks:
if chunk in chunkLogFile:
try:
logFilename = os.path.join(fullSampleDir, "log-files", chunkLogFile[chunk])
## print >> sys.stderr, "Reading file: " + logFilename
logFile = open(logFilename)
if status[chunk] == "running":
operationComplete[chunk] = True
else:
operationComplete[chunk] = False
errorOccured[chunk] = False
numLines = 0
for line in logFile:
if "NUMBER_MAPPED_READS=" in line:
numAlignedReads[chunk] = int(re.sub(".*NUMBER_MAPPED_READS=", "", line).strip())
if "NUMBER_EXPRESSED_READS=" in line:
numAlignedReads[chunk] = int(re.sub(".*NUMBER_EXPRESSED_READS=", "", line).strip())
if "ERROR" in line.upper() or "Exception" in line or "exiting" in line or line.strip() == "null":
errorOccured[chunk] = True
if "ComputeGeneCountsSam -R" in line:
junctionCountsStarted[chunk] = True
if chunk in junctionCountsStarted and "Done" in line:
junctionCountsCompleted[chunk] = True
if "ComputeExonCounts -g -w" in line or "ComputeCounts -g -w" in line:
geneCountsStarted[chunk] = True
if chunk in geneCountsStarted and "Done" in line:
geneCountsCompleted[chunk] = True
if "successfully completed" in line:
operationComplete[chunk] = True
                    numLines += 1
if numLines == 0:
print >> sys.stderr, "Warning: File " + chunkLogFile[chunk] + " is empty."
logFile.close()
except IOError, e:
raise Exception (chunkLogFile[chunk] + " cannot be opened ... skipping\n" + \
"Unix error number: " + str(e[0]) + " and message: " + str(e[1]))
if not chunk in operationComplete:
operationComplete[chunk] = False
if not chunk in numAlignedReads:
numAlignedReads[chunk] = ""
if not chunk in numExpressedReads:
numExpressedReads[chunk] = ""
if noError and chunk in errorOccured and errorOccured[chunk]:
operationComplete[chunk] = False
return numAlignedReads, numExpressedReads, operationComplete
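## Sketch of the marker lines getComputeCountsStatus looks for (values are
## hypothetical): "NUMBER_MAPPED_READS=951200" and
## "NUMBER_EXPRESSED_READS=873511" set the two counters,
## "ComputeCounts -g -w ..." / "ComputeGeneCountsSam -R ..." mark the start of
## the gene and junction count steps, and a later "Done" marks completion.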
################################################################################
##
## Get the exit status
##
################################################################################
def getJobExitStatus (logFileList, fullSampleDir, chunks, prefix, noError, status):
chunkLogFile = {}
chunkLogDate = {}
for logFile in logFileList:
## print logFile
if logFile != "":
chunk = re.sub("^" + prefix + ".*-(C[0-9]*)-.*", "\\1", logFile)
date = re.sub("^" + prefix + ".*-([0-9]*-[0-9]*)[.]log[~]*$", "\\1", logFile)
if chunk in chunkLogDate:
if date > chunkLogDate[chunk]:
chunkLogDate[chunk] = date
chunkLogFile[chunk] = logFile
else:
chunkLogDate[chunk] = date
chunkLogFile[chunk] = logFile
operationComplete = {}
errorOccured = {}
for chunk in chunks:
if chunk in chunkLogFile:
try:
logFilename = os.path.join(fullSampleDir, "log-files", chunkLogFile[chunk])
## print "Reading file: " + logFilename
logFile = open(logFilename)
except IOError, e:
raise Exception(chunkLogFile[chunk] + " cannot be opened ... skipping \n" + \
"Unix error code and message: " + str(e[0]) + " " + str(e[1]))
if status[chunk] == "running":
operationComplete[chunk] = True
else:
operationComplete[chunk] = False
errorOccured[chunk] = False
numLines = 0
for line in logFile:
# print >> sys.stderr, line
if "ERROR" in line.upper () or "Exception" in line or "exiting" in line or line.strip() == "null":
errorOccured[chunk] = True
if "successfully completed" in line.strip():
operationComplete[chunk] = True
# print >> sys.stderr, "operationComplete[" + chunk + "]: "+ str(operationComplete[chunk]) + ", errorOccured[" + chunk + "]: "+ str(errorOccured[chunk])
                numLines += 1
if numLines == 0:
print >> sys.stderr, "Warning: File " + chunkLogFile[chunk] + " is empty."
logFile.close()
if not chunk in operationComplete:
operationComplete[chunk] = False
# print >> sys.stderr, "errorOccured[" + chunk + "]: "+ str(errorOccured[chunk])
if noError and chunk in errorOccured and errorOccured[chunk]:
operationComplete[chunk] = False
return operationComplete
################################################################################
##
## Read log file and extract fields
##
################################################################################
sampleDir = os.path.join(samplesDir, sample)
if not os.path.isdir(sampleDir):
raise Exception ("ERROR: directory " + sampleDir + " not found.")
chunks = [chunk]
try:
cmd = " ".join(["qstat -r"])
qstatInf = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True).stdout.read().strip().split("\n")
except IOError, e:
raise Exception ("Could not execute qstat -r\n" + "Unix error code and message: " + str(e[0]) + " " + str(e[1]))
jobStatus = {}
for chunk in chunks:
jobStatus[chunk] = "not submitted"
for line in qstatInf:
fields = line.split()
if not "job-ID" in line and len(fields) > 5:
if jobName.startswith(fields[2]):
## print ", ".join([fields[2], fields[4]])
if fields[4] == 'r':
currentJobStatus = "running"
elif "E" in fields[4]:
currentJobStatus = "SGE-error"
elif "d" in fields[4].lower():
currentJobStatus = "in deletion"
elif "h" in fields[4].lower():
currentJobStatus = "on hold"
elif "w" in fields[4].lower() or "t" in fields[4].lower():
currentJobStatus = "waiting"
else:
currentJobStatus = "not submitted"
elif "Full jobname:" in line:
## print line + ", " + str(jobName == fields[-1])
if fields[-1] == jobName:
jobStatus[chunk] = currentJobStatus
## print jobStatus[chunk]
if jobStatus[chunk] == "not submitted" or jobStatus[chunk] == "in deletion" or jobStatus[chunk] == "running":
if operation in ["split"]:
logFilePrefix = operation
else:
logFilePrefix = operation + "-" + aligner
chunkRegExp="-" + chunk
if operation in ["split", "merge-genome-alignments"]:
chunkRegExp=""
cmd = " ".join(["ls -1", os.path.join(sampleDir, "log-files"), "| grep '^" + logFilePrefix + chunkRegExp + "-'"])
logFileList = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True).stdout.read().strip().split("\n")
if len(logFileList) > 1 or logFileList[0] != "":
if operation == "split":
#Process split log files
operationComplete = {}
operationComplete[chunk] = getSplitStatus (logFileList, sampleDir, jobStatus[chunk])
elif operation in ["align-genome", "align-transcripts", "align-junctions"]:
#Process genome log files
numReads, percentAligned, operationComplete = {}, {}, {}
if aligner == "tophat2" and operation == "align-genome":
operationComplete = getTophat2AlignmentStatus (logFileList, sampleDir, chunks, jobStatus)
else:
numReads, percentAligned, operationComplete = getBowtie2AlignmentStatus (logFileList, sampleDir, chunks, jobStatus)
elif operation == "compute-counts" or operation == "compute-junction-counts":
#Process compute counts log files
countNumReads, countExpressedReads, operationComplete = {}, {}, {}
countNumReads, countExpressedReads, operationComplete = getComputeCountsStatus (logFileList, sampleDir, chunks, \
["counts", operation + "-" + aligner], noError, jobStatus)
else:
if warningsOn and not operation in ["align-genome-eqp", "merge-genome-alignments"]:
print >> sys.stderr, "Unknown operation: " + operation
operationComplete = {}
operationComplete = getJobExitStatus (logFileList, sampleDir, chunks, operation, noError, jobStatus)
if chunk in operationComplete:
if operationComplete[chunk]:
if jobStatus[chunk] != "running":
jobStatus[chunk] = "complete"
else:
if jobStatus[chunk] != "running":
jobStatus[chunk] = "failed"
else:
jobStatus[chunk] = "crashed"
if operation in ["split", "merge-genome-alignments"]:
print "\t".join([sample, operation, jobName, jobStatus[chunk]])
else:
print "\t".join([sample, chunk, operation, jobName, jobStatus[chunk]])
|
|
"""
(c) RIKEN 2017. All rights reserved.
Author: Keitaro Yamashita
This software is released under the new BSD License; see LICENSE.
"""
import iotbx.phil
import os
import subprocess
import tempfile
import shutil
import h5py
import bitshuffle.h5
master_params_str = """
h5in = None
.type = path
.help = "Input file"
h5out = None
.type = path
.help = "Output file"
replace = False
.type = bool
.help = "Replace original file with updated file"
remove_filters_first = False
.type = bool
.help = "Run h5repack -f NONE to remove all filters first."
compress = bslz4 *bslz4_and_gzipshuf
.type = choice(multi=False)
.help = "Compress large datasets in master.h5. bslz4_and_gzipshuf is to apply bslz4 for pixel_mask and gzip+shuf for other datasets."
remove_detectorModule_data = flatfield pixel_mask trimbit
.type = choice(multi=True)
.help = "Optionally remove data in /entry/instrument/detector/detectorSpecific/detectorModule_* flatfield and pixel_mask are redundant information."
remove_overall = flatfield pixel_mask
.type = choice(multi=True)
.help = "Optionally remove flatfield and pixel_mask data in /entry/instrument/detector/detectorSpecific/. NOTE they (especially pixel_mask) may be necessary data."
"""
class ReduceMaster:
def __init__(self, h5in):
self.h = h5py.File(h5in, "a")
# __init__()
def remove_redundant(self, kinds):
assert set(kinds).issubset(("flatfield","pixel_mask","trimbit"))
detSp = self.h["/entry/instrument/detector/detectorSpecific"]
for k in detSp.keys():
if k.startswith("detectorModule_"):
for kk in kinds:
if kk not in detSp[k]: continue
print "Removing", k+"/"+kk
del detSp[k][kk]
# remove_redundant()
def remove_datasets(self, kinds):
assert set(kinds).issubset(("flatfield","pixel_mask"))
detSp = self.h["/entry/instrument/detector/detectorSpecific"]
for k in kinds:
print "Removing %s" % k
del detSp[k]
def find_large_dataset_visitor(self, path, obj):
if type(obj) is h5py.Dataset and obj.size > 100:
self.large_datasets.append(path)
# find_large_dataset_visitor()
def compress_large_datasets(self, compress):
if not compress: return
assert compress in ("bslz4", "bslz4_and_gzipshuf")
self.large_datasets = []
self.h.visititems(self.find_large_dataset_visitor)
for path in self.large_datasets:
print "Compressing %s (size=%d)" % (path, self.h[path].size)
data = self.h[path][:]
del self.h[path]
if compress=="bslz4":
self.h.create_dataset(path, data.shape,
compression=bitshuffle.h5.H5FILTER,
compression_opts=(0, bitshuffle.h5.H5_COMPRESS_LZ4),
chunks=None, dtype=data.dtype, data=data)
elif compress == "bslz4_and_gzipshuf":
if "pixel_mask" in path: # bslz4
self.h.create_dataset(path, data.shape,
compression=bitshuffle.h5.H5FILTER,
compression_opts=(0, bitshuffle.h5.H5_COMPRESS_LZ4),
chunks=None, dtype=data.dtype, data=data)
else: # gzip+shuf
self.h.create_dataset(path, data.shape,
compression="gzip",shuffle=True,
chunks=None, dtype=data.dtype, data=data)
else:
raise "Never reaches here."
# compress_large_datasets()
def close(self): self.h.close()
# class ReduceMaster
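## Minimal usage sketch (hypothetical file name; this is what run() below
## does, minus the h5repack steps):
##   redmas = ReduceMaster("example_master.h5")
##   redmas.remove_redundant(["flatfield", "pixel_mask"])
##   redmas.compress_large_datasets("bslz4")
##   redmas.close()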
def run(params):
print "Parameters:"
iotbx.phil.parse(master_params_str).format(params).show(prefix=" ")
print
master_size_org = os.path.getsize(params.h5in)
if params.replace:
shutil.copyfile(params.h5in, params.h5in+".org")
params.h5out = params.h5in
tmpfd, tmpout = tempfile.mkstemp(prefix=os.path.basename(params.h5out)+".tmp", dir=os.path.dirname(params.h5out))
os.close(tmpfd)
if params.remove_filters_first:
try:
p = subprocess.Popen(["h5repack","-f","NONE",params.h5in,tmpout], shell=False)
p.wait()
except OSError:
print "h5repack failed. Is h5repack installed?"
return
else:
shutil.copyfile(params.h5in, tmpout)
try:
redmas = ReduceMaster(tmpout)
if params.remove_detectorModule_data:
redmas.remove_redundant(params.remove_detectorModule_data)
if params.remove_overall:
redmas.remove_datasets(params.remove_overall)
if params.compress:
redmas.compress_large_datasets(params.compress)
redmas.close()
# Run h5repack to clean up the removed space
try:
p = subprocess.Popen(["h5repack",tmpout,params.h5out], shell=False)
p.wait()
except OSError:
print "h5repack failed. Is h5repack installed?"
finally:
os.remove(tmpout)
master_size_after = os.path.getsize(params.h5out)
print
print "Finished."
print " Original file: %s (%.2f MB)" % (params.h5in, master_size_org/1024**2)
print " Generated file: %s (%.2f MB)" % (params.h5out, master_size_after/1024**2)
# run()
def print_help(command_name):
print """\
This script modifies (or re-modifies) the master.h5 file of EIGER detectors written by the DECTRIS software.
All features are optional:
- Remove filters
- Apply bslz4 filters to large datasets
- Remove unnecessary data to save the disk space
- Remove (maybe necessary but) large data to further save the disk space
You need the h5repack program. You also need phenix-1.11 or later if you use phenix.python; dials.python may be used instead.
* Usage:
%(command_name)s yours_master.h5 [h5out=yours_master_reduced.h5] [remove_filters_first=True] [compress=bslz4_and_gzipshuf] [remove_detectorModule_data=flatfield+pixel_mask+trimbit]
* In case you're BL32XU user, collected data before 2017, and want to use Neggia plugin for XDS (and reduce file size anyway):
mv yours_master.h5 yours_master.h5.org
%(command_name)s yours_master.h5.org h5out=yours_master.h5 compress=bslz4 remove_detectorModule_data=flatfield+pixel_mask+trimbit
Default parameters:""" % dict(command_name=command_name)
iotbx.phil.parse(master_params_str).show(prefix=" ", attributes_level=1)
# print_help()
def run_from_args(argv, command_name="phenix.python this-script"):
if not argv or "-h" in argv or "--help" in argv:
print_help(command_name)
return
cmdline = iotbx.phil.process_command_line(args=argv,
master_string=master_params_str)
params = cmdline.work.extract()
args = cmdline.remaining_args
for arg in args:
if not os.path.isfile(arg):
print "File not found: %s" % arg
return
if params.h5in is None and arg.endswith(".h5"):
params.h5in = arg
if params.h5in is None:
print "Please give _master.h5 file."
return
if params.h5out is None:
params.h5out = os.path.splitext(params.h5in)[0] + "_reduce.h5"
run(params)
# run_from_args()
if __name__ == "__main__":
import sys
run_from_args(sys.argv[1:])
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_versionedobjects import base as ovo_base
import testtools
from nova import exception
from nova import objects
from nova.objects import fields
from nova.tests.unit.objects import test_objects
fake_instance_uuid = uuids.fake
fake_obj_numa_topology = objects.InstanceNUMATopology(
instance_uuid=fake_instance_uuid,
cells=[
objects.InstanceNUMACell(
id=0, cpuset=set(), pcpuset=set([1, 2]), memory=512,
pagesize=2048),
objects.InstanceNUMACell(
id=1, cpuset=set(), pcpuset=set([3, 4]), memory=512,
pagesize=2048),
])
fake_db_topology = {
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'id': 1,
'instance_uuid': fake_instance_uuid,
'numa_topology': fake_obj_numa_topology._to_json()
}
def get_fake_obj_numa_topology(context):
fake_obj_numa_topology_cpy = fake_obj_numa_topology.obj_clone()
fake_obj_numa_topology_cpy._context = context
return fake_obj_numa_topology_cpy
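# Note on the two CPU fields exercised throughout these tests: 'cpuset' holds
# the guest CPUs that float across host CPUs (shared policy), while 'pcpuset'
# holds the guest CPUs that are pinned 1:1 to host CPUs (dedicated policy);
# only CPUs in 'pcpuset' are eligible for pin()/pin_vcpus().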
class _TestInstanceNUMACell(object):
def test_siblings(self):
        # The default thread count of VirtCPUTopology is one; one thread
        # means no threading and therefore no siblings
inst_cell = objects.InstanceNUMACell(
cpuset=set([0, 1, 2]), pcpuset=set())
self.assertEqual([], inst_cell.siblings)
inst_cell = objects.InstanceNUMACell(
cpuset=set([0, 1, 2]), pcpuset=set([4, 5, 6]))
self.assertEqual([], inst_cell.siblings)
# 'threads=0' means no sibling
topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=0)
inst_cell = objects.InstanceNUMACell(
cpuset=set([0, 1, 2]), pcpuset=set(), cpu_topology=topo)
self.assertEqual([], inst_cell.siblings)
inst_cell = objects.InstanceNUMACell(
cpuset=set(), pcpuset=set([0, 1, 2]), cpu_topology=topo)
self.assertEqual([], inst_cell.siblings)
# One thread actually means no threads
topo = objects.VirtCPUTopology(sockets=1, cores=3, threads=1)
inst_cell = objects.InstanceNUMACell(
cpuset=set([0, 1, 2]), pcpuset=set(), cpu_topology=topo)
self.assertEqual([], inst_cell.siblings)
inst_cell = objects.InstanceNUMACell(
cpuset=set(), pcpuset=set([0, 1, 2]), cpu_topology=topo)
self.assertEqual([], inst_cell.siblings)
        # 2 threads per virtual core, and the NUMA node has only one type of
        # CPU, either pinned or unpinned.
topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
inst_cell = objects.InstanceNUMACell(
cpuset=set([0, 1, 2, 3]), pcpuset=set(), cpu_topology=topo)
self.assertEqual([set([0, 1]), set([2, 3])], inst_cell.siblings)
inst_cell = objects.InstanceNUMACell(
cpuset=set(), pcpuset=set([0, 1, 2, 3]), cpu_topology=topo)
self.assertEqual([set([0, 1]), set([2, 3])], inst_cell.siblings)
        # 4 threads per virtual core, and the NUMA node has only one type of CPU
topo = objects.VirtCPUTopology(sockets=1, cores=1, threads=4)
inst_cell = objects.InstanceNUMACell(
cpuset=set([0, 1, 2, 3]), pcpuset=set(), cpu_topology=topo)
self.assertEqual([set([0, 1, 2, 3])], inst_cell.siblings)
inst_cell = objects.InstanceNUMACell(
cpuset=set(), pcpuset=set([0, 1, 2, 3]), cpu_topology=topo)
self.assertEqual([set([0, 1, 2, 3])], inst_cell.siblings)
        # 2 threads per virtual core, on a NUMA node with both CPU types,
        # pinned and unpinned
topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=2)
inst_cell = objects.InstanceNUMACell(
cpuset=set([0, 1]), pcpuset=set([2, 3]), cpu_topology=topo)
self.assertEqual([set([0, 1]), set([2, 3])], inst_cell.siblings)
def test_pin(self):
inst_cell = objects.InstanceNUMACell(
cpuset=set([4, 5]), pcpuset=set([0, 1, 2, 3]), cpu_pinning=None)
# Only vCPU in the 'pcpuset' list is eligible for pinning
inst_cell.pin(0, 14)
self.assertEqual({0: 14}, inst_cell.cpu_pinning)
# vCPU 12 is not a CPU of this cell, drop silently
inst_cell.pin(12, 14)
self.assertEqual({0: 14}, inst_cell.cpu_pinning)
# vCPU in the 'cpuset' which is for floating CPUs, drop silently
inst_cell.pin(4, 15)
self.assertEqual({0: 14}, inst_cell.cpu_pinning)
# Another vCPU appeared in 'pcpuset', ready for pinning
inst_cell.pin(1, 16)
self.assertEqual({0: 14, 1: 16}, inst_cell.cpu_pinning)
def test_pin_vcpus(self):
inst_cell = objects.InstanceNUMACell(
cpuset=set([4, 5]), pcpuset=set([0, 1, 2, 3]), cpu_pinning=None)
# 'pcpuset' is the vCPU list for pinning
inst_cell.pin_vcpus((0, 14), (1, 15), (2, 16), (3, 17))
self.assertEqual({0: 14, 1: 15, 2: 16, 3: 17}, inst_cell.cpu_pinning)
# vCPU out of 'pcpuset' list will not be added to the CPU pinning list
inst_cell.pin_vcpus((0, 14), (1, 15), (2, 16), (3, 17), (4, 18))
self.assertEqual({0: 14, 1: 15, 2: 16, 3: 17}, inst_cell.cpu_pinning)
# vCPU not belonging to this cell will be dropped silently
inst_cell.pin_vcpus((0, 14), (1, 15), (2, 16), (3, 17), (10, 18))
self.assertEqual({0: 14, 1: 15, 2: 16, 3: 17}, inst_cell.cpu_pinning)
def test_cpu_pinning(self):
topo_obj = get_fake_obj_numa_topology(self.context)
self.assertEqual(set(), topo_obj.cpu_pinning)
topo_obj.cells[0].pin_vcpus((1, 10), (2, 11))
self.assertEqual(set([10, 11]), topo_obj.cpu_pinning)
topo_obj.cells[1].pin_vcpus((3, 0), (4, 1))
self.assertEqual(set([0, 1, 10, 11]), topo_obj.cpu_pinning)
def test_clear_host_pinning(self):
topo_obj = get_fake_obj_numa_topology(self.context)
topo_obj.cells[0].pin_vcpus((1, 10), (2, 11))
topo_obj.cells[0].id = 3
topo_obj.cells[1].pin_vcpus((3, 0), (4, 1))
topo_obj.cells[1].id = 0
topo_obj.clear_host_pinning()
self.assertEqual({}, topo_obj.cells[0].cpu_pinning)
self.assertEqual(-1, topo_obj.cells[0].id)
self.assertEqual({}, topo_obj.cells[1].cpu_pinning)
self.assertEqual(-1, topo_obj.cells[1].id)
def test_emulator_threads_policy(self):
topo_obj = get_fake_obj_numa_topology(self.context)
self.assertFalse(topo_obj.emulator_threads_isolated)
topo_obj.emulator_threads_policy = (
fields.CPUEmulatorThreadsPolicy.ISOLATE)
self.assertTrue(topo_obj.emulator_threads_isolated)
def test_obj_make_compatible(self):
topo_obj = objects.InstanceNUMACell(
cpuset=set(), pcpuset=set([0, 1]),
cpuset_reserved=set([1, 2]),
cpu_policy=fields.CPUAllocationPolicy.MIXED,
)
versions = ovo_base.obj_tree_get_versions('InstanceNUMACell')
data = lambda x: x['nova_object.data']
primitive = data(topo_obj.obj_to_primitive(
target_version='1.6', version_manifest=versions))
self.assertEqual(
fields.CPUAllocationPolicy.MIXED, primitive['cpu_policy'])
self.assertRaises(
exception.ObjectActionError,
topo_obj.obj_to_primitive,
target_version='1.5', version_manifest=versions)
# set this to something compatible with < 1.6 so we can keep testing
topo_obj.cpu_policy = fields.CPUAllocationPolicy.DEDICATED
primitive = data(topo_obj.obj_to_primitive(
target_version='1.5', version_manifest=versions))
self.assertIn('pcpuset', primitive)
primitive = data(topo_obj.obj_to_primitive(
target_version='1.4', version_manifest=versions))
self.assertNotIn('pcpuset', primitive)
self.assertEqual(set([0, 1]), set(primitive['cpuset']))
self.assertIn('cpuset_reserved', primitive)
primitive = data(topo_obj.obj_to_primitive(
target_version='1.3', version_manifest=versions))
self.assertNotIn('cpuset_reserved', primitive)
class TestInstanceNUMACell(
test_objects._LocalTest, _TestInstanceNUMACell,
):
pass
class TestInstanceNUMACellRemote(
test_objects._RemoteTest, _TestInstanceNUMACell,
):
pass
class _TestInstanceNUMATopology(object):
@mock.patch('nova.db.main.api.instance_extra_update_by_uuid')
def test_create(self, mock_update):
topo_obj = get_fake_obj_numa_topology(self.context)
topo_obj.instance_uuid = fake_db_topology['instance_uuid']
topo_obj.create()
self.assertEqual(1, len(mock_update.call_args_list))
def _test_get_by_instance_uuid(self):
numa_topology = objects.InstanceNUMATopology.get_by_instance_uuid(
self.context, fake_db_topology['instance_uuid'])
self.assertEqual(fake_db_topology['instance_uuid'],
numa_topology.instance_uuid)
for obj_cell, topo_cell in zip(
numa_topology.cells, fake_obj_numa_topology['cells']):
self.assertIsInstance(obj_cell, objects.InstanceNUMACell)
self.assertEqual(topo_cell.id, obj_cell.id)
self.assertEqual(topo_cell.cpuset, obj_cell.cpuset)
self.assertEqual(topo_cell.memory, obj_cell.memory)
self.assertEqual(topo_cell.pagesize, obj_cell.pagesize)
@mock.patch('nova.db.main.api.instance_extra_get_by_instance_uuid')
def test_get_by_instance_uuid(self, mock_get):
mock_get.return_value = fake_db_topology
self._test_get_by_instance_uuid()
@mock.patch('nova.db.main.api.instance_extra_get_by_instance_uuid')
def test_get_by_instance_uuid_missing(self, mock_get):
mock_get.return_value = None
self.assertRaises(
exception.NumaTopologyNotFound,
objects.InstanceNUMATopology.get_by_instance_uuid,
self.context, 'fake_uuid')
def test_cpu_policy(self):
cpu_policy = fields.CPUAllocationPolicy.SHARED
topology = objects.InstanceNUMATopology(
instance_uuid=fake_instance_uuid,
cells=[
objects.InstanceNUMACell(
cpuset=set([0, 1, 2, 3]),
pcpuset=set(),
cpu_pinning=None,
cpu_policy=cpu_policy,
),
objects.InstanceNUMACell(
cpuset=set([4, 5, 6, 7]),
pcpuset=set(),
cpu_pinning=None,
cpu_policy=cpu_policy,
),
],
)
self.assertEqual(cpu_policy, topology.cpu_policy)
def test_cpu_policy__error(self):
"""Ensure we raise an error if cells have different values."""
topology = objects.InstanceNUMATopology(
instance_uuid=fake_instance_uuid,
cells=[
objects.InstanceNUMACell(
cpuset=set([0, 1, 2, 3]),
pcpuset=set(),
cpu_pinning=None,
cpu_policy=None,
),
objects.InstanceNUMACell(
cpuset=set([4, 5, 6, 7]),
pcpuset=set(),
cpu_pinning=None,
cpu_policy=fields.CPUAllocationPolicy.SHARED
),
],
)
with testtools.ExpectedException(exception.InternalError):
topology.cpu_policy
def test_cpuset_reserved(self):
topology = objects.InstanceNUMATopology(
instance_uuid=fake_instance_uuid,
cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([1, 2]), pcpuset=set(), memory=512,
pagesize=2048, cpuset_reserved=set([3, 7])),
objects.InstanceNUMACell(
id=1, cpuset=set([3, 4]), pcpuset=set(), memory=512,
pagesize=2048, cpuset_reserved=set([9, 12]))
])
self.assertEqual(set([3, 7]), topology.cells[0].cpuset_reserved)
self.assertEqual(set([9, 12]), topology.cells[1].cpuset_reserved)
def test_obj_make_compatible_numa(self):
topo_obj = objects.InstanceNUMATopology(
emulator_threads_policy=(
fields.CPUEmulatorThreadsPolicy.ISOLATE))
versions = ovo_base.obj_tree_get_versions('InstanceNUMATopology')
primitive = topo_obj.obj_to_primitive(target_version='1.2',
version_manifest=versions)
self.assertNotIn(
'emulator_threads_policy', primitive['nova_object.data'])
topo_obj = objects.InstanceNUMATopology.obj_from_primitive(primitive)
self.assertFalse(topo_obj.emulator_threads_isolated)
def test_obj_from_db_obj(self):
"""Test of creating 'InstanceNUMATopology' OVO object from the
database primitives, which has an old version 'InstanceNUMACell'
primitives.
Prior to version 1.5, 'InstanceNUMACell' saves the instance CPUs in the
'cpuset' field, for both the pinned CPUs of a dedicated and the
un-pinned CPUs of a shared instances, after version 1.5, any pinned
CPUs of dedicated instance are moved to 'pcpuset'. this test verifies
the CPU movement for instance with a 'dedicated' allocation policy.
"""
fake_topo_obj_w_cell_v1_4 = objects.InstanceNUMATopology(
instance_uuid=fake_instance_uuid,
cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([1, 2]), pcpuset=set(), memory=512,
pagesize=2048),
objects.InstanceNUMACell(
id=1, cpuset=set([3, 4]), pcpuset=set(), memory=512,
pagesize=2048),
])
fake_topo_obj = copy.deepcopy(fake_topo_obj_w_cell_v1_4)
for cell in fake_topo_obj.cells:
cell.cpu_policy = objects.fields.CPUAllocationPolicy.DEDICATED
numa_topology = objects.InstanceNUMATopology.obj_from_db_obj(
self.context, fake_instance_uuid, fake_topo_obj._to_json())
for obj_cell, topo_cell in zip(
numa_topology.cells,
fake_topo_obj_w_cell_v1_4['cells']):
self.assertEqual(set(), obj_cell.cpuset)
self.assertEqual(topo_cell.cpuset, obj_cell.pcpuset)
class TestInstanceNUMATopology(
test_objects._LocalTest, _TestInstanceNUMATopology,
):
pass
class TestInstanceNUMATopologyRemote(
test_objects._RemoteTest, _TestInstanceNUMATopology,
):
pass
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains a sqoop 1 operator
"""
import os
import signal
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.apache.sqoop.hooks.sqoop import SqoopHook
from airflow.utils.decorators import apply_defaults
# pylint: disable=too-many-instance-attributes
class SqoopOperator(BaseOperator):
"""
Execute a Sqoop job.
Documentation for Apache Sqoop can be found here:
https://sqoop.apache.org/docs/1.4.2/SqoopUserGuide.html
:param conn_id: str
:param cmd_type: str specify command to execute "export" or "import"
:param table: Table to read
:param query: Import result of arbitrary SQL query. Instead of using the table,
columns and where arguments, you can specify a SQL statement with the query
argument. Must also specify a destination directory with target_dir.
:param target_dir: HDFS destination directory where the data
from the rdbms will be written
:param append: Append data to an existing dataset in HDFS
    :param file_type: "avro", "sequence" or "text". Imports data
        in the specified format. Defaults to text.
:param columns: <col,col,col> Columns to import from table
:param num_mappers: Use n mapper tasks to import/export in parallel
:param split_by: Column of the table used to split work units
:param where: WHERE clause to use during import
:param export_dir: HDFS Hive database directory to export to the rdbms
:param input_null_string: The string to be interpreted as null
for string columns
:param input_null_non_string: The string to be interpreted as null
for non-string columns
:param staging_table: The table in which data will be staged before
being inserted into the destination table
:param clear_staging_table: Indicate that any data present in the
staging table can be deleted
:param enclosed_by: Sets a required field enclosing character
:param escaped_by: Sets the escape character
:param input_fields_terminated_by: Sets the input field separator
:param input_lines_terminated_by: Sets the input end-of-line character
:param input_optionally_enclosed_by: Sets a field enclosing character
:param batch: Use batch mode for underlying statement execution
:param direct: Use direct export fast path
:param driver: Manually specify JDBC driver class to use
:param verbose: Switch to more verbose logging for debug purposes
:param relaxed_isolation: use read uncommitted isolation level
:param hcatalog_database: Specifies the database name for the HCatalog table
:param hcatalog_table: The argument value for this option is the HCatalog table
:param create_hcatalog_table: Have sqoop create the hcatalog table passed
in or not
:param properties: additional JVM properties passed to sqoop
:param extra_import_options: Extra import options to pass as dict.
If a key doesn't have a value, just pass an empty string to it.
Don't include prefix of -- for sqoop options.
:param extra_export_options: Extra export options to pass as dict.
If a key doesn't have a value, just pass an empty string to it.
Don't include prefix of -- for sqoop options.
"""
template_fields = ('conn_id', 'cmd_type', 'table', 'query', 'target_dir',
'file_type', 'columns', 'split_by',
'where', 'export_dir', 'input_null_string',
'input_null_non_string', 'staging_table',
'enclosed_by', 'escaped_by', 'input_fields_terminated_by',
'input_lines_terminated_by', 'input_optionally_enclosed_by',
'properties', 'extra_import_options', 'driver',
'extra_export_options', 'hcatalog_database', 'hcatalog_table',)
ui_color = '#7D8CA4'
# pylint: disable=too-many-arguments,too-many-locals
@apply_defaults
def __init__(self,
conn_id='sqoop_default',
cmd_type='import',
table=None,
query=None,
target_dir=None,
append=None,
file_type='text',
columns=None,
num_mappers=None,
split_by=None,
where=None,
export_dir=None,
input_null_string=None,
input_null_non_string=None,
staging_table=None,
clear_staging_table=False,
enclosed_by=None,
escaped_by=None,
input_fields_terminated_by=None,
input_lines_terminated_by=None,
input_optionally_enclosed_by=None,
batch=False,
direct=False,
driver=None,
verbose=False,
relaxed_isolation=False,
properties=None,
hcatalog_database=None,
hcatalog_table=None,
create_hcatalog_table=False,
extra_import_options=None,
extra_export_options=None,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.conn_id = conn_id
self.cmd_type = cmd_type
self.table = table
self.query = query
self.target_dir = target_dir
self.append = append
self.file_type = file_type
self.columns = columns
self.num_mappers = num_mappers
self.split_by = split_by
self.where = where
self.export_dir = export_dir
self.input_null_string = input_null_string
self.input_null_non_string = input_null_non_string
self.staging_table = staging_table
self.clear_staging_table = clear_staging_table
self.enclosed_by = enclosed_by
self.escaped_by = escaped_by
self.input_fields_terminated_by = input_fields_terminated_by
self.input_lines_terminated_by = input_lines_terminated_by
self.input_optionally_enclosed_by = input_optionally_enclosed_by
self.batch = batch
self.direct = direct
self.driver = driver
self.verbose = verbose
self.relaxed_isolation = relaxed_isolation
self.hcatalog_database = hcatalog_database
self.hcatalog_table = hcatalog_table
self.create_hcatalog_table = create_hcatalog_table
self.properties = properties
self.extra_import_options = extra_import_options or {}
self.extra_export_options = extra_export_options or {}
self.hook = None
def execute(self, context):
"""
Execute sqoop job
"""
self.hook = SqoopHook(
conn_id=self.conn_id,
verbose=self.verbose,
num_mappers=self.num_mappers,
hcatalog_database=self.hcatalog_database,
hcatalog_table=self.hcatalog_table,
properties=self.properties
)
if self.cmd_type == 'export':
self.hook.export_table(
table=self.table,
export_dir=self.export_dir,
input_null_string=self.input_null_string,
input_null_non_string=self.input_null_non_string,
staging_table=self.staging_table,
clear_staging_table=self.clear_staging_table,
enclosed_by=self.enclosed_by,
escaped_by=self.escaped_by,
input_fields_terminated_by=self.input_fields_terminated_by,
input_lines_terminated_by=self.input_lines_terminated_by,
input_optionally_enclosed_by=self.input_optionally_enclosed_by,
batch=self.batch,
relaxed_isolation=self.relaxed_isolation,
extra_export_options=self.extra_export_options)
elif self.cmd_type == 'import':
            # Add the create-hcatalog-table flag to the extra import options
            # when requested. New constructor parameters can be routed through
            # extra_import_options the same way, so SqoopHook does not have to
            # be modified for every new parameter.
if self.create_hcatalog_table:
self.extra_import_options['create-hcatalog-table'] = ''
if self.table and self.query:
raise AirflowException(
                    'Cannot specify both table and query; provide exactly one.'
)
if self.table:
self.hook.import_table(
table=self.table,
target_dir=self.target_dir,
append=self.append,
file_type=self.file_type,
columns=self.columns,
split_by=self.split_by,
where=self.where,
direct=self.direct,
driver=self.driver,
extra_import_options=self.extra_import_options)
elif self.query:
self.hook.import_query(
query=self.query,
target_dir=self.target_dir,
append=self.append,
file_type=self.file_type,
split_by=self.split_by,
direct=self.direct,
driver=self.driver,
extra_import_options=self.extra_import_options)
else:
raise AirflowException(
"Provide query or table parameter to import using Sqoop"
)
else:
raise AirflowException("cmd_type should be 'import' or 'export'")
def on_kill(self):
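        # Kill the whole process group so the Sqoop CLI and any children it
        # spawned receive the signal, not just the immediate subprocess.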
        self.log.info('Sending SIGTERM signal to Sqoop process group')
os.killpg(os.getpgid(self.hook.sub_process.pid), signal.SIGTERM)
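
# --- Usage sketch (editorial addition, not part of the original operator) ---
# Shows how extra_import_options maps onto Sqoop CLI flags; the DAG id,
# connection id, table, and target path below are illustrative assumptions.
from datetime import datetime

from airflow import DAG

with DAG('sqoop_import_example', start_date=datetime(2021, 1, 1),
         schedule_interval=None) as example_dag:
    import_orders = SqoopOperator(
        task_id='import_orders',
        conn_id='sqoop_default',
        cmd_type='import',
        table='orders',
        target_dir='/data/orders',
        num_mappers=4,
        # Keys without values take an empty string, so this renders roughly as:
        #   --compress --compression-codec snappy
        extra_import_options={'compress': '', 'compression-codec': 'snappy'},
    )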
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
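# Module-level serializer shared by the request builders below; client-side
# validation is turned off so parameter validation is left to the service.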
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_available_stacks_request(
*,
os_type_selected: Optional[Union[str, "_models.Enum0"]] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2016-03-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/providers/Microsoft.Web/availableStacks')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if os_type_selected is not None:
query_parameters['osTypeSelected'] = _SERIALIZER.query("os_type_selected", os_type_selected, 'str')
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_operations_request(
**kwargs: Any
) -> HttpRequest:
api_version = "2016-03-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/providers/Microsoft.Web/operations')
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_available_stacks_on_prem_request(
subscription_id: str,
*,
os_type_selected: Optional[Union[str, "_models.Enum1"]] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2016-03-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Web/availableStacks')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if os_type_selected is not None:
query_parameters['osTypeSelected'] = _SERIALIZER.query("os_type_selected", os_type_selected, 'str')
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
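
def _example_build_request():
    """Illustrative sketch (editorial addition, not generated code).

    Shows what a request builder returns: an ~azure.core.rest.HttpRequest
    whose query string, including the fixed api-version, is already
    serialized; the client later formats the URL against its base endpoint.
    """
    request = build_get_available_stacks_request(os_type_selected="Linux")
    # e.g. ('GET', '/providers/Microsoft.Web/availableStacks?osTypeSelected=Linux&api-version=2016-03-01')
    return request.method, request.url
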
class ProviderOperations(object):
"""ProviderOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.web.v2016_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get_available_stacks(
self,
os_type_selected: Optional[Union[str, "_models.Enum0"]] = None,
**kwargs: Any
) -> Iterable["_models.ApplicationStackCollection"]:
"""Get available application frameworks and their versions.
        :param os_type_selected: Operating system to filter the available stacks by.
:type os_type_selected: str or ~azure.mgmt.web.v2016_03_01.models.Enum0
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ApplicationStackCollection or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2016_03_01.models.ApplicationStackCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationStackCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_get_available_stacks_request(
os_type_selected=os_type_selected,
template_url=self.get_available_stacks.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_get_available_stacks_request(
os_type_selected=os_type_selected,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ApplicationStackCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
get_available_stacks.metadata = {'url': '/providers/Microsoft.Web/availableStacks'} # type: ignore
@distributed_trace
def list_operations(
self,
**kwargs: Any
) -> Iterable["_models.CsmOperationCollection"]:
"""Gets all available operations for the Microsoft.Web resource provider. Also exposes resource
metric definitions.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CsmOperationCollection or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2016_03_01.models.CsmOperationCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CsmOperationCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_operations_request(
template_url=self.list_operations.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_operations_request(
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("CsmOperationCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_operations.metadata = {'url': '/providers/Microsoft.Web/operations'} # type: ignore
@distributed_trace
def get_available_stacks_on_prem(
self,
os_type_selected: Optional[Union[str, "_models.Enum1"]] = None,
**kwargs: Any
) -> Iterable["_models.ApplicationStackCollection"]:
"""Get available application frameworks and their versions.
        :param os_type_selected: Operating system to filter the available stacks by.
:type os_type_selected: str or ~azure.mgmt.web.v2016_03_01.models.Enum1
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ApplicationStackCollection or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2016_03_01.models.ApplicationStackCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApplicationStackCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_get_available_stacks_on_prem_request(
subscription_id=self._config.subscription_id,
os_type_selected=os_type_selected,
template_url=self.get_available_stacks_on_prem.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_get_available_stacks_on_prem_request(
subscription_id=self._config.subscription_id,
os_type_selected=os_type_selected,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ApplicationStackCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
get_available_stacks_on_prem.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Web/availableStacks'} # type: ignore
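
def _example_list_stacks(subscription_id):
    """Usage sketch (editorial assumption, not part of the generated file).

    Pages through available Linux stacks with the 2016-03-01 web client;
    ItemPaged follows next_link transparently, so one loop covers all pages.
    """
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.web.v2016_03_01 import WebSiteManagementClient

    client = WebSiteManagementClient(DefaultAzureCredential(), subscription_id)
    for stack in client.provider.get_available_stacks(os_type_selected="Linux"):
        print(stack.name)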
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Question.accepted_time'
db.alter_column(u'catalog_question', 'accepted_time', self.gf('django.db.models.fields.DateTimeField')(null=True))
def backwards(self, orm):
# Changing field 'Question.accepted_time'
db.alter_column(u'catalog_question', 'accepted_time', self.gf('django.db.models.fields.DateTimeField')(default=1))
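    # Usage sketch (editorial addition, not part of the generated migration):
    # with South installed, apply or revert this schema change via manage.py,
    # assuming the app label is 'catalog' (matching the table prefix above):
    #   python manage.py migrate catalog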
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catalog.answer': {
'Meta': {'object_name': 'Answer'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'description': ('tinymce.models.HTMLField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Question']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'catalog.article': {
'Meta': {'object_name': 'Article'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'added_time_staff_pick': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff_pick': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'new_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.NewUser']", 'null': 'True', 'blank': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {}),
'recommendation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.ArticleTag']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.articleemail': {
'Meta': {'object_name': 'ArticleEmail'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'email_subscriptions'", 'null': 'True', 'to': "orm['catalog.ArticleTag']"}),
'temp_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.articletag': {
'Meta': {'object_name': 'ArticleTag'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url_snippet': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'catalog.cfistoreitem': {
'Meta': {'object_name': 'CfiStoreItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'item': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.Product']", 'unique': 'True'}),
'likers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'cfi_store_item_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeCfiStoreItem']", 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.comment': {
'Meta': {'object_name': 'Comment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.documentation': {
'Meta': {'object_name': 'Documentation'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.emailcollect': {
'Meta': {'object_name': 'EmailCollect'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'catalog.favoritemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'FavoriteMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.image': {
'Meta': {'object_name': 'Image'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'full_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_s3': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['auth.User']"})
},
'catalog.instructablestep': {
'Meta': {'ordering': "['-step']", 'object_name': 'InstructableStep'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('tinymce.models.HTMLField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iid': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'null': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'step': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'default': 'None', 'max_length': '200', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'words': ('django.db.models.fields.IntegerField', [], {'default': '-1'})
},
'catalog.inventory': {
'Meta': {'unique_together': "(('part', 'space'),)", 'object_name': 'Inventory'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventory_part'", 'to': "orm['catalog.Product']"}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'space': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventory_space'", 'to': "orm['catalog.Space']"})
},
'catalog.like': {
'Meta': {'object_name': 'Like'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likearticle': {
'Meta': {'unique_together': "(('user', 'article'),)", 'object_name': 'LikeArticle'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Article']"}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likecfistoreitem': {
'Meta': {'unique_together': "(('user', 'cfi_store_item'),)", 'object_name': 'LikeCfiStoreItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'cfi_store_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.CfiStoreItem']"}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likechannel': {
'Meta': {'unique_together': "(('user', 'channel'),)", 'object_name': 'LikeChannel'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ArticleTag']"}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likecomment': {
'Meta': {'unique_together': "(('user', 'comment'),)", 'object_name': 'LikeComment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Comment']"}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeimage': {
'Meta': {'unique_together': "(('user', 'image'),)", 'object_name': 'LikeImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likelisting': {
'Meta': {'unique_together': "(('user', 'listing'),)", 'object_name': 'LikeListing'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'listing': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Listing']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'LikeMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'makeylikes'", 'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likenote': {
'Meta': {'unique_together': "(('user', 'note'),)", 'object_name': 'LikeNote'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'note': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Note']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproduct': {
'Meta': {'unique_together': "(('user', 'product'),)", 'object_name': 'LikeProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproductdescription': {
'Meta': {'unique_together': "(('user', 'product_description'),)", 'object_name': 'LikeProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductDescription']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproductimage': {
'Meta': {'unique_together': "(('user', 'image'),)", 'object_name': 'LikeProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductImage']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeproducttutorial': {
'Meta': {'unique_together': "(('user', 'tutorial', 'product'),)", 'object_name': 'LikeProductTutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likeshop': {
'Meta': {'unique_together': "(('user', 'shop'),)", 'object_name': 'LikeShop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.likevideo': {
'Meta': {'unique_together': "(('user', 'video'),)", 'object_name': 'LikeVideo'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'fb_like_id': ('django.db.models.fields.CharField', [], {'default': "'-1'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Video']"})
},
'catalog.list': {
'Meta': {'object_name': 'List'},
'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.ListItem']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.listgroup': {
'Meta': {'object_name': 'ListGroup'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.List']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.listing': {
'Meta': {'object_name': 'Listing'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'company': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'content': ('tinymce.models.HTMLField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'catalog.listitem': {
'Meta': {'object_name': 'ListItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'createdby': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.location': {
'Meta': {'object_name': 'Location'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.logidenticalproduct': {
'Meta': {'object_name': 'LogIdenticalProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product1'", 'to': "orm['catalog.Product']"}),
'product2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product2'", 'to': "orm['catalog.Product']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.makey': {
'Meta': {'object_name': 'Makey'},
'about': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'added_time_staff_pick': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'as_part': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'as_makey'", 'null': 'True', 'to': "orm['catalog.Product']"}),
'as_part_new': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'as_makey'", 'null': 'True', 'to': "orm['catalog.NewProduct']"}),
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeycomments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Comment']"}),
'cover_pic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'credits': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'derived_from': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'forked_as'", 'null': 'True', 'to': "orm['catalog.Makey']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'documentations': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeydocumentations'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Documentation']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff_pick': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'made_in': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'makeys_made_in'", 'null': 'True', 'to': "orm['catalog.Space']"}),
'mentors': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'modules_used': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'used_in'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'new_parts': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys_parts'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'new_tools': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys_tools'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'new_users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeys'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewUser']"}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeynotes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Note']"}),
'removed_collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makey_removed'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyvideos'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Video']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'why': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'catalog.makeyimage': {
'Meta': {'object_name': 'MakeyImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey_id': ('django.db.models.fields.IntegerField', [], {}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.newinventory': {
'Meta': {'unique_together': "(('part', 'space'),)", 'object_name': 'NewInventory'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'new_inventory_part'", 'to': "orm['catalog.NewProduct']"}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'space': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'new_inventory_space'", 'to': "orm['catalog.Space']"})
},
'catalog.newproduct': {
'Meta': {'object_name': 'NewProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.newuser': {
'Meta': {'object_name': 'NewUser'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.note': {
'Meta': {'ordering': "['order']", 'object_name': 'Note'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.product': {
'Meta': {'object_name': 'Product'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identicalto': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'makeys_as_tools': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'tools_used'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('django.db.models.fields.IntegerField', [], {}),
'space_as_tools': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tools_in_space'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Space']"}),
'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'products'", 'blank': 'True', 'to': "orm['catalog.Tutorial']"})
},
'catalog.productdescription': {
'Meta': {'object_name': 'ProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productdescriptions'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'blank': 'True'}),
'user_or_shop': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'catalog.productimage': {
'Meta': {'object_name': 'ProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productimages'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.productreview': {
'Meta': {'object_name': 'ProductReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product_reviews'", 'to': "orm['catalog.Product']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.productshopurl': {
'Meta': {'object_name': 'ProductShopUrl'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productshopurls'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.question': {
'Meta': {'object_name': 'Question'},
'accepted_answer': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'answer_of'", 'null': 'True', 'blank': 'True', 'to': "orm['catalog.Answer']"}),
'accepted_time': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'description': ('tinymce.models.HTMLField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'catalog.searchlog': {
'Meta': {'object_name': 'SearchLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.shop': {
'Meta': {'object_name': 'Shop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'shopimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.shopreview': {
'Meta': {'object_name': 'ShopReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shop_reviews'", 'to': "orm['catalog.Shop']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.space': {
'Meta': {'object_name': 'Space'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_admins'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'date_of_founding': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'facebook': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'space_inventory'", 'symmetrical': 'False', 'through': "orm['catalog.Inventory']", 'to': "orm['catalog.Product']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'last_updated_external': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '15', 'decimal_places': '10', 'blank': 'True'}),
'logo': ('django.db.models.fields.URLField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '15', 'decimal_places': '10', 'blank': 'True'}),
'map_zoom_level': ('django.db.models.fields.IntegerField', [], {'default': '13'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_members'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'membership_fee': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'new_inventory': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'space_new_inventory'", 'symmetrical': 'False', 'through': "orm['catalog.NewInventory']", 'to': "orm['catalog.NewProduct']"}),
'new_members': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_new_members'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewUser']"}),
'new_tools': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'space_new_tools'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.NewProduct']"}),
'no_of_members': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'twitter': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'catalog.spacereview': {
'Meta': {'object_name': 'SpaceReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'space': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'space_reviews'", 'to': "orm['catalog.Space']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.textdocumentation': {
'Meta': {'object_name': 'TextDocumentation'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('tinymce.models.HTMLField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'text_documentations'", 'null': 'True', 'to': "orm['catalog.Makey']"}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.TextField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.toindexstore': {
'Meta': {'object_name': 'ToIndexStore'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.topmakeys': {
'Meta': {'object_name': 'TopMakeys'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.topproducts': {
'Meta': {'object_name': 'TopProducts'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.topshops': {
'Meta': {'object_name': 'TopShops'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"})
},
'catalog.toptutorials': {
'Meta': {'object_name': 'TopTutorials'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"})
},
'catalog.topusers': {
'Meta': {'object_name': 'TopUsers'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tutorialimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.upfile': {
'Meta': {'object_name': 'UpFile'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'filetype': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'files'", 'null': 'True', 'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userflags': {
'Meta': {'object_name': 'UserFlags'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_maker_intro': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_makey_intro': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userinteraction': {
'Meta': {'object_name': 'UserInteraction'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'event': ('django.db.models.fields.IntegerField', [], {}),
'event_id': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'catalog.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'aboutme': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'blog_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'college': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'facebook_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'following': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'followers'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.UserProfile']"}),
'github_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructables_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'linkedin_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "'Bangalore, India'", 'max_length': '255'}),
'membership': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'patent': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'profile_pic': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Image']", 'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'stackoverflow_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'twitter_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'yt_channel_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'})
},
'catalog.video': {
'Meta': {'object_name': 'Video'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalog.Comment']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'embed_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'site': ('django.db.models.fields.IntegerField', [], {}),
'thumb_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'catalog.votemakey': {
'Meta': {'unique_together': "(('user', 'makey'),)", 'object_name': 'VoteMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.voteproductreview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteProductReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.voteshopreview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteShopReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ShopReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.votespacereview': {
'Meta': {'unique_together': "(('user', 'review'),)", 'object_name': 'VoteSpaceReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'review': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.SpaceReview']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalog.votetutorial': {
'Meta': {'unique_together': "(('user', 'tutorial'),)", 'object_name': 'VoteTutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'vote': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['catalog']
import logging, re, os, shutil, sys
from datetime import date, datetime
from dataclasses import dataclass
from collections import defaultdict
from os.path import expanduser
from typing import List, Optional
from dateutil.parser import parse as dateutil_parse
from modgrammar import *
import yaml
from invoice import Invoice
def get_default_settings():
settings = {
'billcode': True,
'billcodes': {},
'billrate': 1000.,
'footer': [],
'prefix': '',
'invoice_on': 'marker',
'invoice_marker': '====',
'summary_on': 'marker',
'summary_marker': '----',
'verbose': 0,
'weekly_summary_template': '---------- {hours_this_week} ({hours_since_invoice} uninvoiced)',
'invoice_template': '========== {hours_this_week} ({hours_since_invoice} since invoice)',
'invoice_filename_template': 'invoice-{invoice_code}.pdf',
'address': []
}
return settings
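# Example renderings of the templates above (illustrative; numbers assumed):
#   weekly_summary_template -> '---------- 7.50 (22.25 uninvoiced)'
#   invoice_template        -> '========== 7.50 (22.25 since invoice)'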
def samefile(f1, f2):
'''
A simple replacement for the os.path.samefile() function, which does not
exist on the Windows platform.
Mac/Unix are supported in the standard way :).
Author: Denis Barmenkov <denis.barmenkov@gmail.com>
Source: code.activestate.com/recipes/576886/
Copyright: this code is free, but if you want to use it, please
keep this multiline comment along with function source.
Thank you.
2009-08-19 20:13
'''
try:
return os.path.samefile(f1, f2)
except AttributeError:
f1 = os.path.abspath(f1).lower()
f2 = os.path.abspath(f2).lower()
return f1 == f2
@dataclass
class TimesheetLineItem:
date: date
prefix: Optional[str] = None
suffix: Optional[str] = None
billcode: Optional[str] = None
hours: Optional[float] = None
ranges: Optional[List[dict]] = None
# @dataclass
# class TimesheetSummary:
# hours:
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
grammar_whitespace_mode = 'explicit'
class MyDate(Grammar):
# grammar = (WORD('0-9', "-0-9/", grammar_name='date'))
grammar = (WORD('2', "-0-9", fullmatch=True, grammar_name='date')
| WORD('0-9', "-0-9/", grammar_name='date'))
grammar_tags = ['date']
class BillCode(Grammar):
""" All capital letter billing code. """
grammar = (WORD("A-Z", grammar_name='bill_code'))
class Hours(Grammar):
grammar = (WORD(".0-9", grammar_name='hours'), OPTIONAL("h"))
class Hour(Grammar):
grammar = WORD("0-9", min=1, max=2, grammar_name='hour')
class Minute(Grammar):
grammar = WORD("0-9", min=1, max=2, grammar_name='minute')
class AMPM(Grammar):
grammar = L("A") | L("P") | L("a") | L("p")
class MyTime(Grammar):
grammar = (G(Hour, OPTIONAL(":", Minute))), OPTIONAL(AMPM)
# grammar = (WORD("0-9:", grammar_name='time'), OPTIONAL(L("A") | L("P") | L("a") | L("p"), grammar_name='ampm'))
class Range(Grammar):
grammar = G(MyTime, OPTIONAL(WHITESPACE), '-', OPTIONAL(WHITESPACE),
OPTIONAL(MyTime), OPTIONAL('(', Hours, ')'), grammar_name='range')
class RangeList(Grammar):
grammar = LIST_OF(G(Range | Hours), sep=G(",", OPTIONAL(WHITESPACE)), grammar_name="ranges")
class Prefix(Grammar):
grammar = (ZERO_OR_MORE(L('*') | WHITESPACE), )
class Suffix(Grammar):
grammar = (OPTIONAL(WHITESPACE), OPTIONAL(L('#'), REST_OF_LINE), EOF)
class MyGrammar (Grammar):
grammar = (
G(Prefix, MyDate, WHITESPACE, Hours, WHITESPACE, RangeList, Suffix, grammar_name="3args") |
G(Prefix, MyDate, WHITESPACE, RangeList, Suffix, grammar_name="2argrange") |
G(Prefix, MyDate, WHITESPACE, Hours, Suffix, grammar_name="2arghours") |
G(Prefix, MyDate, WHITESPACE, BillCode, WHITESPACE, Hours, WHITESPACE, RangeList, Suffix, grammar_name="3args") |
G(Prefix, MyDate, WHITESPACE, BillCode, WHITESPACE, RangeList, Suffix, grammar_name="2argrange") |
G(Prefix, MyDate, WHITESPACE, BillCode, WHITESPACE, Hours, Suffix, grammar_name="2arghours") |
G(Prefix, MyDate, Suffix, grammar_name="justdate")
)
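# Illustrative lines each alternative above is meant to match (the exact
# strings are assumptions; see the doctests in parse() for verified cases):
#   '5/20/2015 1.5 10a-11:30a'    -> '3args'      (date, hours, range list)
#   '5/20/2015 10a-11:30a'        -> '2argrange'  (date, range list)
#   '5/20/2015 1.5'               -> '2arghours'  (date, hours)
#   '5/20/2015 ACME 1.5 10a-11a'  -> billcode variants
#   '5/20/2015'                   -> 'justdate'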
myparser = MyGrammar.parser()
time_regex = re.compile(r'(\d{1,2})(:\d+)?([aApP])?')
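# Groups: (hour)(:minute)?(am/pm letter)?, e.g. '11:45p' -> ('11', ':45', 'p')
# and '1' -> ('1', None, None).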
def parse_time(cur_date, time_str, after=None):
""" Parse time
>>> parse_time(datetime(2015, 6, 3, 0, 0), '12p')
datetime.datetime(2015, 6, 3, 12, 0)
>>> parse_time(datetime(2015, 6, 3, 0, 0), '12:01p')
datetime.datetime(2015, 6, 3, 12, 1)
>>> parse_time(datetime(2015, 6, 3, 0, 0), '12a')
datetime.datetime(2015, 6, 3, 0, 0)
>>> parse_time(datetime(2015, 6, 3, 0, 0), '1')
datetime.datetime(2015, 6, 3, 13, 0)
>>> parse_time(datetime(2015, 6, 3, 0, 0), '1a')
datetime.datetime(2015, 6, 3, 1, 0)
>>> parse_time(datetime(2015, 6, 3, 0, 0), '11:45p')
datetime.datetime(2015, 6, 3, 23, 45)
>>> parse_time(datetime(2015, 6, 3, 0, 0), '12:45a')
datetime.datetime(2015, 6, 3, 0, 45)
>>> parse_time(datetime(2015, 6, 3, 0, 0), '12:45p')
datetime.datetime(2015, 6, 3, 12, 45)
"""
m = time_regex.match(time_str)
# print( "[parse_time]: " + time_str, m.groups())
if not m:
return None
g = m.groups()
# print(g)
hour = int(g[0])
minute = 0
if g[1] is not None:
minute = int(g[1][1:])
if g[2] is not None:
if hour != 12 and g[2] in ('p','P'):
hour += 12
elif hour == 12 and g[2] in ('a','A'):
hour -= 12
else:
# AM/PM not specified.
time_as_am_guess = datetime(cur_date.year, cur_date.month, cur_date.day, hour=hour, minute=minute)
if after is not None:
if after > time_as_am_guess:
hour += 12
else:
if hour < 7:
logger.warning("Assuming time {} is PM".format(time_str))
hour += 12
return datetime(cur_date.year, cur_date.month, cur_date.day, hour=hour, minute=minute)
class TimesheetParseError(Exception):
pass
def parse(line, settings=None, prefix=None) -> Optional[TimesheetLineItem]:
""" Parse grammar.
>>> myparser.parse_text("5/20/2015", reset=True, eof=True)
MyGrammar<'5/20/2015'>
>>> parse("5/20/2015", prefix='')
TimesheetLineItem(date=datetime.date(2015, 5, 20), prefix=Prefix<''>, suffix=Suffix<None, None, ''>, billcode=None, hours=None, ranges=None)
>>> d = parse("6/21/2015 1.25 3:33-4:44a", prefix='')
Traceback (most recent call last):
...
ts.TimesheetParseError: 2015-06-21 04:44:00 < 2015-06-21 15:33:00 in 6/21/2015 1.25 3:33-4:44a
>>> d = parse("5/20/2015 5 10:10 - 10:25a, 12-", prefix='')
>>> d.date
datetime.date(2015, 5, 20)
>>> d.hours
0.25
>>> len(d.ranges)
2
>>> d.ranges[0]['s']
datetime.datetime(2015, 5, 20, 10, 10)
>>> format_ret(d)
'2015-05-20 .25 10:10a-10:25a(.25), 12p-'
>>> d = parse('6/15/2015 4.25 10a-11:30(1.5), 3-5:45p(2.75)', prefix='')
>>> d.ranges[1]['duration']
2.75
>>> d = parse('* 2015-06-03 1.5 10a-11:15a, 12:45p-1p, 6-6:15 # whatever yo', prefix='* ')
>>> d = parse('* 7/22/2015 6.25 10:00a-11:30a(1.5), 12:30p-3:30p(3), 9:15p-11p(1.75)', prefix='* ')
>>> d = parse('* 7/13/2015 3.5 .25, 1:30p-5p', prefix='* ')
>>> format_ret(d)
'* 2015-07-13 3.75 .25, 1:30p-5p(3.50)'
"""
if settings is None:
settings = get_default_settings()
if prefix is None:
prefix = settings.get('prefix','* ')
if not line.strip():
return None
line = line.rstrip()
origresult = myparser.parse_text(line, reset=True, eof=True) #, matchtype='longest')
result = origresult.elements[0]
date_g = result.get(MyDate)
if date_g is None:
return None
cur_date = dateutil_parse(str(date_g)).date()
ret = TimesheetLineItem(date=cur_date)
ret.prefix = result.get(Prefix)
ret.suffix = result.get(Suffix)
ret.billcode = result.get(BillCode)
hours_g = result.get(Hours)
if hours_g is not None:
ret.hours = float(str(hours_g))
ranges = result.get(RangeList)
if ranges is not None:
ret.ranges = []
# logger.debug(ranges.elements)
for r in ranges.elements[0].elements:
if r.grammar_name == 'Hours':
duration = float(str(r))
ret.ranges.append( {'duration': duration} )
elif r.grammar_name == 'Range':
times = r.find_all(MyTime)
if len(times)==1:
start = str(times[0])
end = None
elif len(times)==2:
start = str(times[0])
end = str(times[1])
else:
raise TimesheetParseError("More than two times in a range: {}".format(line))
try:
parsed_start = parse_time(cur_date, start)
except (ValueError, ):
parsed_start = None
parsed_end = None
if end is not None:
try:
parsed_end = parse_time(cur_date, end, after=parsed_start)
except (ValueError, AttributeError):
pass
if parsed_end is not None:
if parsed_end < parsed_start:
# import pdb; pdb.set_trace()
raise TimesheetParseError("{} < {} in {}".format(parsed_end, parsed_start, line))
duration = (parsed_end-parsed_start).seconds/60./60.
else:
duration = None
ret.ranges.append( {'s': parsed_start, 'e': parsed_end, 'duration': duration} )
else:
pass
if ret.ranges is not None:
total_duration = sum([r['duration'] for r in ret.ranges if r['duration'] is not None])
if ret.hours is not None and format_hours(total_duration) != format_hours(ret.hours):
logger.warning('Changing total hours from %s to %s\n Original: %s' % (ret.hours, total_duration, line))
ret.hours = total_duration
if len(ret.ranges) == 1 and 's' not in ret.ranges[0]:
ret.ranges = None  # a lone bare duration is already captured in ret.hours
if ret.hours is not None and ret.hours > 9:
logger.warning('Calculated duration={}, which is above normal\n Original: {}'.format(ret.hours, line))
if settings['verbose'] >= 2:
print('= parsed={}'.format(ret))
return ret
def format_hours(h):
if h is None:
return '-'
if int(h) == h:
return str(int(h))
return ("%.2f" % h).lstrip('0')
def format_time(t):
""" Print out succinct time.
>>> format_time(datetime(2015, 1, 1, 5, 15, 0))
'5:15a'
>>> format_time(datetime(2015, 1, 1, 12, 0, 0))
'12p'
>>> format_time(datetime(2015, 1, 1, 0, 1, 0))
'12:01a'
"""
if t is None:
return ""
ampm = "a"
if t.hour > 12:
ampm = "p"
hour = t.hour - 12
elif t.hour == 12:
ampm = "p"
hour = 12
elif t.hour == 0:
hour = 12
else:
hour = t.hour
if t.minute==0:
s = "%d%s" % (hour, ampm)
else:
s = "%d:%02d%s" % (hour, t.minute, ampm)
return s
def format_range(r,):
if 's' not in r:
return '%s' % format_hours(r['duration'])
else:
if r['e'] is not None:
return "%s-%s(%s)" % (format_time(r['s']), format_time(r['e']), format_hours(r['duration']))
else:
return "%s-" % (format_time(r['s']), )
def format_ret(ret, settings=None):
if settings is None:
settings = get_default_settings()
formatted_billcode = ''
if settings['billcode']:
formatted_billcode = '%5s'% (ret.billcode or '', )
if ret.ranges is None:
total_duration = ret.hours
output = '%10s%s %5s' % (ret.date, formatted_billcode, format_hours(total_duration))
else:
parsed_ranges = ret.ranges
rearranges = [format_range(r) for r in parsed_ranges]
output = '%10s%s %5s %s' % (ret.date, formatted_billcode, format_hours(ret.hours), ", ".join(rearranges))
suffix = str(ret.suffix).strip()
if len(suffix) > 0:
suffix = " " + suffix
return '%s%s%s' % (ret.prefix, output, suffix)
FRONT_MATTER_TERMINUS_REGEX = re.compile('^---+$')
def load_front_matter(f):
""" Load jekyll-style front-matter config from top of file.
"""
settings = get_default_settings()
def update_from_file(settings, filename):
try:
default_f = open(filename)
except IOError:
print("'{}' not found, skipping...".format(filename))
return
if default_f:
print("loading from '{}'...".format(filename))
default_yml_settings = yaml.safe_load(default_f)
settings.update(default_yml_settings)
default_f.close()
update_from_file(settings, expanduser('~/.tsconfig.yml'))
update_from_file(settings, 'default.yml')
front_matter = []
found=False
for line in f:
if FRONT_MATTER_TERMINUS_REGEX.match(line):
found=True
break
front_matter.append(line)
if not found:
print("Front-matter YAML is required.")
sys.exit(1)
fm_settings = yaml.safe_load("".join(front_matter))
settings.update(fm_settings)
return settings, front_matter
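# Illustrative input layout this loader expects (field names such as
# 'client_name' are assumptions based on their use in process_timesheet):
#   billrate: 150.
#   client_name: Acme
#   ---
#   * 2015-06-03 1.5 10a-11:15a, 12:45p-1p  # notes
#   ----------  <- weekly summary marker (summary_marker)
#   ==========  <- invoice marker (invoice_marker)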
def process_timesheet(f, outf, verbose=0, invoice=False):
global weekly_hours, invoice_hours
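# NOTE: module-level globals are used here so the nested helpers below can
# mutate these running totals via their own 'global' declarations.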
summary_results = {}
last_date = None
last_iso = None
invoice_has_started = False
weekly_hours = 0.
invoice_hours = 0.
invoice_hours_per_code = defaultdict(int)
def format_summary_line():
# global weekly_hours, invoice_hours,
weekly_summary_template = settings['prefix'] + settings['weekly_summary_template']
return weekly_summary_template.format(
hours_this_week=format_hours(weekly_hours),
hours_since_invoice=format_hours(invoice_hours))
def write_summary_line(invoice=False, original_line=''):
global weekly_hours, invoice_hours
if invoice:
template = settings['invoice_template']
else:
template = settings['weekly_summary_template']
summary_line = settings['prefix'] + template.format(
hours_this_week=format_hours(weekly_hours),
hours_since_invoice=format_hours(invoice_hours))
original_line_split = original_line.split('#', 1)
comment = ''
if len(original_line_split)==2:
comment = original_line_split[-1].strip()
summary_line += ' # ' + comment
invoice_id, invoice_description = '', ''
try:
invoice_id, invoice_description = comment.split(',', 1)
except ValueError:
pass
if invoice:
invoice_data = {'id': invoice_id, 'hours': invoice_hours, 'items': [], 'description': invoice_description.strip()}
for k,v in invoice_hours_per_code.items():
invoice_data['items'].append({'billcode': k, 'hours': v})
invoices.append(invoice_data)
if weekly_hours != 0. or invoice:
if settings['verbose'] >= 1:
print(summary_line)
if outf:
outf.write(summary_line + '\n')
weekly_hours = 0.
if invoice:
invoice_hours = 0.
invoice_hours_per_code.clear()
if outf:
outf.write('\n')
def write_final_summary_line():
if outf:
outf.write('\n')
if outf:
outf.write('\n')
settings, raw_front_matter = load_front_matter(f)
if verbose is not None:
    settings['verbose'] = verbose
# logger.info("settings {}".format(settings))
for line in raw_front_matter:
outf.write(line)
# yaml.dump(settings, outf, default_flow_style=False)
outf.write('----\n')
invoices = []
for line in f:
if settings['verbose'] >= 1:
print('< {}'.format(line.rstrip()))
try:
if invoice_has_started:
# Found an invoice marker, so rewrite it...
if settings['invoice_on'] == 'marker' and line.startswith(settings['invoice_marker']):
write_summary_line(invoice=True, original_line=line)
if settings['verbose'] >= 1:
print("> Wrote summary line".format())
continue
# Throw out empty lines
if line.strip() == '':
continue
# Just throw out old summary lines.. we'll write them again ourselves.
if settings['summary_on'] == 'marker':
if line.startswith(settings['summary_marker']):
write_summary_line(original_line=line)
continue
else:
if line.startswith(format_summary_line()):
continue
ret = parse(line, settings)
if ret is None:
if settings['verbose'] >= 1 and line.strip() != '':
print("> Failed to parse. Writing straight.".format())
if outf:
outf.write(line.rstrip() + '\n')
continue
if not invoice_has_started and settings['verbose'] >= 1:
print("! Invoice has started!")
invoice_has_started = True
if last_date is not None and last_date > ret.date:
logger.warning('Date {} is listed after date {}.'.format(ret.date, last_date))
if ret.date in summary_results:
logger.warning('Date {} listed multiple times.'.format(ret.date))
iso = ret.date.isocalendar()
if settings['summary_on'] == 'weekly':
if last_iso is not None and (iso[0] != last_iso[0] or iso[1] != last_iso[1]):
write_summary_line()
last_date = ret.date
last_iso = iso
weekly_hours += ret.hours
invoice_hours += ret.hours
invoice_hours_per_code[str(ret.billcode or '')] += ret.hours
summary_results[ret.date] = ret
fixed_line = format_ret(ret, settings)
if settings['verbose'] >= 1:
print(">", fixed_line)
if outf:
outf.write(fixed_line.rstrip() + '\n')
except TimesheetParseError:
print("Problem parsing.")
raise
except ParseError:
if outf:
outf.write(line.rstrip() + '\n')
# print 'skipped...'
pass
# logger.exception("failed to parse")
# raise
write_summary_line()
if outf:
outf.close()
print("{} hours uninvoiced currently...".format(format_hours(invoice_hours)))
if invoice:
for i in invoices:
invoice = Invoice(i['id'], [], settings['client_name'], footer=settings['footer'], body=[i['description']], address=settings['address'])
for item in i['items']:
if settings['billcode']:
billcode_data = settings['billcodes'][item['billcode']]
else:
billcode_data = settings['billcodes']['default']
invoice.add_item(
name=billcode_data['description'],
qty=round(item['hours'], 2),
unit_price=billcode_data['rate'],
description=billcode_data['description'])
invoice_filename_template = settings['invoice_filename_template']
invoice_filename = invoice_filename_template.format(
invoice_code=i['id'],
client_name=settings['client_name']
)
invoice.save(invoice_filename)
print("Wrote invoice to {}".format(invoice_filename))
if __name__=='__main__':
import argparse
parser = argparse.ArgumentParser(description='Process a timesheet')
parser.add_argument('file', metavar='FILE')
parser.add_argument('-v', '--verbose', action='count', default=None)
parser.add_argument('-i', '--invoice', action='store_true', help='Write PDF invoice.')
parser.add_argument('-o', '--out', default=None, help="Output file; defaults to overwriting FILE in place.")
args = parser.parse_args()
if args.out is None:
args.out = args.file
input_filename = args.file
output_filename = args.out
is_inplace = samefile(input_filename, output_filename)
backup_input_filename = input_filename + '.backup'
shutil.copyfile(input_filename, backup_input_filename)
if not is_inplace:
backup_output_filename = output_filename + '.backup'
shutil.copyfile(output_filename, backup_output_filename)
copy_to_on_completion = None
if is_inplace:
real_output_filename = output_filename
output_filename = output_filename + '.temp_outfile'
with open(input_filename) as f, open(output_filename, 'w') as outf:
try:
process_timesheet(f=f, outf=outf, verbose=args.verbose, invoice=args.invoice)
success = True
except Exception as exc:
logger.exception("Crash while processing timesheet.")
success = False
if success:
if is_inplace:
shutil.copyfile(output_filename, real_output_filename)
os.unlink(output_filename)
print("Success!")
else:
print("Crash while processing timesheet. The input failed to process (but is unharmed).")
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
#
# This module is part of GitDB and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
"""Contains interfaces for basic database building blocks"""
__all__ = ( 'ObjectDBR', 'ObjectDBW', 'RootPathDB', 'CompoundDB', 'CachingDB',
'TransportDB', 'ConfigurationMixin', 'RepositoryPathsMixin',
'RefSpec', 'FetchInfo', 'PushInfo', 'ReferencesMixin', 'SubmoduleDB',
'IndexDB', 'HighLevelRepository')
class ObjectDBR(object):
"""Defines an interface for object database lookup.
Objects are identified either by their 20 byte bin sha"""
def __contains__(self, sha):
return self.has_object(sha)
#{ Query Interface
def has_object(self, sha):
"""
:return: True if the object identified by the given 20 byte
binary sha is contained in the database"""
raise NotImplementedError("To be implemented in subclass")
def has_object_async(self, reader):
"""Return a reader yielding information about the membership of objects
as identified by shas
:param reader: Reader yielding 20 byte shas.
:return: async.Reader yielding tuples of (sha, bool) pairs which indicate
whether the given sha exists in the database or not"""
raise NotImplementedError("To be implemented in subclass")
def info(self, sha):
""" :return: OInfo instance
:param sha: bytes binary sha
:raise BadObject:"""
raise NotImplementedError("To be implemented in subclass")
def info_async(self, reader):
"""Retrieve information of a multitude of objects asynchronously
:param reader: Channel yielding the sha's of the objects of interest
:return: async.Reader yielding OInfo|InvalidOInfo, in any order"""
raise NotImplementedError("To be implemented in subclass")
def stream(self, sha):
""":return: OStream instance
:param sha: 20 bytes binary sha
:raise BadObject:"""
raise NotImplementedError("To be implemented in subclass")
def stream_async(self, reader):
"""Retrieve the OStream of multiple objects
:param reader: see ``info``
:param max_threads: see ``ObjectDBW.store``
:return: async.Reader yielding OStream|InvalidOStream instances in any order
:note: depending on the system configuration, it might not be possible to
read all OStreams at once. Instead, read them individually using reader.read(x)
where x is small enough."""
raise NotImplementedError("To be implemented in subclass")
def size(self):
""":return: amount of objects in this database"""
raise NotImplementedError()
def sha_iter(self):
"""Return iterator yielding 20 byte shas for all objects in this data base"""
raise NotImplementedError()
def partial_to_complete_sha_hex(self, partial_hexsha):
"""
:return: 20 byte binary sha1 from the given less-than-40 byte hexsha
:param partial_hexsha: hexsha with less than 40 bytes
:raise AmbiguousObjectName: If multiple objects would match the given sha
:raise BadObject: If the object was not found"""
raise NotImplementedError()
def partial_to_complete_sha(self, partial_binsha, canonical_length):
""":return: 20 byte sha as inferred by the given partial binary sha
:param partial_binsha: binary sha with less than 20 bytes
:param canonical_length: length of the corresponding canonical (hexadecimal) representation.
It is required as binary sha's cannot display whether the original hex sha
had an odd or even number of characters
:raise AmbiguousObjectName:
:raise BadObject: """
#} END query interface
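# A minimal in-memory sketch of the query interface above (illustrative only;
# '_DictObjectDBR' and its storage layout are assumptions, not part of GitDB):
class _DictObjectDBR(ObjectDBR):
    def __init__(self, objects=()):
        self._objects = dict(objects)   # 20 byte binary sha -> raw object data
    def has_object(self, sha):
        return sha in self._objects
    def size(self):
        return len(self._objects)
    def sha_iter(self):
        return iter(self._objects)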
class ObjectDBW(object):
"""Defines an interface to create objects in the database"""
#{ Edit Interface
def set_ostream(self, stream):
"""
Adjusts the stream to which all data should be sent when storing new objects
:param stream: if not None, the stream to use, if None the default stream
will be used.
:return: previously installed stream, or None if there was no override
:raise TypeError: if the stream doesn't have the supported functionality"""
raise NotImplementedError("To be implemented in subclass")
def ostream(self):
"""
:return: overridden output stream this instance will write to, or None
if it will write to the default stream"""
raise NotImplementedError("To be implemented in subclass")
def store(self, istream):
"""
Create a new object in the database
:return: the input istream object with its sha set to its corresponding value
:param istream: IStream compatible instance. If its sha is already set
to a value, the object will just be stored in our database format,
in which case the input stream is expected to be in object format (header + contents).
:raise IOError: if data could not be written"""
raise NotImplementedError("To be implemented in subclass")
def store_async(self, reader):
"""
Create multiple new objects in the database asynchronously. The method will
return right away, returning an output channel which receives the results as
they are computed.
:return: Channel yielding your IStream which served as input, in any order.
The IStreams sha will be set to the sha it received during the process,
or its error attribute will be set to the exception informing about the error.
:param reader: async.Reader yielding IStream instances.
The same instances will be used in the output channel as were received
in by the Reader.
:note: As some ODB implementations implement this operation atomically, they might
abort the whole operation if one item could not be processed. Hence check how
many items have actually been produced."""
raise NotImplementedError("To be implemented in subclass")
#} END edit interface
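# Minimal sketch of the edit interface's stream-override contract
# (illustrative only; '_NullObjectDBW' is an assumption, not part of GitDB):
class _NullObjectDBW(ObjectDBW):
    def __init__(self):
        self._ostream = None
    def set_ostream(self, stream):
        prev = self._ostream            # previously installed stream, or None
        self._ostream = stream
        return prev
    def ostream(self):
        return self._ostream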
class RootPathDB(object):
"""Provides basic facilities to retrieve files of interest"""
def __init__(self, root_path):
"""Initialize this instance to look for its files at the given root path
All subsequent operations will be relative to this path
:raise InvalidDBRoot:
:note: The base will not perform any accessibility checking, as the base
might not yet be accessible but may become accessible before the first
access."""
try:
super(RootPathDB, self).__init__(root_path)
except TypeError:
pass
# END handle py 2.6
#{ Interface
def root_path(self):
""":return: path at which this db operates"""
raise NotImplementedError()
def db_path(self, rela_path):
"""
:return: the given relative path relative to our database root, allowing
potential access to datafiles
:param rela_path: if not None or '', the relative path will be appended
to the database root path. Otherwise you will obtain the database root path itself"""
raise NotImplementedError()
#} END interface
class CachingDB(object):
"""A database which uses caches to speed-up access"""
#{ Interface
def update_cache(self, force=False):
"""
Call this method if the underlying data changed to trigger an update
of the internal caching structures.
:param force: if True, the update must be performed. Otherwise the implementation
may decide not to perform an update if it thinks nothing has changed.
:return: True if an update was performed because something had indeed changed"""
raise NotImplementedError()
#} END interface
class CompoundDB(object):
"""A database which delegates calls to sub-databases.
They should usually be cached and lazy-loaded"""
#{ Interface
def databases(self):
""":return: tuple of database instances we use for lookups"""
raise NotImplementedError()
#} END interface
class IndexDB(object):
"""A database which provides a flattened index to all objects in its currently
active tree."""
@property
def index(self):
""":return: IndexFile compatible instance"""
raise NotImplementedError()
class RefSpec(object):
"""A refspec is a simple container which provides information about the way
something should be fetched or pushed. It requires the use of symbols to describe
the actual objects, which is done using reference names (or respective instances
which resolve to actual reference names)."""
__slots__ = ('source', 'destination', 'force')
def __init__(self, source, destination, force=False):
"""initalize the instance with the required values
:param source: reference name or instance. If None, the Destination
is supposed to be deleted."""
self.source = source
self.destination = destination
self.force = force
if self.destination is None:
raise ValueError("Destination must be set")
def __str__(self):
""":return: a git-style refspec"""
s = str(self.source)
if self.source is None:
s = ''
#END handle source
d = str(self.destination)
p = ''
if self.force:
p = '+'
#END handle force
res = "%s%s:%s" % (p, s, d)
def delete_destination(self):
return self.source is None
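# Illustrative git-style renderings produced by __str__ above (assumed):
#   RefSpec('master', 'refs/remotes/origin/master')     -> 'master:refs/remotes/origin/master'
#   RefSpec('master', 'refs/heads/master', force=True)  -> '+master:refs/heads/master'
#   RefSpec(None, 'refs/heads/old')                     -> ':refs/heads/old'  (deletion)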
class RemoteProgress(object):
"""
Handler providing an interface to parse progress information emitted by git-push
and git-fetch and to dispatch callbacks allowing subclasses to react to the progress.
Subclasses should derive from this type.
"""
_num_op_codes = 7
BEGIN, END, COUNTING, COMPRESSING, WRITING, RECEIVING, RESOLVING = [1 << x for x in range(_num_op_codes)]
STAGE_MASK = BEGIN|END
OP_MASK = ~STAGE_MASK
#{ Subclass Interface
def line_dropped(self, line):
"""Called whenever a line could not be understood and was therefore dropped."""
pass
def update(self, op_code, cur_count, max_count=None, message='', input=''):
"""Called whenever the progress changes
:param op_code:
Integer allowing to be compared against Operation IDs and stage IDs.
Stage IDs are BEGIN and END. BEGIN will only be set once for each Operation
ID as well as END. It may be that BEGIN and END are set at once in case only
one progress message was emitted due to the speed of the operation.
Between BEGIN and END, none of these flags will be set
Operation IDs are all held within the OP_MASK. Only one Operation ID will
be active per call.
:param cur_count: Current absolute count of items
:param max_count:
The maximum count of items we expect. It may be None in case there is
no maximum number of items or if it is (yet) unknown.
:param message:
In case of the 'WRITING' operation, it contains the amount of bytes
transferred. It may possibly be used for other purposes as well.
:param input:
The actual input string that was used to parse the information from.
This is usually a line from the output of git-fetch, but really
depends on the implementation
You may read the contents of the current line in self._cur_line"""
pass
def __call__(self, message, input=''):
"""Same as update, but with a simpler interface which only provides the
message of the operation.
:note: This method will be called in addition to the update method. It is
up to you which one you implement"""
pass
#} END subclass interface
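# Sketch of a RemoteProgress subclass reacting to the op codes above
# (illustrative only; '_PrintProgress' is an assumption, not part of GitDB):
class _PrintProgress(RemoteProgress):
    def update(self, op_code, cur_count, max_count=None, message='', input=''):
        op = op_code & self.OP_MASK                 # operation id (COUNTING, ...)
        if op_code & self.BEGIN:
            print("begin op %d" % op)
        print("%s/%s %s" % (cur_count, max_count, message))
        if op_code & self.END:
            print("end op %d" % op)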
class PushInfo(object):
"""A type presenting information about the result of a push operation for exactly
one refspec
flags # bitflags providing more information about the result
local_ref # Reference pointing to the local reference that was pushed
# It is None if the ref was deleted.
remote_ref_string # path to the remote reference located on the remote side
remote_ref # Remote Reference on the local side corresponding to
# the remote_ref_string. It can be a TagReference as well.
old_commit_binsha # binary sha to commit at which the remote_ref was standing before we pushed
# it to local_ref.commit. Will be None if an error was indicated
summary # summary line providing human readable english text about the push
"""
__slots__ = tuple()
NEW_TAG, NEW_HEAD, NO_MATCH, REJECTED, REMOTE_REJECTED, REMOTE_FAILURE, DELETED, \
FORCED_UPDATE, FAST_FORWARD, UP_TO_DATE, ERROR = [ 1 << x for x in range(11) ]
class FetchInfo(object):
"""A type presenting information about the fetch operation on exactly one refspec
The following members are defined:
ref # name of the reference to the changed
# remote head or FETCH_HEAD. Implementations can provide
# actual class instance which convert to a respective string
flags # additional flags to be & with enumeration members,
# i.e. info.flags & info.REJECTED
# is 0 if ref is FETCH_HEAD
note # additional notes given by the fetch-pack implementation intended for the user
old_commit_binsha# if info.flags & info.FORCED_UPDATE|info.FAST_FORWARD,
# field is set to the previous location of ref as binary sha or None"""
__slots__ = tuple()
NEW_TAG, NEW_HEAD, HEAD_UPTODATE, TAG_UPDATE, REJECTED, FORCED_UPDATE, \
FAST_FORWARD, ERROR = [ 1 << x for x in range(8) ]
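# (Illustrative) flag tests against the members above, e.g.:
#   if info.flags & FetchInfo.REJECTED: ...
#   if info.flags & (FetchInfo.FORCED_UPDATE | FetchInfo.FAST_FORWARD): ...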
class TransportDB(object):
"""A database which allows to transport objects from and to different locations
which are specified by urls (location) and refspecs (what to transport,
see http://www.kernel.org/pub/software/scm/git/docs/git-fetch.html).
At the beginning of a transport operation, it will be determined which objects
have to be sent (either by this or by the other side).
Afterwards a pack with the required objects is sent (or received). If there is
nothing to send, the pack will be empty.
As refspecs involve symbolic names for references to be handled, we require
RefParse functionality. How this is done is up to the actual implementation."""
# The following variables need to be set by the derived class
#{ Interface
def fetch(self, url, refspecs, progress=None, **kwargs):
"""Fetch the objects defined by the given refspec from the given url.
:param url: url identifying the source of the objects. It may also be
a symbol from which the respective url can be resolved, like the
name of the remote. The implementation should allow objects as input
as well; these are assumed to resolve to a meaningful string though.
:param refspecs: iterable of reference specifiers or RefSpec instance,
identifying the references to be fetched from the remote.
:param progress: RemoteProgress derived instance which receives progress messages for user consumption or None
:param kwargs: may be used for additional parameters that the actual implementation could
find useful.
:return: List of FetchInfo compatible instances which provide information about what
was previously fetched, in the order of the input refspecs.
:note: even if the operation fails, one of the returned FetchInfo instances
may still contain errors or failures in only part of the refspecs.
:raise: if any issue occurs during the transport or if the url is not
supported by the protocol.
"""
raise NotImplementedError()
def push(self, url, refspecs, progress=None, **kwargs):
"""Transport the objects identified by the given refspec to the remote
at the given url.
:param url: Describes the location which is to receive the objects
see fetch() for more details
:param refspecs: iterable of refspecs strings or RefSpec instances
to identify the objects to push
:param progress: see fetch()
:param kwargs: additional arguments which may be provided by the caller
as they may be useful to the actual implementation
:todo: what to return ?
:raise: if any issue arises during transport or if the url cannot be handled"""
raise NotImplementedError()
@property
def remotes(self):
""":return: An IterableList of Remote objects allowing to access and manipulate remotes
:note: Remote objects can also be used for the actual push or fetch operation"""
raise NotImplementedError()
def remote(self, name='origin'):
""":return: Remote object with the given name
:note: it does not necessarily exist, hence this is just a more convenient way
to construct Remote objects"""
raise NotImplementedError()
#} END interface
#{ Utility Methods
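# NOTE: 'Remote' is not defined or imported in this module; the two helpers
# below assume a compatible Remote type is provided by the concrete
# implementation's scope.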
def create_remote(self, name, url, **kwargs):
"""Create a new remote with the given name pointing to the given url
:return: Remote instance, compatible to the Remote interface"""
return Remote.create(self, name, url, **kwargs)
def delete_remote(self, remote):
"""Delete the given remote.
:param remote: a Remote instance"""
return Remote.remove(self, remote)
#} END utility methods
class ReferencesMixin(object):
"""Database providing reference objects which in turn point to database objects
like Commits or Tag(Object)s.
The returned types are compatible to the interfaces of the pure python
reference implementation in GitDB.ref"""
def resolve(self, name):
"""Resolve the given name into a binary sha. Valid names are as defined
in the rev-parse documentation http://www.kernel.org/pub/software/scm/git/docs/git-rev-parse.html
:return: binary sha matching the name
:raise AmbiguousObjectName:
:raise BadObject: """
raise NotImplementedError()
def resolve_object(self, name):
"""As ``resolve()``, but returns the Objecft instance pointed to by the
resolved binary sha
:return: Object instance of the correct type, e.g. shas pointing to commits
will be represented by a Commit object"""
raise NotImplementedError()
@property
def references(self):
""":return: iterable list of all Reference objects representing tags, heads
and remote references. This is the most general method to obtain any
references."""
raise NotImplementedError()
@property
def heads(self):
""":return: IterableList with HeadReference objects pointing to all
heads in the repository."""
raise NotImplementedError()
@property
def head(self):
""":return: HEAD Object pointing to the current head reference"""
raise NotImplementedError()
@property
def tags(self):
""":return: An IterableList of TagReferences or compatible items that
are available in this repo"""
raise NotImplementedError()
#{ Utility Methods
def tag(self, name):
""":return: Tag with the given name
:note: It does not necessarily exist, hence this is just a more convenient
way to construct TagReference objects"""
raise NotImplementedError()
def commit(self, rev=None):
"""The Commit object for the specified revision
:param rev: revision specifier, see git-rev-parse for viable options.
:return: Commit compatible object"""
raise NotImplementedError()
def iter_trees(self, *args, **kwargs):
""":return: Iterator yielding Tree compatible objects
:note: Takes all arguments known to iter_commits method"""
raise NotImplementedError()
def tree(self, rev=None):
"""The Tree (compatible) object for the given treeish revision
Examples::
repo.tree(repo.heads[0])
:param rev: is a revision pointing to a Treeish ( being a commit or tree )
:return: ``git.Tree``
:note:
If you need a non-root level tree, find it by iterating the root tree. Otherwise
it cannot know about its path relative to the repository root and subsequent
operations might have unexpected results."""
raise NotImplementedError()
def iter_commits(self, rev=None, paths='', **kwargs):
"""A list of Commit objects representing the history of a given ref/commit
:param rev:
revision specifier, see git-rev-parse for viable options.
If None, the active branch will be used.
:param paths:
is an optional path or a list of paths to limit the returned commits to
Commits that do not contain that path or the paths will not be returned.
:param kwargs:
Arguments to be passed to git-rev-list - common ones are
max_count and skip
:note: to receive only commits between two named revisions, use the
"revA..revB" revision specifier
:return: iterator yielding Commit compatible instances"""
raise NotImplementedError()
#} END utility methods
#{ Edit Methods
def create_head(self, path, commit='HEAD', force=False, logmsg=None):
"""Create a new head within the repository.
:param commit: a resolvable name to the commit or a Commit or Reference instance the new head should point to
:param force: if True, a head will be created even though it already exists
Otherwise an exception will be raised.
:param logmsg: message to append to the reference log. If None, a default message
will be used
:return: newly created Head instances"""
raise NotImplementedError()
def delete_head(self, *heads):
"""Delete the given heads
:param heads: list of Head references that are to be deleted"""
raise NotImplementedError()
def create_tag(self, path, ref='HEAD', message=None, force=False):
"""Create a new tag reference.
:param path: name or path of the new tag.
:param ref: resolvable name of the reference or commit, or Commit or Reference
instance describing the commit the tag should point to.
:param message: message to be attached to the tag reference. This will
create an actual Tag object carrying the message. Otherwise a TagReference
will be generated.
:param force: if True, the Tag will be created even if another tag already
exists at the given path. Otherwise an exception will be thrown
:return: TagReference object """
raise NotImplementedError()
def delete_tag(self, *tags):
"""Delete the given tag references
:param tags: TagReferences to delete"""
raise NotImplementedError()
#}END edit methods
class RepositoryPathsMixin(object):
"""Represents basic functionality of a full git repository. This involves an
optional working tree, a git directory with references and an object directory.
This type collects the respective paths and verifies the provided base path
truly is a git repository.
If the underlying type provides the config_reader() method, we can properly determine
whether this is a bare repository as well. Otherwise it will make an educated guess
based on the path name."""
#{ Subclass Interface
def _initialize(self, path):
"""initialize this instance with the given path. It may point to
any location within the repository's own data, as well as the working tree.
The implementation will move up and search for traces of a git repository,
which is indicated by a child directory ending with .git or the
current path portion ending with .git.
The paths made available for query are suitable for full git repositories
only. Plain object databases need to be fed the "objects" directory path.
:param path: the path to initialize the repository with
It is a path to either the root git directory or the bare git repo::
repo = Repo("/Users/mtrier/Development/git-python")
repo = Repo("/Users/mtrier/Development/git-python.git")
repo = Repo("~/Development/git-python.git")
repo = Repo("$REPOSITORIES/Development/git-python.git")
:raise InvalidDBRoot:
"""
raise NotImplementedError()
#} end subclass interface
#{ Object Interface
def __eq__(self, rhs):
raise NotImplementedError()
def __ne__(self, rhs):
raise NotImplementedError()
def __hash__(self):
raise NotImplementedError()
def __repr__(self):
raise NotImplementedError()
#} END object interface
#{ Interface
@property
def is_bare(self):
""":return: True if this is a bare repository
:note: this value is cached upon initialization"""
raise NotImplementedError()
@property
def git_dir(self):
""":return: path to directory containing this actual git repository (which
in turn provides access to objects and references)"""
raise NotImplementedError()
@property
def working_tree_dir(self):
""":return: path to directory containing the working tree checkout of our
git repository.
:raise AssertionError: If this is a bare repository"""
raise NotImplementedError()
@property
def objects_dir(self):
""":return: path to the repository's objects directory"""
raise NotImplementedError()
@property
def working_dir(self):
""":return: working directory of the git process or related tools, being
either the working_tree_dir if available or the git_path"""
raise NotImplementedError()
@property
def description(self):
""":return: description text associated with this repository or set the
description."""
raise NotImplementedError()
#} END interface
class ConfigurationMixin(object):
"""Interface providing configuration handler instances, which provide locked access
to a single git-style configuration file (INI-like format, using tabs to improve readability).
Configuration readers can be initialized with multiple files at once, whose information is concatenated
when reading. Lower-level files overwrite values from higher-level files, i.e. a repository configuration file
overwrites information coming from a system configuration file.
:note: for the 'repository' config level, a git_path() compatible type is required"""
config_level = ("system", "global", "repository")
#{ Interface
def config_reader(self, config_level=None):
"""
:return:
GitConfigParser allowing you to read the full git configuration, but not to write it.
The configuration will include values from the system, user and repository
configuration files.
:param config_level:
For possible values, see the config_writer method.
If None, all applicable levels will be used. Specify a level in case
you know exactly which file you wish to read, to prevent reading
multiple files.
:note: On Windows, the system configuration cannot currently be read as the
path is unknown; the global path will be used instead."""
raise NotImplementedError()
def config_writer(self, config_level="repository"):
"""
:return:
GitConfigParser allowing you to write values of the specified configuration file level.
Config writers should be retrieved, used to change the configuration, and written
right away, as they lock the configuration file in question and prevent others
from writing it.
:param config_level:
One of the following values:
system = system-wide configuration file
global = user-level configuration file
repository = configuration file for this repository only"""
raise NotImplementedError()
#} END interface
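# Usage sketch (hypothetical: assumes a concrete type implementing
# ConfigurationMixin whose GitConfigParser exposes get_value/set_value,
# as in common git-python implementations):
#
#   reader = repo.config_reader()              # read-only, all levels merged
#   email = reader.get_value('user', 'email')
#   writer = repo.config_writer('repository')  # locks the repository-level file
#   writer.set_value('user', 'email', 'me@example.com')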
class SubmoduleDB(object):
"""Interface providing access to git repository submodules.
The actual implementation is found in the Submodule object type, which is
currently only available in one implementation."""
@property
def submodules(self):
"""
:return: git.IterableList(Submodule, ...) of direct submodules
available from the current head"""
raise NotImplementedError()
def submodule(self, name):
""" :return: Submodule with the given name
:raise ValueError: If no such submodule exists"""
raise NotImplementedError()
def create_submodule(self, *args, **kwargs):
"""Create a new submodule
:note: See the documentation of Submodule.add for a description of the
applicable parameters
:return: the created submodule"""
raise NotImplementedError()
def iter_submodules(self, *args, **kwargs):
"""An iterator yielding Submodule instances, see Traversable interface
for a description of args and kwargs
:return: Iterator"""
raise NotImplementedError()
def submodule_update(self, *args, **kwargs):
"""Update the submodules, keeping the repository consistent as it will
take the previous state into consideration. For more information, please
see the documentation of RootModule.update"""
raise NotImplementedError()
class HighLevelRepository(object):
"""An interface combining several high-level repository functionality and properties"""
@property
def daemon_export(self):
""":return: True if the repository may be published by the git-daemon"""
raise NotImplementedError()
def is_dirty(self, index=True, working_tree=True, untracked_files=False):
"""
:return:
``True`` if the repository is considered dirty. By default it will react
like a git-status without untracked files, hence it is dirty if the
index or the working copy have changes."""
raise NotImplementedError()
@property
def untracked_files(self):
"""
:return:
list(str,...)
:note:
ignored files will not appear here, i.e. files mentioned in .gitignore.
Bare repositories never have untracked files"""
raise NotImplementedError()
def blame(self, rev, file):
"""The blame information for the given file at the given revision.
:param rev: revision specifier, see git-rev-parse for viable options.
:param file: path of the file whose blame information is to be retrieved
:return:
list: [Commit, list: [<line>]]
A list of tuples associating a Commit object with a list of lines that
changed within the given commit. The Commit objects will be given in order
of appearance."""
raise NotImplementedError()
@classmethod
def init(cls, path=None, mkdir=True):
"""Initialize a git repository at the given path if specified
:param path:
is the full path to the repo (traditionally ends with /<name>.git)
or None in which case the repository will be created in the current
working directory
:param mkdir:
if specified, the repository directory will be created if it doesn't
already exist. The directory is created with mode 0755.
Only effective if a path is explicitly given
:return: Instance pointing to the newly created repository with similar capabilities
of this class"""
raise NotImplementedError()
def clone(self, path, progress = None):
"""Create a clone from this repository.
:param path:
is the full path of the new repo (traditionally ends with ./<name>.git).
:param progress:
a RemoteProgress instance or None if no progress information is required
:return: ``git.Repo`` (the newly cloned repo)"""
raise NotImplementedError()
@classmethod
def clone_from(cls, url, to_path, progress = None):
"""Create a clone from the given URL
:param url: valid git url, see http://www.kernel.org/pub/software/scm/git/docs/git-clone.html#URLS
:param to_path: Path to which the repository should be cloned to
:param progress:
a RemoteProgress instance or None if no progress information is required
:return: instance pointing to the cloned directory with similar capabilities as this class"""
raise NotImplementedError()
def archive(self, ostream, treeish=None, prefix=None, **kwargs):
"""Archive the tree at the given revision.
:param ostream: file-compatible stream object to which the archive will be written
:param treeish: the treeish name/id, defaults to the active branch
:param prefix: optional prefix to prepend to each filename in the archive
:param kwargs:
Additional arguments passed to git-archive
NOTE: Use the 'format' argument to define the kind of format. Use
specialized ostreams to write any format supported by Python
:return: self"""
raise NotImplementedError()
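# Minimal end-to-end sketch (hypothetical: assumes a concrete Repo type
# implementing the interfaces above):
#
#   repo = Repo.init('/tmp/example.git')
#   clone = repo.clone('/tmp/example-clone')
#   for commit in clone.iter_commits('master', max_count=5):
#       print(commit)
#   clone.archive(open('/tmp/example.tar', 'wb'), treeish='master')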
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Javelin is a tool for creating, verifying, and deleting a small set of
resources in a declarative way.
Javelin is meant to be used as a way to validate quickly that resources can
survive an upgrade process.
Authentication
--------------
Javelin will be creating (and removing) users and tenants so it needs the admin
credentials of your cloud to operate properly. The corresponding info can be
given the usual way, either through CLI options or environment variables.
You're probably familiar with these, but just in case::
+----------+------------------+----------------------+
| Param | CLI | Environment Variable |
+----------+------------------+----------------------+
| Username | --os-username | OS_USERNAME |
| Password | --os-password | OS_PASSWORD |
| Tenant | --os-tenant-name | OS_TENANT_NAME |
+----------+------------------+----------------------+
Runtime Arguments
-----------------
**-m/--mode**: (Required) Has to be one of 'check', 'create' or 'destroy'. It
indicates which actions javelin is going to perform.
**-r/--resources**: (Required) The path to a YAML file describing the resources
used by Javelin.
**-d/--devstack-base**: (Required) The path to the devstack repo used to
retrieve artifacts (like images) that will be referenced in the resource files.
**-c/--config-file**: (Optional) The path to a valid Tempest config file
describing your cloud. Javelin may use this to determine if certain services
are enabled and modify its behavior accordingly.
Resource file
-------------
The resource file is a valid YAML file describing the resources that will be
created, checked and destroyed by javelin. Here's a canonical example of a
resource file::
tenants:
- javelin
- discuss
users:
- name: javelin
pass: gungnir
tenant: javelin
- name: javelin2
pass: gungnir2
tenant: discuss
# resources that we want to create
images:
- name: javelin_cirros
owner: javelin
file: cirros-0.3.2-x86_64-blank.img
format: ami
aki: cirros-0.3.2-x86_64-vmlinuz
ari: cirros-0.3.2-x86_64-initrd
servers:
- name: peltast
owner: javelin
flavor: m1.small
image: javelin_cirros
- name: hoplite
owner: javelin
flavor: m1.medium
image: javelin_cirros
An important piece of the resource definition is the *owner* field, which is
the user (that we've created) that is the owner of that resource. All
operations on that resource will happen as that regular user to ensure that
admin level access does not mask issues.
The check phase will act like a unit test, using well known assert methods to
verify that the correct resources exist.
"""
import argparse
import collections
import datetime
import os
import sys
import unittest
import netaddr
from tempest_lib import exceptions as lib_exc
import yaml
import tempest.auth
from tempest import config
from tempest.openstack.common import log as logging
from tempest.openstack.common import timeutils
from tempest.services.compute.json import flavors_client
from tempest.services.compute.json import security_groups_client
from tempest.services.compute.json import servers_client
from tempest.services.identity.json import identity_client
from tempest.services.image.v2.json import image_client
from tempest.services.network.json import network_client
from tempest.services.object_storage import container_client
from tempest.services.object_storage import object_client
from tempest.services.telemetry.json import telemetry_client
from tempest.services.volume.json import volumes_client
CONF = config.CONF
OPTS = {}
USERS = {}
RES = collections.defaultdict(list)
LOG = None
JAVELIN_START = datetime.datetime.utcnow()
class OSClient(object):
_creds = None
identity = None
servers = None
def __init__(self, user, pw, tenant):
default_params = {
'disable_ssl_certificate_validation':
CONF.identity.disable_ssl_certificate_validation,
'ca_certs': CONF.identity.ca_certificates_file,
'trace_requests': CONF.debug.trace_requests
}
default_params_with_timeout_values = {
'build_interval': CONF.compute.build_interval,
'build_timeout': CONF.compute.build_timeout
}
default_params_with_timeout_values.update(default_params)
compute_params = {
'service': CONF.compute.catalog_type,
'region': CONF.compute.region or CONF.identity.region,
'endpoint_type': CONF.compute.endpoint_type,
'build_interval': CONF.compute.build_interval,
'build_timeout': CONF.compute.build_timeout
}
compute_params.update(default_params)
object_storage_params = {
'service': CONF.object_storage.catalog_type,
'region': CONF.object_storage.region or CONF.identity.region,
'endpoint_type': CONF.object_storage.endpoint_type
}
object_storage_params.update(default_params)
_creds = tempest.auth.KeystoneV2Credentials(
username=user,
password=pw,
tenant_name=tenant)
_auth = tempest.auth.KeystoneV2AuthProvider(_creds, CONF.identity.uri)
self.identity = identity_client.IdentityClientJSON(
_auth,
CONF.identity.catalog_type,
CONF.identity.region,
endpoint_type='adminURL',
**default_params_with_timeout_values)
self.servers = servers_client.ServersClientJSON(_auth,
**compute_params)
self.flavors = flavors_client.FlavorsClientJSON(_auth,
**compute_params)
self.secgroups = security_groups_client.SecurityGroupsClientJSON(
_auth, **compute_params)
self.objects = object_client.ObjectClient(_auth,
**object_storage_params)
self.containers = container_client.ContainerClient(
_auth, **object_storage_params)
self.images = image_client.ImageClientV2JSON(_auth)
self.telemetry = telemetry_client.TelemetryClientJSON(
_auth,
CONF.telemetry.catalog_type,
CONF.identity.region,
endpoint_type=CONF.telemetry.endpoint_type,
**default_params_with_timeout_values)
self.volumes = volumes_client.VolumesClientJSON(
_auth,
CONF.volume.catalog_type,
CONF.volume.region or CONF.identity.region,
endpoint_type=CONF.volume.endpoint_type,
build_interval=CONF.volume.build_interval,
build_timeout=CONF.volume.build_timeout,
**default_params)
self.networks = network_client.NetworkClientJSON(
_auth,
CONF.network.catalog_type,
CONF.network.region or CONF.identity.region,
endpoint_type=CONF.network.endpoint_type,
build_interval=CONF.network.build_interval,
build_timeout=CONF.network.build_timeout,
**default_params)
def load_resources(fname):
"""Load the expected resources from a yaml file."""
with open(fname, 'r') as f:
return yaml.safe_load(f)
def keystone_admin():
return OSClient(OPTS.os_username, OPTS.os_password, OPTS.os_tenant_name)
def client_for_user(name):
LOG.debug("Entering client_for_user")
if name in USERS:
user = USERS[name]
LOG.debug("Created client for user %s" % user)
return OSClient(user['name'], user['pass'], user['tenant'])
else:
LOG.error("%s not found in USERS: %s" % (name, USERS))
###################
#
# TENANTS
#
###################
def create_tenants(tenants):
"""Create tenants from resource definition.
Don't create the tenants if they already exist.
"""
admin = keystone_admin()
body = admin.identity.list_tenants()
existing = [x['name'] for x in body]
for tenant in tenants:
if tenant not in existing:
admin.identity.create_tenant(tenant)
else:
LOG.warn("Tenant '%s' already exists in this environment" % tenant)
def destroy_tenants(tenants):
admin = keystone_admin()
for tenant in tenants:
tenant_id = admin.identity.get_tenant_by_name(tenant)['id']
admin.identity.delete_tenant(tenant_id)
##############
#
# USERS
#
##############
def _users_for_tenant(users, tenant):
u_for_t = []
for user in users:
for n in user:
if user[n]['tenant'] == tenant:
u_for_t.append(user[n])
return u_for_t
def _tenants_from_users(users):
tenants = set()
for user in users:
for n in user:
tenants.add(user[n]['tenant'])
return tenants
def _assign_swift_role(user):
admin = keystone_admin()
roles = admin.identity.list_roles()
role = next(r for r in roles if r['name'] == 'Member')
LOG.debug(USERS[user])
try:
admin.identity.assign_user_role(
USERS[user]['tenant_id'],
USERS[user]['id'],
role['id'])
except lib_exc.Conflict:
# don't care if it's already assigned
pass
def create_users(users):
"""Create tenants from resource definition.
Don't create the tenants if they already exist.
"""
global USERS
LOG.info("Creating users")
admin = keystone_admin()
for u in users:
try:
tenant = admin.identity.get_tenant_by_name(u['tenant'])
except lib_exc.NotFound:
LOG.error("Tenant: %s - not found" % u['tenant'])
continue
try:
admin.identity.get_user_by_username(tenant['id'], u['name'])
LOG.warn("User '%s' already exists in this environment"
% u['name'])
except lib_exc.NotFound:
admin.identity.create_user(
u['name'], u['pass'], tenant['id'],
"%s@%s" % (u['name'], tenant['id']),
enabled=True)
def destroy_users(users):
admin = keystone_admin()
for user in users:
tenant_id = admin.identity.get_tenant_by_name(user['tenant'])['id']
user_id = admin.identity.get_user_by_username(tenant_id,
user['name'])['id']
admin.identity.delete_user(user_id)
def collect_users(users):
global USERS
LOG.info("Collecting users")
admin = keystone_admin()
for u in users:
tenant = admin.identity.get_tenant_by_name(u['tenant'])
u['tenant_id'] = tenant['id']
USERS[u['name']] = u
body = admin.identity.get_user_by_username(tenant['id'], u['name'])
USERS[u['name']]['id'] = body['id']
class JavelinCheck(unittest.TestCase):
def __init__(self, users, resources):
super(JavelinCheck, self).__init__()
self.users = users
self.res = resources
def runTest(self, *args):
pass
def _ping_ip(self, ip_addr, count, namespace=None):
if namespace is None:
ping_cmd = "ping -c1 " + ip_addr
else:
ping_cmd = "sudo ip netns exec %s ping -c1 %s" % (namespace,
ip_addr)
for current in range(count):
return_code = os.system(ping_cmd)
if return_code == 0:
break
self.assertNotEqual(current, count - 1,
"Server is not pingable at %s" % ip_addr)
def check(self):
self.check_users()
self.check_objects()
self.check_servers()
self.check_volumes()
self.check_telemetry()
self.check_secgroups()
# Only check networking when neutron is enabled and ironic is disabled:
# tenant network isolation is not supported when using ironic. In that
# case, "admin" has set up a neutron flat network environment within a
# shared fixed network for all tenants to use, so network/subnet/router
# creation can be skipped and the server is booted the same as with
# nova network.
if (CONF.service_available.neutron and
not CONF.baremetal.driver_enabled):
self.check_networking()
def check_users(self):
"""Check that the users we expect to exist, do.
We don't use the resource list for this because we need to validate
that things like tenantId didn't drift across versions.
"""
LOG.info("checking users")
for name, user in self.users.iteritems():
client = keystone_admin()
found = client.identity.get_user(user['id'])
self.assertEqual(found['name'], user['name'])
self.assertEqual(found['tenantId'], user['tenant_id'])
# also ensure we can auth with that user, and do something
# on the cloud. We don't care about the results except that it
# remains authorized.
client = client_for_user(user['name'])
resp, body = client.servers.list_servers()
self.assertEqual(resp['status'], '200')
def check_objects(self):
"""Check that the objects created are still there."""
if not self.res.get('objects'):
return
LOG.info("checking objects")
for obj in self.res['objects']:
client = client_for_user(obj['owner'])
r, contents = client.objects.get_object(
obj['container'], obj['name'])
source = _file_contents(obj['file'])
self.assertEqual(contents, source)
def check_servers(self):
"""Check that the servers are still up and running."""
if not self.res.get('servers'):
return
LOG.info("checking servers")
for server in self.res['servers']:
client = client_for_user(server['owner'])
found = _get_server_by_name(client, server['name'])
self.assertIsNotNone(
found,
"Couldn't find expected server %s" % server['name'])
found = client.servers.get_server(found['id'])
# validate neutron is enabled and ironic disabled:
if (CONF.service_available.neutron and
not CONF.baremetal.driver_enabled):
for network_name, body in found['addresses'].items():
for addr in body:
ip = addr['addr']
if addr.get('OS-EXT-IPS:type', 'fixed') == 'fixed':
namespace = _get_router_namespace(client,
network_name)
self._ping_ip(ip, 60, namespace)
else:
self._ping_ip(ip, 60)
else:
addr = found['addresses']['private'][0]['addr']
self._ping_ip(addr, 60)
def check_secgroups(self):
"""Check that the security groups still exist."""
LOG.info("Checking security groups")
for secgroup in self.res['secgroups']:
client = client_for_user(secgroup['owner'])
found = _get_resource_by_name(client.secgroups, 'security_groups',
secgroup['name'])
self.assertIsNotNone(
found,
"Couldn't find expected secgroup %s" % secgroup['name'])
def check_telemetry(self):
"""Check that ceilometer provides a sane sample.
Confirm that there are more than one sample and that they have the
expected metadata.
If in check mode confirm that the oldest sample available is from
before the upgrade.
"""
if not self.res.get('telemetry'):
return
LOG.info("checking telemetry")
for server in self.res['servers']:
client = client_for_user(server['owner'])
body = client.telemetry.list_samples(
'instance',
query=('metadata.display_name', 'eq', server['name'])
)
self.assertTrue(len(body) >= 1, 'expecting at least one sample')
self._confirm_telemetry_sample(server, body[-1])
def check_volumes(self):
"""Check that the volumes are still there and attached."""
if not self.res.get('volumes'):
return
LOG.info("checking volumes")
for volume in self.res['volumes']:
client = client_for_user(volume['owner'])
vol_body = _get_volume_by_name(client, volume['name'])
self.assertIsNotNone(
vol_body,
"Couldn't find expected volume %s" % volume['name'])
# Verify that the volume's attachment can be retrieved
server_id = _get_server_by_name(client, volume['server'])['id']
attachment = client.volumes.get_attachment_from_volume(vol_body)
self.assertEqual(vol_body['id'], attachment['volume_id'])
self.assertEqual(server_id, attachment['server_id'])
def _confirm_telemetry_sample(self, server, sample):
"""Check this sample matches the expected resource metadata."""
# Confirm display_name
self.assertEqual(server['name'],
sample['resource_metadata']['display_name'])
# Confirm instance_type of flavor
flavor = sample['resource_metadata'].get(
'flavor.name',
sample['resource_metadata'].get('instance_type')
)
self.assertEqual(server['flavor'], flavor)
# Confirm the oldest sample was created before upgrade.
if OPTS.mode == 'check':
oldest_timestamp = timeutils.normalize_time(
timeutils.parse_isotime(sample['timestamp']))
self.assertTrue(
oldest_timestamp < JAVELIN_START,
'timestamp should come before start of second javelin run'
)
def check_networking(self):
"""Check that the networks are still there."""
for res_type in ('networks', 'subnets', 'routers'):
for res in self.res[res_type]:
client = client_for_user(res['owner'])
found = _get_resource_by_name(client.networks, res_type,
res['name'])
self.assertIsNotNone(
found,
"Couldn't find expected resource %s" % res['name'])
#######################
#
# OBJECTS
#
#######################
def _file_contents(fname):
with open(fname, 'r') as f:
return f.read()
def create_objects(objects):
if not objects:
return
LOG.info("Creating objects")
for obj in objects:
LOG.debug("Object %s" % obj)
_assign_swift_role(obj['owner'])
client = client_for_user(obj['owner'])
client.containers.create_container(obj['container'])
client.objects.create_object(
obj['container'], obj['name'],
_file_contents(obj['file']))
def destroy_objects(objects):
for obj in objects:
client = client_for_user(obj['owner'])
r, body = client.objects.delete_object(obj['container'], obj['name'])
if not (200 <= int(r['status']) < 300):
raise ValueError("unable to destroy object: [%s] %s" % (r, body))
#######################
#
# IMAGES
#
#######################
def _resolve_image(image, imgtype):
name = image[imgtype]
fname = os.path.join(OPTS.devstack_base, image['imgdir'], name)
return name, fname
def _get_image_by_name(client, name):
body = client.images.image_list()
for image in body:
if name == image['name']:
return image
return None
def create_images(images):
if not images:
return
LOG.info("Creating images")
for image in images:
client = client_for_user(image['owner'])
# only upload a new image if the name isn't there
if _get_image_by_name(client, image['name']):
LOG.info("Image '%s' already exists" % image['name'])
continue
# special handling for 3 part image
extras = {}
if image['format'] == 'ami':
name, fname = _resolve_image(image, 'aki')
aki = client.images.create_image(
'javelin_' + name, 'aki', 'aki')
client.images.store_image(aki.get('id'), open(fname, 'r'))
extras['kernel_id'] = aki.get('id')
name, fname = _resolve_image(image, 'ari')
ari = client.images.create_image(
'javelin_' + name, 'ari', 'ari')
client.images.store_image(ari.get('id'), open(fname, 'r'))
extras['ramdisk_id'] = ari.get('id')
_, fname = _resolve_image(image, 'file')
body = client.images.create_image(
image['name'], image['format'], image['format'], **extras)
image_id = body.get('id')
client.images.store_image(image_id, open(fname, 'r'))
def destroy_images(images):
if not images:
return
LOG.info("Destroying images")
for image in images:
client = client_for_user(image['owner'])
response = _get_image_by_name(client, image['name'])
if not response:
LOG.info("Image '%s' does not exists" % image['name'])
continue
client.images.delete_image(response['id'])
#######################
#
# NETWORKS
#
#######################
def _get_router_namespace(client, network):
network_id = _get_resource_by_name(client.networks,
'networks', network)['id']
n_body = client.networks.list_routers()
for router in n_body['routers']:
router_id = router['id']
r_body = client.networks.list_router_interfaces(router_id)
for port in r_body['ports']:
if port['network_id'] == network_id:
return "qrouter-%s" % router_id
def _get_resource_by_name(client, resource, name):
get_resources = getattr(client, 'list_%s' % resource)
if get_resources is None:
raise AttributeError("client doesn't have method list_%s" % resource)
# Until all tempest client methods are changed to return only one value,
# we cannot assume they all have the same signature, so we need to discard
# the unused first response value if two values are returned.
body = get_resources()
if isinstance(body, tuple):
body = body[1]
if isinstance(body, dict):
body = body[resource]
for res in body:
if name == res['name']:
return res
raise ValueError('%s not found in %s resources' % (name, resource))
def create_networks(networks):
LOG.info("Creating networks")
for network in networks:
client = client_for_user(network['owner'])
# only create a network if the name isn't here
body = client.networks.list_networks()
if any(item['name'] == network['name'] for item in body['networks']):
LOG.warning("Dupplicated network name: %s" % network['name'])
continue
client.networks.create_network(name=network['name'])
def destroy_networks(networks):
LOG.info("Destroying subnets")
for network in networks:
client = client_for_user(network['owner'])
network_id = _get_resource_by_name(client.networks, 'networks',
network['name'])['id']
client.networks.delete_network(network_id)
def create_subnets(subnets):
LOG.info("Creating subnets")
for subnet in subnets:
client = client_for_user(subnet['owner'])
network = _get_resource_by_name(client.networks, 'networks',
subnet['network'])
ip_version = netaddr.IPNetwork(subnet['range']).version
# ensure we don't overlap with another subnet in the network
try:
client.networks.create_subnet(network_id=network['id'],
cidr=subnet['range'],
name=subnet['name'],
ip_version=ip_version)
except lib_exc.BadRequest as e:
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
if not is_overlapping_cidr:
raise
def destroy_subnets(subnets):
LOG.info("Destroying subnets")
for subnet in subnets:
client = client_for_user(subnet['owner'])
subnet_id = _get_resource_by_name(client.networks,
'subnets', subnet['name'])['id']
client.networks.delete_subnet(subnet_id)
def create_routers(routers):
LOG.info("Creating routers")
for router in routers:
client = client_for_user(router['owner'])
# only create a router if the name isn't here
body = client.networks.list_routers()
if any(item['name'] == router['name'] for item in body['routers']):
LOG.warning("Dupplicated router name: %s" % router['name'])
continue
client.networks.create_router(router['name'])
def destroy_routers(routers):
LOG.info("Destroying routers")
for router in routers:
client = client_for_user(router['owner'])
router_id = _get_resource_by_name(client.networks,
'routers', router['name'])['id']
for subnet in router['subnet']:
subnet_id = _get_resource_by_name(client.networks,
'subnets', subnet)['id']
client.networks.remove_router_interface_with_subnet_id(router_id,
subnet_id)
client.networks.delete_router(router_id)
def add_router_interface(routers):
for router in routers:
client = client_for_user(router['owner'])
router_id = _get_resource_by_name(client.networks,
'routers', router['name'])['id']
for subnet in router['subnet']:
subnet_id = _get_resource_by_name(client.networks,
'subnets', subnet)['id']
# connect routers to their subnets
client.networks.add_router_interface_with_subnet_id(router_id,
subnet_id)
# connect routers to the external network if "gateway" is set
if router['gateway']:
if CONF.network.public_network_id:
ext_net = CONF.network.public_network_id
client.networks._update_router(
router_id, set_enable_snat=True,
external_gateway_info={"network_id": ext_net})
else:
raise ValueError('public_network_id is not configured.')
#######################
#
# SERVERS
#
#######################
def _get_server_by_name(client, name):
r, body = client.servers.list_servers()
for server in body['servers']:
if name == server['name']:
return server
return None
def _get_flavor_by_name(client, name):
body = client.flavors.list_flavors()
for flavor in body:
if name == flavor['name']:
return flavor
return None
def create_servers(servers):
if not servers:
return
LOG.info("Creating servers")
for server in servers:
client = client_for_user(server['owner'])
if _get_server_by_name(client, server['name']):
LOG.info("Server '%s' already exists" % server['name'])
continue
image_id = _get_image_by_name(client, server['image'])['id']
flavor_id = _get_flavor_by_name(client, server['flavor'])['id']
# validate neutron is enabled and ironic disabled
kwargs = dict()
if (CONF.service_available.neutron and
not CONF.baremetal.driver_enabled and server.get('networks')):
get_net_id = lambda x: (_get_resource_by_name(
client.networks, 'networks', x)['id'])
kwargs['networks'] = [{'uuid': get_net_id(network)}
for network in server['networks']]
body = client.servers.create_server(
server['name'], image_id, flavor_id, **kwargs)
server_id = body['id']
client.servers.wait_for_server_status(server_id, 'ACTIVE')
# add the security group(s) after the server spawns
for secgroup in server['secgroups']:
client.servers.add_security_group(server_id, secgroup)
def destroy_servers(servers):
if not servers:
return
LOG.info("Destroying servers")
for server in servers:
client = client_for_user(server['owner'])
response = _get_server_by_name(client, server['name'])
if not response:
LOG.info("Server '%s' does not exist" % server['name'])
continue
client.servers.delete_server(response['id'])
client.servers.wait_for_server_termination(response['id'],
ignore_error=True)
def create_secgroups(secgroups):
LOG.info("Creating security groups")
for secgroup in secgroups:
client = client_for_user(secgroup['owner'])
# only create a security group if the name isn't already present,
# i.e. a security group may be shared by several servers
body = client.secgroups.list_security_groups()
if any(item['name'] == secgroup['name'] for item in body):
LOG.warning("Security group '%s' already exists" %
secgroup['name'])
continue
body = client.secgroups.create_security_group(
secgroup['name'], secgroup['description'])
secgroup_id = body['id']
# for each security group, create the rules
for rule in secgroup['rules']:
ip_proto, from_port, to_port, cidr = rule.split()
client.secgroups.create_security_group_rule(
secgroup_id, ip_proto, from_port, to_port, cidr=cidr)
def destroy_secgroups(secgroups):
LOG.info("Destroying security groups")
for secgroup in secgroups:
client = client_for_user(secgroup['owner'])
sg_id = _get_resource_by_name(client.secgroups,
'security_groups',
secgroup['name'])
# sg rules are deleted automatically
client.secgroups.delete_security_group(sg_id['id'])
#######################
#
# VOLUMES
#
#######################
def _get_volume_by_name(client, name):
body = client.volumes.list_volumes()
for volume in body:
if name == volume['display_name']:
return volume
return None
def create_volumes(volumes):
if not volumes:
return
LOG.info("Creating volumes")
for volume in volumes:
client = client_for_user(volume['owner'])
# only create a volume if the name isn't here
if _get_volume_by_name(client, volume['name']):
LOG.info("volume '%s' already exists" % volume['name'])
continue
size = volume['gb']
v_name = volume['name']
body = client.volumes.create_volume(size=size,
display_name=v_name)
client.volumes.wait_for_volume_status(body['id'], 'available')
def destroy_volumes(volumes):
for volume in volumes:
client = client_for_user(volume['owner'])
volume_id = _get_volume_by_name(client, volume['name'])['id']
client.volumes.detach_volume(volume_id)
client.volumes.delete_volume(volume_id)
def attach_volumes(volumes):
for volume in volumes:
client = client_for_user(volume['owner'])
server_id = _get_server_by_name(client, volume['server'])['id']
volume_id = _get_volume_by_name(client, volume['name'])['id']
device = volume['device']
client.volumes.attach_volume(volume_id, server_id, device)
#######################
#
# MAIN LOGIC
#
#######################
def create_resources():
LOG.info("Creating Resources")
# first create keystone level resources, and we need to be admin
# for those.
create_tenants(RES['tenants'])
create_users(RES['users'])
collect_users(RES['users'])
# next create resources in a well known order
create_objects(RES['objects'])
create_images(RES['images'])
# validate neutron is enabled and ironic is disabled
if CONF.service_available.neutron and not CONF.baremetal.driver_enabled:
create_networks(RES['networks'])
create_subnets(RES['subnets'])
create_routers(RES['routers'])
add_router_interface(RES['routers'])
create_secgroups(RES['secgroups'])
create_servers(RES['servers'])
create_volumes(RES['volumes'])
attach_volumes(RES['volumes'])
def destroy_resources():
LOG.info("Destroying Resources")
# Destroy in inverse order of create
destroy_servers(RES['servers'])
destroy_images(RES['images'])
destroy_objects(RES['objects'])
destroy_volumes(RES['volumes'])
if CONF.service_available.neutron and not CONF.baremetal.driver_enabled:
destroy_routers(RES['routers'])
destroy_subnets(RES['subnets'])
destroy_networks(RES['networks'])
destroy_secgroups(RES['secgroups'])
destroy_users(RES['users'])
destroy_tenants(RES['tenants'])
LOG.warn("Destroy mode incomplete")
def get_options():
global OPTS
parser = argparse.ArgumentParser(
description='Create and validate a fixed set of OpenStack resources')
parser.add_argument('-m', '--mode',
metavar='<create|check|destroy>',
required=True,
help=('One of (create, check, destroy)'))
parser.add_argument('-r', '--resources',
required=True,
metavar='resourcefile.yaml',
help='Resources definition yaml file')
parser.add_argument(
'-d', '--devstack-base',
required=True,
metavar='/opt/stack/old',
help='Devstack base directory for retrieving artifacts')
parser.add_argument(
'-c', '--config-file',
metavar='/etc/tempest.conf',
help='path to javelin2(tempest) config file')
# auth bits, letting us also just source the devstack openrc
parser.add_argument('--os-username',
metavar='<auth-user-name>',
default=os.environ.get('OS_USERNAME'),
help=('Defaults to env[OS_USERNAME].'))
parser.add_argument('--os-password',
metavar='<auth-password>',
default=os.environ.get('OS_PASSWORD'),
help=('Defaults to env[OS_PASSWORD].'))
parser.add_argument('--os-tenant-name',
metavar='<auth-tenant-name>',
default=os.environ.get('OS_TENANT_NAME'),
help=('Defaults to env[OS_TENANT_NAME].'))
OPTS = parser.parse_args()
if OPTS.mode not in ('create', 'check', 'destroy'):
print("ERROR: Unknown mode -m %s\n" % OPTS.mode)
parser.print_help()
sys.exit(1)
if OPTS.config_file:
config.CONF.set_config_path(OPTS.config_file)
def setup_logging():
global LOG
logging.setup(__name__)
LOG = logging.getLogger(__name__)
def main():
global RES
get_options()
setup_logging()
RES.update(load_resources(OPTS.resources))
if OPTS.mode == 'create':
create_resources()
# Make sure the resources we just created actually work
checker = JavelinCheck(USERS, RES)
checker.check()
elif OPTS.mode == 'check':
collect_users(RES['users'])
checker = JavelinCheck(USERS, RES)
checker.check()
elif OPTS.mode == 'destroy':
collect_users(RES['users'])
destroy_resources()
else:
LOG.error('Unknown mode %s' % OPTS.mode)
return 1
LOG.info('javelin2 successfully finished')
return 0
if __name__ == "__main__":
sys.exit(main())
import numpy as np
from histomicstk.utils import gradient_diffusion
def gvf_tracking(I, Mask, K=1000, Diffusions=10, Mu=5, Lambda=5, Iterations=10,
dT=0.05):
"""
Performs gradient-field tracking to segment smoothed images of cell nuclei.
Takes as input a smoothed intensity or Laplacian-of-Gaussian filtered image
and a foreground mask, and groups pixels by tracking them to mutual
gradient sinks. Typically requires merging of sinks (seeds) as a
post-processing step.
Parameters
----------
I : array_like
Smoothed intensity or log-filtered response where nuclei regions have
larger intensity values than background.
Mask : array_like
Binary mask where foreground objects have value 1, and background
objects have value 0. Used to restrict influence of background vectors
on diffusion process and to reduce tracking computations.
K : float
Number of steps to check for tracking cycle. Default value = 1000.
Diffusions : float
Number of diffusion iterations to perform on the gradient field.
Default value = 10.
Mu : float
Weight parameter from Navier-Stokes diffusion - weights divergence and
Laplacian terms. Default value = 5.
Lambda : float
Weight parameter from Navier-Stokes diffusion - used to weight
divergence. Default value = 5.
Iterations : float
Number of time-steps to use in Navier-Stokes diffusion. Default value =
10.
dT : float
Timestep to be used in Navier-Stokes diffusion. Default value = 0.05.
Returns
-------
Segmentation : array_like
Label image where positive values correspond to foreground pixels that
share mutual sinks.
Sinks : array_like
N x 2 array containing the (x,y) locations of the tracking sinks. Each
row is an (x,y) pair - in that order.
See Also
--------
histomicstk.utils.gradient_diffusion,
histomicstk.segmentation.label.shuffle
References
----------
.. [#] G. Li et al "3D cell nuclei segmentation based on gradient flow
tracking" in BMC Cell Biology,vol.40,no.8, 2007.
"""
# get image shape
M = I.shape[0]
N = I.shape[1]
# calculate gradient
dy, dx = np.gradient(I)
# diffusion iterations
if Diffusions > 0:
dx, dy = gradient_diffusion(dx, dy, Mask, Mu, Lambda, Diffusions,
dT)
# normalize to unit magnitude
Mag = ((dx**2 + dy**2)**0.5 + np.finfo(float).eps)
dy = dy / Mag
dx = dx / Mag
# define mask to track pixels that are mapped to a sink
Mapped = np.zeros(I.shape)
# define label image
Segmentation = np.zeros(I.shape)
# initialize lists of sinks
Sinks = []
# define coordinates for foreground pixels (Mask == 1)
i, j = np.nonzero(Mask)
# track pixels
for index, (x, y) in enumerate(zip(j, i)):
# initialize angle, trajectory length, novel flag, and allocation count
phi = 0
points = 1
novel = 1
alloc = 1
# initialize trajectory
Trajectory = np.zeros((K, 2))
Trajectory[0, 0] = x
Trajectory[0, 1] = y
# track while angle defined by successive steps is < np.pi / 2
while phi < np.pi / 2:
# calculate step
xStep = round_float(dx[Trajectory[points-1, 1],
Trajectory[points-1, 0]])
yStep = round_float(dy[Trajectory[points-1, 1],
Trajectory[points-1, 0]])
# check image edge
if ((Trajectory[points-1, 0] + xStep < 0) or
(Trajectory[points-1, 0] + xStep > N-1) or
(Trajectory[points-1, 1] + yStep < 0) or
(Trajectory[points-1, 1] + yStep > M-1)):
break
# add new point to trajectory list
if points < K: # buffer is not overrun
Trajectory[points, 0] = Trajectory[points-1, 0] + xStep
Trajectory[points, 1] = Trajectory[points-1, 1] + yStep
else: # buffer overrun
# check for cycle
cycle = detect_cycle(Trajectory, points)
if cycle == points: # no cycle, simple overflow. grow buffer.
# copy into a larger buffer, preserving the existing prefix
temp = Trajectory
alloc += 1
Trajectory = np.zeros((K*alloc, 2))
Trajectory[0:K*(alloc-1), ] = temp
# add new point
Trajectory[points, 0] = Trajectory[points-1, 0] + xStep
Trajectory[points, 1] = Trajectory[points-1, 1] + yStep
else: # overflow due to cycle, terminate tracking
points = cycle
# check mapping
if Mapped[Trajectory[points, 1], Trajectory[points, 0]] == 1:
novel = 0
phi = np.pi
elif Mask[Trajectory[points, 1], Trajectory[points, 0]] == 0:
phi = np.pi
else:
phi = np.arccos(dy[Trajectory[points-1, 1],
Trajectory[points-1, 0]] *
dy[Trajectory[points, 1],
Trajectory[points, 0]] +
dx[Trajectory[points-1, 1],
Trajectory[points-1, 0]] *
dx[Trajectory[points, 1],
Trajectory[points, 0]])
# increment trajectory length counter
points += 1
# determine if sink is novel
if novel == 1:
# record sinks
Sinks.append(Trajectory[points-1, ])
# add trajectory to label image with new sink value, add mapping
for j in range(points):
Segmentation[Trajectory[j, 1], Trajectory[j, 0]] = len(Sinks)
Mapped[Trajectory[j, 1], Trajectory[j, 0]] = 1
else:
# add trajectory to label image with sink value of final point
for j in range(points):
Segmentation[Trajectory[j, 1], Trajectory[j, 0]] = \
Segmentation[Trajectory[points-1, 1],
Trajectory[points-1, 0]]
# convert Sinks to numpy array
Sinks = np.asarray(Sinks)
return Segmentation, Sinks
def merge_sinks(Label, Sinks, Radius=5):
"""
Merges attraction basins obtained from gradient flow tracking using
sink locations.
Parameters
----------
Label : array_like
Label image where positive values correspond to foreground pixels that
share mutual sinks.
Sinks : array_like
N x 2 array containing the (x,y) locations of the tracking sinks. Each
row is an (x,y) pair - in that order.
Radius : float
Radius used to merge sinks. Sinks closer than this radius to one
another will have their regions of attraction merged.
Default value = 5.
Returns
-------
Merged : array_like
Label image where attraction regions are merged.
"""
import skimage.morphology as mp
from skimage import measure as ms
# build seed image
SeedImage = np.zeros(Label.shape)
for i in range(Sinks.shape[0]):
SeedImage[Sinks[i, 1], Sinks[i, 0]] = i+1
# dilate sink image
Dilated = mp.binary_dilation(SeedImage, mp.disk(Radius))
# generate new labels for merged seeds, define memberships
Labels = ms.label(Dilated)
New = Labels[Sinks[:, 1].astype(int), Sinks[:, 0].astype(int)]
# get unique list of seed clusters
Unique = np.arange(1, New.max()+1)
# generate new seed list
Merged = np.zeros(Label.shape)
# get pixel list for each sink object
Props = ms.regionprops(Label.astype(int))
# fill in new values
for i in Unique:
Indices = np.nonzero(New == i)[0]
for j in Indices:
Coords = Props[j].coords
Merged[Coords[:, 0], Coords[:, 1]] = i
return Merged
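# Usage sketch (illustrative: a real pipeline would pass a smoothed or
# LoG-filtered image rather than random data):
#
#   import numpy as np
#   I = np.random.rand(128, 128)          # stand-in for a smoothed image
#   Mask = (I > 0.5).astype(int)          # stand-in foreground mask
#   Segmentation, Sinks = gvf_tracking(I, Mask, Diffusions=10)
#   Merged = merge_sinks(Segmentation, Sinks, Radius=5)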
def detect_cycle(Trajectory, points):
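# Returns the length of the acyclic prefix of the first `points` trajectory
# steps: positions are marked on an occupancy map of the trajectory's
# bounding box, and counting stops at the first revisited position
# (length == points when no cycle is present).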
# initialize trajectory length
length = 0
# identify trajectory bounding box
xMin = np.min(Trajectory[0:points, 0])
xMax = np.max(Trajectory[0:points, 0])
xRange = xMax - xMin + 1
yMin = np.min(Trajectory[0:points, 1])
yMax = np.max(Trajectory[0:points, 1])
yRange = yMax - yMin + 1
# fill in trajectory map
Map = np.zeros((yRange, xRange))
for i in range(points):
if Map[Trajectory[i, 1]-yMin, Trajectory[i, 0]-xMin] == 1:
break
else:
Map[Trajectory[i, 1]-yMin, Trajectory[i, 0]-xMin] = 1
length += 1
return length
def round_float(x):
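# Round to the nearest integer, with ties (.5) rounded away from zero,
# e.g. round_float(2.5) -> 3.0 and round_float(-2.5) -> -3.0.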
if x >= 0.0:
t = np.ceil(x)
if t - x > 0.5:
t -= 1.0
return t
else:
t = np.ceil(-x)
if t + x > 0.5:
t -= 1.0
return -t
import os
from contextlib import contextmanager
from unittest import mock
import pytest
import great_expectations.exceptions as gee
from great_expectations.data_context.util import (
PasswordMasker,
parse_substitution_variable,
secretmanager,
substitute_value_from_aws_secrets_manager,
substitute_value_from_azure_keyvault,
substitute_value_from_gcp_secret_manager,
substitute_value_from_secret_store,
)
from great_expectations.util import load_class
def test_load_class_raises_error_when_module_not_found():
with pytest.raises(gee.PluginModuleNotFoundError):
load_class("foo", "bar")
def test_load_class_raises_error_when_class_not_found():
with pytest.raises(gee.PluginClassNotFoundError):
load_class("TotallyNotARealClass", "great_expectations.datasource")
def test_load_class_raises_error_when_class_name_is_None():
with pytest.raises(TypeError):
load_class(None, "great_expectations.datasource")
def test_load_class_raises_error_when_class_name_is_not_string():
for bad_input in [1, 1.3, ["a"], {"foo": "bar"}]:
with pytest.raises(TypeError):
load_class(bad_input, "great_expectations.datasource")
def test_load_class_raises_error_when_module_name_is_None():
with pytest.raises(TypeError):
load_class("foo", None)
def test_load_class_raises_error_when_module_name_is_not_string():
for bad_input in [1, 1.3, ["a"], {"foo": "bar"}]:
with pytest.raises(TypeError):
load_class(bad_input, "great_expectations.datasource")
def test_password_masker_mask_db_url(monkeypatch, tmp_path):
"""
What does this test and why?
The PasswordMasker.mask_db_url() should mask passwords consistently in database urls. The output of mask_db_url should be the same whether use_urlparse is set to True or False.
This test uses database url examples from
https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls
"""
# PostgreSQL (if installed in test environment)
# default
db_hostname = os.getenv("GE_TEST_LOCAL_DB_HOSTNAME", "localhost")
try:
assert (
PasswordMasker.mask_db_url(
f"postgresql://scott:tiger@{db_hostname}:65432/mydatabase"
)
== f"postgresql://scott:***@{db_hostname}:65432/mydatabase"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
f"postgresql://scott:tiger@{db_hostname}:65432/mydatabase",
use_urlparse=True,
)
== f"postgresql://scott:***@{db_hostname}:65432/mydatabase"
)
# missing port number, using urlparse
assert (
PasswordMasker.mask_db_url(
f"postgresql://scott:tiger@{db_hostname}/mydatabase", use_urlparse=True
)
== f"postgresql://scott:***@{db_hostname}/mydatabase"
)
# psycopg2 (if installed in test environment)
try:
assert (
PasswordMasker.mask_db_url(
f"postgresql+psycopg2://scott:tiger@{db_hostname}:65432/mydatabase"
)
== f"postgresql+psycopg2://scott:***@{db_hostname}:65432/mydatabase"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
f"postgresql+psycopg2://scott:tiger@{db_hostname}:65432/mydatabase",
use_urlparse=True,
)
== f"postgresql+psycopg2://scott:***@{db_hostname}:65432/mydatabase"
)
# pg8000 (if installed in test environment)
try:
assert (
PasswordMasker.mask_db_url(
f"postgresql+pg8000://scott:tiger@{db_hostname}:65432/mydatabase"
)
== f"postgresql+pg8000://scott:***@{db_hostname}:65432/mydatabase"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
f"postgresql+pg8000://scott:tiger@{db_hostname}:65432/mydatabase",
use_urlparse=True,
)
== f"postgresql+pg8000://scott:***@{db_hostname}:65432/mydatabase"
)
# MySQL
# default (if installed in test environment)
try:
assert (
PasswordMasker.mask_db_url(f"mysql://scott:tiger@{db_hostname}:65432/foo")
== f"mysql://scott:***@{db_hostname}:65432/foo"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
f"mysql://scott:tiger@{db_hostname}:65432/foo", use_urlparse=True
)
== f"mysql://scott:***@{db_hostname}:65432/foo"
)
# mysqlclient (a maintained fork of MySQL-Python) (if installed in test environment)
try:
assert (
PasswordMasker.mask_db_url(
f"mysql+mysqldb://scott:tiger@{db_hostname}:65432/foo"
)
== f"mysql+mysqldb://scott:***@{db_hostname}:65432/foo"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
f"mysql+mysqldb://scott:tiger@{db_hostname}:65432/foo", use_urlparse=True
)
== f"mysql+mysqldb://scott:***@{db_hostname}:65432/foo"
)
# PyMySQL (if installed in test environment)
try:
assert (
PasswordMasker.mask_db_url(
f"mysql+pymysql://scott:tiger@{db_hostname}:65432/foo"
)
== f"mysql+pymysql://scott:***@{db_hostname}:65432/foo"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
f"mysql+pymysql://scott:tiger@{db_hostname}:65432/foo", use_urlparse=True
)
== f"mysql+pymysql://scott:***@{db_hostname}:65432/foo"
)
# Oracle (if installed in test environment)
url_host = os.getenv("GE_TEST_LOCALHOST_URL", "127.0.0.1")
try:
assert (
PasswordMasker.mask_db_url(f"oracle://scott:tiger@{url_host}:1521/sidname")
== f"oracle://scott:***@{url_host}:1521/sidname"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
f"oracle://scott:tiger@{url_host}:1521/sidname", use_urlparse=True
)
== f"oracle://scott:***@{url_host}:1521/sidname"
)
try:
assert (
PasswordMasker.mask_db_url("oracle+cx_oracle://scott:tiger@tnsname")
== "oracle+cx_oracle://scott:***@tnsname"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
"oracle+cx_oracle://scott:tiger@tnsname", use_urlparse=True
)
== "oracle+cx_oracle://scott:***@tnsname"
)
# Microsoft SQL Server
# pyodbc (if installed in test environment)
try:
assert (
PasswordMasker.mask_db_url("mssql+pyodbc://scott:tiger@mydsn")
== "mssql+pyodbc://scott:***@mydsn"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
"mssql+pyodbc://scott:tiger@mydsn", use_urlparse=True
)
== "mssql+pyodbc://scott:***@mydsn"
)
# pymssql (if installed in test environment)
try:
assert (
PasswordMasker.mask_db_url(
f"mssql+pymssql://scott:tiger@{db_hostname}:12345/dbname"
)
== f"mssql+pymssql://scott:***@{db_hostname}:12345/dbname"
)
except ModuleNotFoundError:
pass
assert (
PasswordMasker.mask_db_url(
f"mssql+pymssql://scott:tiger@{db_hostname}:12345/dbname", use_urlparse=True
)
== f"mssql+pymssql://scott:***@{db_hostname}:12345/dbname"
)
# SQLite
# relative path
temp_dir = tmp_path / "sqlite_tests"
temp_dir.mkdir()
monkeypatch.chdir(temp_dir)
assert (
PasswordMasker.mask_db_url("sqlite:///something/foo.db")
== "sqlite:///something/foo.db"
)
assert (
PasswordMasker.mask_db_url("sqlite:///something/foo.db", use_urlparse=True)
== "sqlite:///something/foo.db"
)
# absolute path
# Unix/Mac - 4 initial slashes in total
assert (
PasswordMasker.mask_db_url("sqlite:////absolute/path/to/foo.db")
== "sqlite:////absolute/path/to/foo.db"
)
assert (
PasswordMasker.mask_db_url(
"sqlite:////absolute/path/to/foo.db", use_urlparse=True
)
== "sqlite:////absolute/path/to/foo.db"
)
# Windows
assert (
PasswordMasker.mask_db_url("sqlite:///C:\\path\\to\\foo.db")
== "sqlite:///C:\\path\\to\\foo.db"
)
assert (
PasswordMasker.mask_db_url("sqlite:///C:\\path\\to\\foo.db", use_urlparse=True)
== "sqlite:///C:\\path\\to\\foo.db"
)
# Windows alternative using raw string
assert (
PasswordMasker.mask_db_url(r"sqlite:///C:\path\to\foo.db")
== r"sqlite:///C:\path\to\foo.db"
)
assert (
PasswordMasker.mask_db_url(r"sqlite:///C:\path\to\foo.db", use_urlparse=True)
== r"sqlite:///C:\path\to\foo.db"
)
# in-memory
assert PasswordMasker.mask_db_url("sqlite://") == "sqlite://"
assert PasswordMasker.mask_db_url("sqlite://", use_urlparse=True) == "sqlite://"
def test_parse_substitution_variable():
"""
What does this test and why?
Ensure parse_substitution_variable works as expected.
Returns:
"""
assert parse_substitution_variable("${SOME_VAR}") == "SOME_VAR"
assert parse_substitution_variable("$SOME_VAR") == "SOME_VAR"
assert parse_substitution_variable("SOME_STRING") is None
assert parse_substitution_variable("SOME_$TRING") is None
assert parse_substitution_variable("${some_var}") == "some_var"
assert parse_substitution_variable("$some_var") == "some_var"
assert parse_substitution_variable("some_string") is None
assert parse_substitution_variable("some_$tring") is None
assert parse_substitution_variable("${SOME_$TRING}") is None
assert parse_substitution_variable("$SOME_$TRING") == "SOME_"
@contextmanager
def does_not_raise():
yield
@pytest.mark.parametrize(
"input_value,method_to_patch,return_value",
[
("any_value", None, "any_value"),
("secret|any_value", None, "secret|any_value"),
(
"secret|arn:aws:secretsmanager:region-name-1:123456789012:secret:my-secret",
"great_expectations.data_context.util.substitute_value_from_aws_secrets_manager",
"success",
),
(
"secret|projects/project_id/secrets/my_secret",
"great_expectations.data_context.util.substitute_value_from_gcp_secret_manager",
"success",
),
(
"secret|https://my-vault-name.vault.azure.net/secrets/my_secret",
"great_expectations.data_context.util.substitute_value_from_azure_keyvault",
"success",
),
],
)
def test_substitute_value_from_secret_store(input_value, method_to_patch, return_value):
if method_to_patch:
with mock.patch(method_to_patch, return_value=return_value):
assert substitute_value_from_secret_store(value=input_value) == return_value
else:
assert substitute_value_from_secret_store(value=input_value) == return_value
class MockedBoto3Client:
def __init__(self, secret_response):
self.secret_response = secret_response
def get_secret_value(self, *args, **kwargs):
return self.secret_response
class MockedBoto3Session:
def __init__(self, secret_response):
self.secret_response = secret_response
def __call__(self):
return self
def client(self, *args, **kwargs):
return MockedBoto3Client(self.secret_response)
@pytest.mark.parametrize(
"input_value,secret_response,raises,expected",
[
(
"secret|arn:aws:secretsmanager:region-name-1:123456789012:secret:my-secret",
{"SecretString": "value"},
does_not_raise(),
"value",
),
(
"secret|arn:aws:secretsmanager:region-name-1:123456789012:secret:my-secret",
{"SecretBinary": b"dmFsdWU="},
does_not_raise(),
"value",
),
(
"secret|arn:aws:secretsmanager:region-name-1:123456789012:secret:my-secret|key",
{"SecretString": '{"key": "value"}'},
does_not_raise(),
"value",
),
(
"secret|arn:aws:secretsmanager:region-name-1:123456789012:secret:my-secret|key",
{"SecretBinary": b"eyJrZXkiOiAidmFsdWUifQ=="},
does_not_raise(),
"value",
),
(
"secret|arn:aws:secretsmanager:region-name-1:123456789012:secret:my-se%&et|key",
None,
pytest.raises(ValueError),
None,
),
(
"secret|arn:aws:secretsmanager:region-name-1:123456789012:secret:my-secret:000000000-0000-0000-0000-00000000000|key",
None,
pytest.raises(ValueError),
None,
),
],
)
def test_substitute_value_from_aws_secrets_manager(
input_value, secret_response, raises, expected
):
with raises:
with mock.patch(
"great_expectations.data_context.util.boto3.session.Session",
return_value=MockedBoto3Session(secret_response),
):
assert substitute_value_from_aws_secrets_manager(input_value) == expected
class MockedSecretManagerServiceClient:
def __init__(self, secret_response):
self.secret_response = secret_response
def __call__(self):
return self
def access_secret_version(self, *args, **kwargs):
class Response:
pass
response = Response()
response._pb = Response()
response._pb.payload = Response()
response._pb.payload.data = self.secret_response
return response
@pytest.mark.skipif(
secretmanager is None,
reason="Could not import 'secretmanager' from google.cloud in data_context.util",
)
@pytest.mark.parametrize(
"input_value,secret_response,raises,expected",
[
(
"secret|projects/project_id/secrets/my_secret",
b"value",
does_not_raise(),
"value",
),
(
"secret|projects/project_id/secrets/my_secret|key",
b'{"key": "value"}',
does_not_raise(),
"value",
),
(
"secret|projects/project_id/secrets/my_se%&et|key",
None,
pytest.raises(ValueError),
None,
),
(
"secret|projects/project_id/secrets/my_secret/version/A|key",
None,
pytest.raises(ValueError),
None,
),
],
)
def test_substitute_value_from_gcp_secret_manager(
input_value, secret_response, raises, expected
):
with raises:
with mock.patch(
"great_expectations.data_context.util.secretmanager.SecretManagerServiceClient",
return_value=MockedSecretManagerServiceClient(secret_response),
):
assert substitute_value_from_gcp_secret_manager(input_value) == expected
class MockedSecretClient:
def __init__(self, secret_response):
self.secret_response = secret_response
def __call__(self, *args, **kwargs):
return self
def get_secret(self, *args, **kwargs):
class Response:
pass
response = Response()
response.value = self.secret_response
return response
@mock.patch("great_expectations.data_context.util.DefaultAzureCredential", new=object)
@pytest.mark.parametrize(
"input_value,secret_response,raises,expected",
[
(
"secret|https://my-vault-name.vault.azure.net/secrets/my-secret",
"value",
does_not_raise(),
"value",
),
(
"secret|https://my-vault-name.vault.azure.net/secrets/my-secret|key",
'{"key": "value"}',
does_not_raise(),
"value",
),
(
"secret|https://my-vault-name.vault.azure.net/secrets/my-se%&et|key",
None,
pytest.raises(ValueError),
None,
),
(
"secret|https://my_vault_name.vault.azure.net/secrets/my-secret/A0000000000000000000000000000000|key",
None,
pytest.raises(ValueError),
None,
),
],
)
def test_substitute_value_from_azure_keyvault(
input_value, secret_response, raises, expected
):
with raises:
with mock.patch(
"great_expectations.data_context.util.SecretClient",
return_value=MockedSecretClient(secret_response),
):
assert substitute_value_from_azure_keyvault(input_value) == expected
|
|
#! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.descriptor_pool."""
__author__ = 'matthewtoia@google.com (Matt Toia)'
import os
import sys
try:
import unittest2 as unittest #PY26
except ImportError:
import unittest
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_import_public_pb2
from google.protobuf import unittest_pb2
from google.protobuf import descriptor_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import descriptor_pool_test1_pb2
from google.protobuf.internal import descriptor_pool_test2_pb2
from google.protobuf.internal import factory_test1_pb2
from google.protobuf.internal import factory_test2_pb2
from google.protobuf.internal import file_options_test_pb2
from google.protobuf.internal import more_messages_pb2
from google.protobuf import descriptor
from google.protobuf import descriptor_database
from google.protobuf import descriptor_pool
from google.protobuf import message_factory
from google.protobuf import symbol_database
class DescriptorPoolTest(unittest.TestCase):
def setUp(self):
self.pool = descriptor_pool.DescriptorPool()
self.factory_test1_fd = descriptor_pb2.FileDescriptorProto.FromString(
factory_test1_pb2.DESCRIPTOR.serialized_pb)
self.factory_test2_fd = descriptor_pb2.FileDescriptorProto.FromString(
factory_test2_pb2.DESCRIPTOR.serialized_pb)
self.pool.Add(self.factory_test1_fd)
self.pool.Add(self.factory_test2_fd)
def testFindFileByName(self):
name1 = 'google/protobuf/internal/factory_test1.proto'
file_desc1 = self.pool.FindFileByName(name1)
self.assertIsInstance(file_desc1, descriptor.FileDescriptor)
self.assertEqual(name1, file_desc1.name)
self.assertEqual('google.protobuf.python.internal', file_desc1.package)
self.assertIn('Factory1Message', file_desc1.message_types_by_name)
name2 = 'google/protobuf/internal/factory_test2.proto'
file_desc2 = self.pool.FindFileByName(name2)
self.assertIsInstance(file_desc2, descriptor.FileDescriptor)
self.assertEqual(name2, file_desc2.name)
self.assertEqual('google.protobuf.python.internal', file_desc2.package)
self.assertIn('Factory2Message', file_desc2.message_types_by_name)
def testFindFileByNameFailure(self):
with self.assertRaises(KeyError):
self.pool.FindFileByName('Does not exist')
def testFindFileContainingSymbol(self):
file_desc1 = self.pool.FindFileContainingSymbol(
'google.protobuf.python.internal.Factory1Message')
self.assertIsInstance(file_desc1, descriptor.FileDescriptor)
self.assertEqual('google/protobuf/internal/factory_test1.proto',
file_desc1.name)
self.assertEqual('google.protobuf.python.internal', file_desc1.package)
self.assertIn('Factory1Message', file_desc1.message_types_by_name)
file_desc2 = self.pool.FindFileContainingSymbol(
'google.protobuf.python.internal.Factory2Message')
self.assertIsInstance(file_desc2, descriptor.FileDescriptor)
self.assertEqual('google/protobuf/internal/factory_test2.proto',
file_desc2.name)
self.assertEqual('google.protobuf.python.internal', file_desc2.package)
self.assertIn('Factory2Message', file_desc2.message_types_by_name)
def testFindFileContainingSymbolFailure(self):
with self.assertRaises(KeyError):
self.pool.FindFileContainingSymbol('Does not exist')
def testFindMessageTypeByName(self):
msg1 = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory1Message')
self.assertIsInstance(msg1, descriptor.Descriptor)
self.assertEqual('Factory1Message', msg1.name)
self.assertEqual('google.protobuf.python.internal.Factory1Message',
msg1.full_name)
self.assertEqual(None, msg1.containing_type)
self.assertFalse(msg1.has_options)
nested_msg1 = msg1.nested_types[0]
self.assertEqual('NestedFactory1Message', nested_msg1.name)
self.assertEqual(msg1, nested_msg1.containing_type)
nested_enum1 = msg1.enum_types[0]
self.assertEqual('NestedFactory1Enum', nested_enum1.name)
self.assertEqual(msg1, nested_enum1.containing_type)
self.assertEqual(nested_msg1, msg1.fields_by_name[
'nested_factory_1_message'].message_type)
self.assertEqual(nested_enum1, msg1.fields_by_name[
'nested_factory_1_enum'].enum_type)
msg2 = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory2Message')
self.assertIsInstance(msg2, descriptor.Descriptor)
self.assertEqual('Factory2Message', msg2.name)
self.assertEqual('google.protobuf.python.internal.Factory2Message',
msg2.full_name)
self.assertIsNone(msg2.containing_type)
nested_msg2 = msg2.nested_types[0]
self.assertEqual('NestedFactory2Message', nested_msg2.name)
self.assertEqual(msg2, nested_msg2.containing_type)
nested_enum2 = msg2.enum_types[0]
self.assertEqual('NestedFactory2Enum', nested_enum2.name)
self.assertEqual(msg2, nested_enum2.containing_type)
self.assertEqual(nested_msg2, msg2.fields_by_name[
'nested_factory_2_message'].message_type)
self.assertEqual(nested_enum2, msg2.fields_by_name[
'nested_factory_2_enum'].enum_type)
self.assertTrue(msg2.fields_by_name['int_with_default'].has_default_value)
self.assertEqual(
1776, msg2.fields_by_name['int_with_default'].default_value)
self.assertTrue(
msg2.fields_by_name['double_with_default'].has_default_value)
self.assertEqual(
9.99, msg2.fields_by_name['double_with_default'].default_value)
self.assertTrue(
msg2.fields_by_name['string_with_default'].has_default_value)
self.assertEqual(
'hello world', msg2.fields_by_name['string_with_default'].default_value)
self.assertTrue(msg2.fields_by_name['bool_with_default'].has_default_value)
self.assertFalse(msg2.fields_by_name['bool_with_default'].default_value)
self.assertTrue(msg2.fields_by_name['enum_with_default'].has_default_value)
self.assertEqual(
1, msg2.fields_by_name['enum_with_default'].default_value)
msg3 = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory2Message.NestedFactory2Message')
self.assertEqual(nested_msg2, msg3)
self.assertTrue(msg2.fields_by_name['bytes_with_default'].has_default_value)
self.assertEqual(
b'a\xfb\x00c',
msg2.fields_by_name['bytes_with_default'].default_value)
self.assertEqual(1, len(msg2.oneofs))
self.assertEqual(1, len(msg2.oneofs_by_name))
self.assertEqual(2, len(msg2.oneofs[0].fields))
for name in ['oneof_int', 'oneof_string']:
self.assertEqual(msg2.oneofs[0],
msg2.fields_by_name[name].containing_oneof)
self.assertIn(msg2.fields_by_name[name], msg2.oneofs[0].fields)
def testFindMessageTypeByNameFailure(self):
with self.assertRaises(KeyError):
self.pool.FindMessageTypeByName('Does not exist')
def testFindEnumTypeByName(self):
enum1 = self.pool.FindEnumTypeByName(
'google.protobuf.python.internal.Factory1Enum')
self.assertIsInstance(enum1, descriptor.EnumDescriptor)
self.assertEqual(0, enum1.values_by_name['FACTORY_1_VALUE_0'].number)
self.assertEqual(1, enum1.values_by_name['FACTORY_1_VALUE_1'].number)
self.assertFalse(enum1.has_options)
nested_enum1 = self.pool.FindEnumTypeByName(
'google.protobuf.python.internal.Factory1Message.NestedFactory1Enum')
self.assertIsInstance(nested_enum1, descriptor.EnumDescriptor)
self.assertEqual(
0, nested_enum1.values_by_name['NESTED_FACTORY_1_VALUE_0'].number)
self.assertEqual(
1, nested_enum1.values_by_name['NESTED_FACTORY_1_VALUE_1'].number)
enum2 = self.pool.FindEnumTypeByName(
'google.protobuf.python.internal.Factory2Enum')
self.assertIsInstance(enum2, descriptor.EnumDescriptor)
self.assertEqual(0, enum2.values_by_name['FACTORY_2_VALUE_0'].number)
self.assertEqual(1, enum2.values_by_name['FACTORY_2_VALUE_1'].number)
nested_enum2 = self.pool.FindEnumTypeByName(
'google.protobuf.python.internal.Factory2Message.NestedFactory2Enum')
self.assertIsInstance(nested_enum2, descriptor.EnumDescriptor)
self.assertEqual(
0, nested_enum2.values_by_name['NESTED_FACTORY_2_VALUE_0'].number)
self.assertEqual(
1, nested_enum2.values_by_name['NESTED_FACTORY_2_VALUE_1'].number)
def testFindEnumTypeByNameFailure(self):
with self.assertRaises(KeyError):
self.pool.FindEnumTypeByName('Does not exist')
def testFindFieldByName(self):
field = self.pool.FindFieldByName(
'google.protobuf.python.internal.Factory1Message.list_value')
self.assertEqual(field.name, 'list_value')
self.assertEqual(field.label, field.LABEL_REPEATED)
self.assertFalse(field.has_options)
with self.assertRaises(KeyError):
self.pool.FindFieldByName('Does not exist')
def testFindExtensionByName(self):
# An extension defined in a message.
extension = self.pool.FindExtensionByName(
'google.protobuf.python.internal.Factory2Message.one_more_field')
self.assertEqual(extension.name, 'one_more_field')
# An extension defined at file scope.
extension = self.pool.FindExtensionByName(
'google.protobuf.python.internal.another_field')
self.assertEqual(extension.name, 'another_field')
self.assertEqual(extension.number, 1002)
with self.assertRaises(KeyError):
self.pool.FindFieldByName('Does not exist')
def testFindAllExtensions(self):
factory1_message = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory1Message')
factory2_message = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory2Message')
# An extension defined in a message.
one_more_field = factory2_message.extensions_by_name['one_more_field']
self.pool.AddExtensionDescriptor(one_more_field)
# An extension defined at file scope.
factory_test2 = self.pool.FindFileByName(
'google/protobuf/internal/factory_test2.proto')
another_field = factory_test2.extensions_by_name['another_field']
self.pool.AddExtensionDescriptor(another_field)
extensions = self.pool.FindAllExtensions(factory1_message)
    expected_extensions = set([one_more_field, another_field])
    self.assertEqual(expected_extensions, set(extensions))
# Verify that mutating the returned list does not affect the pool.
extensions.append('unexpected_element')
# Get the extensions again, the returned value does not contain the
# 'unexpected_element'.
extensions = self.pool.FindAllExtensions(factory1_message)
    self.assertEqual(expected_extensions, set(extensions))
def testFindExtensionByNumber(self):
factory1_message = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory1Message')
factory2_message = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory2Message')
# An extension defined in a message.
one_more_field = factory2_message.extensions_by_name['one_more_field']
self.pool.AddExtensionDescriptor(one_more_field)
# An extension defined at file scope.
factory_test2 = self.pool.FindFileByName(
'google/protobuf/internal/factory_test2.proto')
another_field = factory_test2.extensions_by_name['another_field']
self.pool.AddExtensionDescriptor(another_field)
# An extension defined in a message.
extension = self.pool.FindExtensionByNumber(factory1_message, 1001)
self.assertEqual(extension.name, 'one_more_field')
# An extension defined at file scope.
extension = self.pool.FindExtensionByNumber(factory1_message, 1002)
self.assertEqual(extension.name, 'another_field')
with self.assertRaises(KeyError):
extension = self.pool.FindExtensionByNumber(factory1_message, 1234567)
def testExtensionsAreNotFields(self):
with self.assertRaises(KeyError):
self.pool.FindFieldByName('google.protobuf.python.internal.another_field')
with self.assertRaises(KeyError):
self.pool.FindFieldByName(
'google.protobuf.python.internal.Factory2Message.one_more_field')
with self.assertRaises(KeyError):
self.pool.FindExtensionByName(
'google.protobuf.python.internal.Factory1Message.list_value')
def testUserDefinedDB(self):
db = descriptor_database.DescriptorDatabase()
self.pool = descriptor_pool.DescriptorPool(db)
db.Add(self.factory_test1_fd)
db.Add(self.factory_test2_fd)
self.testFindMessageTypeByName()
def testAddSerializedFile(self):
self.pool = descriptor_pool.DescriptorPool()
self.pool.AddSerializedFile(self.factory_test1_fd.SerializeToString())
self.pool.AddSerializedFile(self.factory_test2_fd.SerializeToString())
self.testFindMessageTypeByName()
def testComplexNesting(self):
more_messages_desc = descriptor_pb2.FileDescriptorProto.FromString(
more_messages_pb2.DESCRIPTOR.serialized_pb)
test1_desc = descriptor_pb2.FileDescriptorProto.FromString(
descriptor_pool_test1_pb2.DESCRIPTOR.serialized_pb)
test2_desc = descriptor_pb2.FileDescriptorProto.FromString(
descriptor_pool_test2_pb2.DESCRIPTOR.serialized_pb)
self.pool.Add(more_messages_desc)
self.pool.Add(test1_desc)
self.pool.Add(test2_desc)
TEST1_FILE.CheckFile(self, self.pool)
TEST2_FILE.CheckFile(self, self.pool)
def testEnumDefaultValue(self):
"""Test the default value of enums which don't start at zero."""
def _CheckDefaultValue(file_descriptor):
default_value = (file_descriptor
.message_types_by_name['DescriptorPoolTest1']
.fields_by_name['nested_enum']
.default_value)
self.assertEqual(default_value,
descriptor_pool_test1_pb2.DescriptorPoolTest1.BETA)
# First check what the generated descriptor contains.
_CheckDefaultValue(descriptor_pool_test1_pb2.DESCRIPTOR)
# Then check the generated pool. Normally this is the same descriptor.
file_descriptor = symbol_database.Default().pool.FindFileByName(
'google/protobuf/internal/descriptor_pool_test1.proto')
self.assertIs(file_descriptor, descriptor_pool_test1_pb2.DESCRIPTOR)
_CheckDefaultValue(file_descriptor)
# Then check the dynamic pool and its internal DescriptorDatabase.
descriptor_proto = descriptor_pb2.FileDescriptorProto.FromString(
descriptor_pool_test1_pb2.DESCRIPTOR.serialized_pb)
self.pool.Add(descriptor_proto)
# And do the same check as above
file_descriptor = self.pool.FindFileByName(
'google/protobuf/internal/descriptor_pool_test1.proto')
_CheckDefaultValue(file_descriptor)
def testDefaultValueForCustomMessages(self):
"""Check the value returned by non-existent fields."""
def _CheckValueAndType(value, expected_value, expected_type):
self.assertEqual(value, expected_value)
self.assertIsInstance(value, expected_type)
def _CheckDefaultValues(msg):
try:
int64 = long
except NameError: # Python3
int64 = int
try:
unicode_type = unicode
except NameError: # Python3
unicode_type = str
_CheckValueAndType(msg.optional_int32, 0, int)
_CheckValueAndType(msg.optional_uint64, 0, (int64, int))
_CheckValueAndType(msg.optional_float, 0, (float, int))
_CheckValueAndType(msg.optional_double, 0, (float, int))
_CheckValueAndType(msg.optional_bool, False, bool)
_CheckValueAndType(msg.optional_string, u'', unicode_type)
_CheckValueAndType(msg.optional_bytes, b'', bytes)
_CheckValueAndType(msg.optional_nested_enum, msg.FOO, int)
# First for the generated message
_CheckDefaultValues(unittest_pb2.TestAllTypes())
    # Then for a message built from the DescriptorPool.
pool = descriptor_pool.DescriptorPool()
pool.Add(descriptor_pb2.FileDescriptorProto.FromString(
unittest_import_public_pb2.DESCRIPTOR.serialized_pb))
pool.Add(descriptor_pb2.FileDescriptorProto.FromString(
unittest_import_pb2.DESCRIPTOR.serialized_pb))
pool.Add(descriptor_pb2.FileDescriptorProto.FromString(
unittest_pb2.DESCRIPTOR.serialized_pb))
message_class = message_factory.MessageFactory(pool).GetPrototype(
pool.FindMessageTypeByName(
unittest_pb2.TestAllTypes.DESCRIPTOR.full_name))
_CheckDefaultValues(message_class())
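# The classes below form a small declarative harness: a ProtoFile describes
# the expected shape of a .proto file (package, dependencies, messages,
# enums, fields), and CheckFile/CheckType/CheckField walk a DescriptorPool
# verifying that the parsed descriptors match. TEST1_FILE and TEST2_FILE at
# the bottom of this module are the expectations consumed by
# testComplexNesting above.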
class ProtoFile(object):
def __init__(self, name, package, messages, dependencies=None,
public_dependencies=None):
self.name = name
self.package = package
self.messages = messages
self.dependencies = dependencies or []
self.public_dependencies = public_dependencies or []
def CheckFile(self, test, pool):
file_desc = pool.FindFileByName(self.name)
test.assertEqual(self.name, file_desc.name)
test.assertEqual(self.package, file_desc.package)
dependencies_names = [f.name for f in file_desc.dependencies]
test.assertEqual(self.dependencies, dependencies_names)
public_dependencies_names = [f.name for f in file_desc.public_dependencies]
test.assertEqual(self.public_dependencies, public_dependencies_names)
for name, msg_type in self.messages.items():
msg_type.CheckType(test, None, name, file_desc)
class EnumType(object):
def __init__(self, values):
self.values = values
def CheckType(self, test, msg_desc, name, file_desc):
enum_desc = msg_desc.enum_types_by_name[name]
test.assertEqual(name, enum_desc.name)
expected_enum_full_name = '.'.join([msg_desc.full_name, name])
test.assertEqual(expected_enum_full_name, enum_desc.full_name)
test.assertEqual(msg_desc, enum_desc.containing_type)
test.assertEqual(file_desc, enum_desc.file)
for index, (value, number) in enumerate(self.values):
value_desc = enum_desc.values_by_name[value]
test.assertEqual(value, value_desc.name)
test.assertEqual(index, value_desc.index)
test.assertEqual(number, value_desc.number)
test.assertEqual(enum_desc, value_desc.type)
test.assertIn(value, msg_desc.enum_values_by_name)
class MessageType(object):
def __init__(self, type_dict, field_list, is_extendable=False,
extensions=None):
self.type_dict = type_dict
self.field_list = field_list
self.is_extendable = is_extendable
self.extensions = extensions or []
def CheckType(self, test, containing_type_desc, name, file_desc):
if containing_type_desc is None:
desc = file_desc.message_types_by_name[name]
expected_full_name = '.'.join([file_desc.package, name])
else:
desc = containing_type_desc.nested_types_by_name[name]
expected_full_name = '.'.join([containing_type_desc.full_name, name])
test.assertEqual(name, desc.name)
test.assertEqual(expected_full_name, desc.full_name)
test.assertEqual(containing_type_desc, desc.containing_type)
test.assertEqual(desc.file, file_desc)
test.assertEqual(self.is_extendable, desc.is_extendable)
for name, subtype in self.type_dict.items():
subtype.CheckType(test, desc, name, file_desc)
for index, (name, field) in enumerate(self.field_list):
field.CheckField(test, desc, name, index)
for index, (name, field) in enumerate(self.extensions):
field.CheckField(test, desc, name, index)
class EnumField(object):
def __init__(self, number, type_name, default_value):
self.number = number
self.type_name = type_name
self.default_value = default_value
def CheckField(self, test, msg_desc, name, index):
field_desc = msg_desc.fields_by_name[name]
enum_desc = msg_desc.enum_types_by_name[self.type_name]
test.assertEqual(name, field_desc.name)
expected_field_full_name = '.'.join([msg_desc.full_name, name])
test.assertEqual(expected_field_full_name, field_desc.full_name)
test.assertEqual(index, field_desc.index)
test.assertEqual(self.number, field_desc.number)
test.assertEqual(descriptor.FieldDescriptor.TYPE_ENUM, field_desc.type)
test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_ENUM,
field_desc.cpp_type)
test.assertTrue(field_desc.has_default_value)
test.assertEqual(enum_desc.values_by_name[self.default_value].number,
field_desc.default_value)
test.assertFalse(enum_desc.values_by_name[self.default_value].has_options)
test.assertEqual(msg_desc, field_desc.containing_type)
test.assertEqual(enum_desc, field_desc.enum_type)
class MessageField(object):
def __init__(self, number, type_name):
self.number = number
self.type_name = type_name
def CheckField(self, test, msg_desc, name, index):
field_desc = msg_desc.fields_by_name[name]
field_type_desc = msg_desc.nested_types_by_name[self.type_name]
test.assertEqual(name, field_desc.name)
expected_field_full_name = '.'.join([msg_desc.full_name, name])
test.assertEqual(expected_field_full_name, field_desc.full_name)
test.assertEqual(index, field_desc.index)
test.assertEqual(self.number, field_desc.number)
test.assertEqual(descriptor.FieldDescriptor.TYPE_MESSAGE, field_desc.type)
test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_MESSAGE,
field_desc.cpp_type)
test.assertFalse(field_desc.has_default_value)
test.assertEqual(msg_desc, field_desc.containing_type)
test.assertEqual(field_type_desc, field_desc.message_type)
class StringField(object):
def __init__(self, number, default_value):
self.number = number
self.default_value = default_value
def CheckField(self, test, msg_desc, name, index):
field_desc = msg_desc.fields_by_name[name]
test.assertEqual(name, field_desc.name)
expected_field_full_name = '.'.join([msg_desc.full_name, name])
test.assertEqual(expected_field_full_name, field_desc.full_name)
test.assertEqual(index, field_desc.index)
test.assertEqual(self.number, field_desc.number)
test.assertEqual(descriptor.FieldDescriptor.TYPE_STRING, field_desc.type)
test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_STRING,
field_desc.cpp_type)
test.assertTrue(field_desc.has_default_value)
test.assertEqual(self.default_value, field_desc.default_value)
class ExtensionField(object):
def __init__(self, number, extended_type):
self.number = number
self.extended_type = extended_type
def CheckField(self, test, msg_desc, name, index):
field_desc = msg_desc.extensions_by_name[name]
test.assertEqual(name, field_desc.name)
expected_field_full_name = '.'.join([msg_desc.full_name, name])
test.assertEqual(expected_field_full_name, field_desc.full_name)
test.assertEqual(self.number, field_desc.number)
test.assertEqual(index, field_desc.index)
test.assertEqual(descriptor.FieldDescriptor.TYPE_MESSAGE, field_desc.type)
test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_MESSAGE,
field_desc.cpp_type)
test.assertFalse(field_desc.has_default_value)
test.assertTrue(field_desc.is_extension)
test.assertEqual(msg_desc, field_desc.extension_scope)
test.assertEqual(msg_desc, field_desc.message_type)
test.assertEqual(self.extended_type, field_desc.containing_type.name)
class AddDescriptorTest(unittest.TestCase):
def _TestMessage(self, prefix):
pool = descriptor_pool.DescriptorPool()
pool.AddDescriptor(unittest_pb2.TestAllTypes.DESCRIPTOR)
self.assertEqual(
'protobuf_unittest.TestAllTypes',
pool.FindMessageTypeByName(
prefix + 'protobuf_unittest.TestAllTypes').full_name)
# AddDescriptor is not recursive.
with self.assertRaises(KeyError):
pool.FindMessageTypeByName(
prefix + 'protobuf_unittest.TestAllTypes.NestedMessage')
pool.AddDescriptor(unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR)
self.assertEqual(
'protobuf_unittest.TestAllTypes.NestedMessage',
pool.FindMessageTypeByName(
prefix + 'protobuf_unittest.TestAllTypes.NestedMessage').full_name)
# Files are implicitly also indexed when messages are added.
self.assertEqual(
'google/protobuf/unittest.proto',
pool.FindFileByName(
'google/protobuf/unittest.proto').name)
self.assertEqual(
'google/protobuf/unittest.proto',
pool.FindFileContainingSymbol(
prefix + 'protobuf_unittest.TestAllTypes.NestedMessage').name)
@unittest.skipIf(api_implementation.Type() == 'cpp',
'With the cpp implementation, Add() must be called first')
def testMessage(self):
self._TestMessage('')
self._TestMessage('.')
def _TestEnum(self, prefix):
pool = descriptor_pool.DescriptorPool()
pool.AddEnumDescriptor(unittest_pb2.ForeignEnum.DESCRIPTOR)
self.assertEqual(
'protobuf_unittest.ForeignEnum',
pool.FindEnumTypeByName(
prefix + 'protobuf_unittest.ForeignEnum').full_name)
# AddEnumDescriptor is not recursive.
with self.assertRaises(KeyError):
pool.FindEnumTypeByName(
prefix + 'protobuf_unittest.ForeignEnum.NestedEnum')
pool.AddEnumDescriptor(unittest_pb2.TestAllTypes.NestedEnum.DESCRIPTOR)
self.assertEqual(
'protobuf_unittest.TestAllTypes.NestedEnum',
pool.FindEnumTypeByName(
prefix + 'protobuf_unittest.TestAllTypes.NestedEnum').full_name)
# Files are implicitly also indexed when enums are added.
self.assertEqual(
'google/protobuf/unittest.proto',
pool.FindFileByName(
'google/protobuf/unittest.proto').name)
self.assertEqual(
'google/protobuf/unittest.proto',
pool.FindFileContainingSymbol(
prefix + 'protobuf_unittest.TestAllTypes.NestedEnum').name)
@unittest.skipIf(api_implementation.Type() == 'cpp',
'With the cpp implementation, Add() must be called first')
def testEnum(self):
self._TestEnum('')
self._TestEnum('.')
@unittest.skipIf(api_implementation.Type() == 'cpp',
'With the cpp implementation, Add() must be called first')
def testFile(self):
pool = descriptor_pool.DescriptorPool()
pool.AddFileDescriptor(unittest_pb2.DESCRIPTOR)
self.assertEqual(
'google/protobuf/unittest.proto',
pool.FindFileByName(
'google/protobuf/unittest.proto').name)
# AddFileDescriptor is not recursive; messages and enums within files must
# be explicitly registered.
with self.assertRaises(KeyError):
pool.FindFileContainingSymbol(
'protobuf_unittest.TestAllTypes')
def testEmptyDescriptorPool(self):
# Check that an empty DescriptorPool() contains no messages.
pool = descriptor_pool.DescriptorPool()
proto_file_name = descriptor_pb2.DESCRIPTOR.name
self.assertRaises(KeyError, pool.FindFileByName, proto_file_name)
# Add the above file to the pool
file_descriptor = descriptor_pb2.FileDescriptorProto()
descriptor_pb2.DESCRIPTOR.CopyToProto(file_descriptor)
pool.Add(file_descriptor)
# Now it exists.
self.assertTrue(pool.FindFileByName(proto_file_name))
def testCustomDescriptorPool(self):
# Create a new pool, and add a file descriptor.
pool = descriptor_pool.DescriptorPool()
file_desc = descriptor_pb2.FileDescriptorProto(
name='some/file.proto', package='package')
file_desc.message_type.add(name='Message')
pool.Add(file_desc)
self.assertEqual(pool.FindFileByName('some/file.proto').name,
'some/file.proto')
self.assertEqual(pool.FindMessageTypeByName('package.Message').name,
'Message')
def testFileDescriptorOptionsWithCustomDescriptorPool(self):
# Create a descriptor pool, and add a new FileDescriptorProto to it.
pool = descriptor_pool.DescriptorPool()
file_name = 'file_descriptor_options_with_custom_descriptor_pool.proto'
file_descriptor_proto = descriptor_pb2.FileDescriptorProto(name=file_name)
extension_id = file_options_test_pb2.foo_options
file_descriptor_proto.options.Extensions[extension_id].foo_name = 'foo'
pool.Add(file_descriptor_proto)
# The options set on the FileDescriptorProto should be available in the
# descriptor even if they contain extensions that cannot be deserialized
# using the pool.
file_descriptor = pool.FindFileByName(file_name)
options = file_descriptor.GetOptions()
self.assertEqual('foo', options.Extensions[extension_id].foo_name)
# The object returned by GetOptions() is cached.
self.assertIs(options, file_descriptor.GetOptions())
@unittest.skipIf(
api_implementation.Type() != 'cpp',
'default_pool is only supported by the C++ implementation')
class DefaultPoolTest(unittest.TestCase):
def testFindMethods(self):
# pylint: disable=g-import-not-at-top
from google.protobuf.pyext import _message
pool = _message.default_pool
self.assertIs(
pool.FindFileByName('google/protobuf/unittest.proto'),
unittest_pb2.DESCRIPTOR)
self.assertIs(
pool.FindMessageTypeByName('protobuf_unittest.TestAllTypes'),
unittest_pb2.TestAllTypes.DESCRIPTOR)
self.assertIs(
pool.FindFieldByName('protobuf_unittest.TestAllTypes.optional_int32'),
unittest_pb2.TestAllTypes.DESCRIPTOR.fields_by_name['optional_int32'])
self.assertIs(
pool.FindExtensionByName('protobuf_unittest.optional_int32_extension'),
unittest_pb2.DESCRIPTOR.extensions_by_name['optional_int32_extension'])
self.assertIs(
pool.FindEnumTypeByName('protobuf_unittest.ForeignEnum'),
unittest_pb2.ForeignEnum.DESCRIPTOR)
self.assertIs(
pool.FindOneofByName('protobuf_unittest.TestAllTypes.oneof_field'),
unittest_pb2.TestAllTypes.DESCRIPTOR.oneofs_by_name['oneof_field'])
def testAddFileDescriptor(self):
# pylint: disable=g-import-not-at-top
from google.protobuf.pyext import _message
pool = _message.default_pool
file_desc = descriptor_pb2.FileDescriptorProto(name='some/file.proto')
pool.Add(file_desc)
pool.AddSerializedFile(file_desc.SerializeToString())
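# A minimal end-to-end sketch (illustrative, not one of the tests above):
# define a FileDescriptorProto dynamically, add it to a fresh pool, and
# build a usable message class via message_factory. All names here are
# made up for the example.
def _dynamic_pool_demo():
  pool = descriptor_pool.DescriptorPool()
  file_proto = descriptor_pb2.FileDescriptorProto(
      name='demo/dynamic.proto', package='demo')
  msg_proto = file_proto.message_type.add(name='Point')
  msg_proto.field.add(
      name='x', number=1,
      type=descriptor_pb2.FieldDescriptorProto.TYPE_INT32,
      label=descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL)
  pool.Add(file_proto)
  factory = message_factory.MessageFactory(pool)
  point_cls = factory.GetPrototype(pool.FindMessageTypeByName('demo.Point'))
  return point_cls(x=3)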
TEST1_FILE = ProtoFile(
'google/protobuf/internal/descriptor_pool_test1.proto',
'google.protobuf.python.internal',
{
'DescriptorPoolTest1': MessageType({
'NestedEnum': EnumType([('ALPHA', 1), ('BETA', 2)]),
'NestedMessage': MessageType({
'NestedEnum': EnumType([('EPSILON', 5), ('ZETA', 6)]),
'DeepNestedMessage': MessageType({
'NestedEnum': EnumType([('ETA', 7), ('THETA', 8)]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'ETA')),
('nested_field', StringField(2, 'theta')),
]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'ZETA')),
('nested_field', StringField(2, 'beta')),
('deep_nested_message', MessageField(3, 'DeepNestedMessage')),
])
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'BETA')),
('nested_message', MessageField(2, 'NestedMessage')),
], is_extendable=True),
'DescriptorPoolTest2': MessageType({
'NestedEnum': EnumType([('GAMMA', 3), ('DELTA', 4)]),
'NestedMessage': MessageType({
'NestedEnum': EnumType([('IOTA', 9), ('KAPPA', 10)]),
'DeepNestedMessage': MessageType({
'NestedEnum': EnumType([('LAMBDA', 11), ('MU', 12)]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'MU')),
('nested_field', StringField(2, 'lambda')),
]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'IOTA')),
('nested_field', StringField(2, 'delta')),
('deep_nested_message', MessageField(3, 'DeepNestedMessage')),
])
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'GAMMA')),
('nested_message', MessageField(2, 'NestedMessage')),
]),
})
TEST2_FILE = ProtoFile(
'google/protobuf/internal/descriptor_pool_test2.proto',
'google.protobuf.python.internal',
{
'DescriptorPoolTest3': MessageType({
'NestedEnum': EnumType([('NU', 13), ('XI', 14)]),
'NestedMessage': MessageType({
'NestedEnum': EnumType([('OMICRON', 15), ('PI', 16)]),
'DeepNestedMessage': MessageType({
'NestedEnum': EnumType([('RHO', 17), ('SIGMA', 18)]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'RHO')),
('nested_field', StringField(2, 'sigma')),
]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'PI')),
('nested_field', StringField(2, 'nu')),
('deep_nested_message', MessageField(3, 'DeepNestedMessage')),
])
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'XI')),
('nested_message', MessageField(2, 'NestedMessage')),
], extensions=[
('descriptor_pool_test',
ExtensionField(1001, 'DescriptorPoolTest1')),
]),
},
dependencies=['google/protobuf/internal/descriptor_pool_test1.proto',
'google/protobuf/internal/more_messages.proto'],
public_dependencies=['google/protobuf/internal/more_messages.proto'])
if __name__ == '__main__':
unittest.main()
|
|
"""
$url abema.tv
$type live, vod
$region Japan
"""
import hashlib
import hmac
import logging
import re
import struct
import time
import uuid
from base64 import urlsafe_b64encode
from binascii import unhexlify
from Crypto.Cipher import AES
from requests import Response
from requests.adapters import BaseAdapter
from streamlink.exceptions import NoStreamsError
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import useragents, validate
from streamlink.stream.hls import HLSStream, HLSStreamReader, HLSStreamWriter
from streamlink.utils.url import update_qsd
log = logging.getLogger(__name__)
class AbemaTVHLSStreamWriter(HLSStreamWriter):
def should_filter_sequence(self, sequence):
return "/tsad/" in sequence.segment.uri or super().should_filter_sequence(sequence)
class AbemaTVHLSStreamReader(HLSStreamReader):
__writer__ = AbemaTVHLSStreamWriter
class AbemaTVHLSStream(HLSStream):
__reader__ = AbemaTVHLSStreamReader
class AbemaTVLicenseAdapter(BaseAdapter):
'''
    Handles the abematv-license:// protocol to fetch the real video key data.
'''
STRTABLE = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
HKEY = b"3AF0298C219469522A313570E8583005A642E73EDD58E3EA2FB7339D3DF1597E"
_MEDIATOKEN_API = "https://api.abema.io/v1/media/token"
_LICENSE_API = "https://license.abema.io/abematv-hls"
_MEDIATOKEN_SCHEMA = validate.Schema({"token": validate.text})
_LICENSE_SCHEMA = validate.Schema({"k": validate.text,
"cid": validate.text})
def __init__(self, session, deviceid, usertoken):
self._session = session
self.deviceid = deviceid
self.usertoken = usertoken
super().__init__()
def _get_videokey_from_ticket(self, ticket):
params = {
"osName": "android",
"osVersion": "6.0.1",
"osLang": "ja_JP",
"osTimezone": "Asia/Tokyo",
"appId": "tv.abema",
"appVersion": "3.27.1"
}
auth_header = {"Authorization": "Bearer " + self.usertoken}
res = self._session.http.get(self._MEDIATOKEN_API, params=params,
headers=auth_header)
jsonres = self._session.http.json(res,
schema=self._MEDIATOKEN_SCHEMA)
mediatoken = jsonres['token']
res = self._session.http.post(self._LICENSE_API,
params={"t": mediatoken},
json={"kv": "a", "lt": ticket})
jsonres = self._session.http.json(res,
schema=self._LICENSE_SCHEMA)
cid = jsonres['cid']
k = jsonres['k']
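        # 'k' is the 128-bit encrypted video key written in a custom
        # base-58 alphabet (STRTABLE); decode it to an integer, then repack
        # it as 16 big-endian bytes for the AES-ECB decryption below.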
res = sum([self.STRTABLE.find(k[i]) * (58 ** (len(k) - 1 - i))
for i in range(len(k))])
encvideokey = struct.pack('>QQ', res >> 64, res & 0xffffffffffffffff)
# HKEY:
# RC4KEY = unhexlify('DB98A8E7CECA3424D975280F90BD03EE')
# RC4DATA = unhexlify(b'D4B718BBBA9CFB7D0192A58F9E2D146A'
# b'FC5DB29E4352DE05FC4CF2C1005804BB')
# rc4 = ARC4.new(RC4KEY)
# HKEY = rc4.decrypt(RC4DATA)
h = hmac.new(unhexlify(self.HKEY),
(cid + self.deviceid).encode("utf-8"),
digestmod=hashlib.sha256)
enckey = h.digest()
aes = AES.new(enckey, AES.MODE_ECB)
rawvideokey = aes.decrypt(encvideokey)
return rawvideokey
def send(self, request, stream=False, timeout=None, verify=True, cert=None,
proxies=None):
resp = Response()
resp.status_code = 200
ticket = re.findall(r"abematv-license://(.*)", request.url)[0]
resp._content = self._get_videokey_from_ticket(ticket)
return resp
def close(self):
return
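# Illustrative note (not plugin code): mounting this adapter on a requests
# session means any GET for "abematv-license://<ticket>" is answered locally
# by send() above, with the decrypted AES video key as the response body.
# The plugin wires it up in AbemaTV._get_streams:
#
#   session.http.mount("abematv-license://", AbemaTVLicenseAdapter(...))
#   key = session.http.get("abematv-license://" + ticket).content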
@pluginmatcher(re.compile(r"""
https?://abema\.tv/(
now-on-air/(?P<onair>[^?]+)
|
video/episode/(?P<episode>[^?]+)
|
channels/.+?/slots/(?P<slots>[^?]+)
)
""", re.VERBOSE))
class AbemaTV(Plugin):
_CHANNEL = "https://api.abema.io/v1/channels"
_USER_API = "https://api.abema.io/v1/users"
_PRGM_API = "https://api.abema.io/v1/video/programs/{0}"
_SLOTS_API = "https://api.abema.io/v1/media/slots/{0}"
_PRGM3U8 = "https://vod-abematv.akamaized.net/program/{0}/playlist.m3u8"
_SLOTM3U8 = "https://vod-abematv.akamaized.net/slot/{0}/playlist.m3u8"
SECRETKEY = (b"v+Gjs=25Aw5erR!J8ZuvRrCx*rGswhB&qdHd_SYerEWdU&a?3DzN9B"
b"Rbp5KwY4hEmcj5#fykMjJ=AuWz5GSMY-d@H7DMEh3M@9n2G552Us$$"
b"k9cD=3TxwWe86!x#Zyhe")
_USER_SCHEMA = validate.Schema({"profile": {"userId": validate.text},
"token": validate.text})
_CHANNEL_SCHEMA = validate.Schema({"channels": [{"id": validate.text,
"name": validate.text,
"playback": {validate.optional("dash"):
validate.text,
"hls": validate.text}}]})
_PRGM_SCHEMA = validate.Schema({"terms": [{validate.optional("onDemandType"): int}]})
_SLOT_SCHEMA = validate.Schema({"slot": {"flags": {validate.optional("timeshiftFree"): bool}}})
def __init__(self, url):
super().__init__(url)
self.session.http.headers.update({'User-Agent': useragents.CHROME})
def _generate_applicationkeysecret(self, deviceid):
deviceid = deviceid.encode("utf-8") # for python3
        # Add one hour, then truncate to the hour (floor division keeps
        # this an int on Python 3).
ts_1hour = (int(time.time()) + 60 * 60) // 3600 * 3600
time_struct = time.gmtime(ts_1hour)
ts_1hour_str = str(ts_1hour).encode("utf-8")
h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
h.update(self.SECRETKEY)
tmp = h.digest()
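        # The digest is then stretched by repeated HMAC chaining; the repeat
        # counts are derived from the current UTC month, day-of-month % 5
        # and hour % 5, with the device id and the hour-aligned timestamp
        # mixed in between rounds.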
for i in range(time_struct.tm_mon):
h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
h.update(tmp)
tmp = h.digest()
h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
h.update(urlsafe_b64encode(tmp).rstrip(b"=") + deviceid)
tmp = h.digest()
for i in range(time_struct.tm_mday % 5):
h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
h.update(tmp)
tmp = h.digest()
h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
h.update(urlsafe_b64encode(tmp).rstrip(b"=") + ts_1hour_str)
tmp = h.digest()
for i in range(time_struct.tm_hour % 5): # utc hour
h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
h.update(tmp)
tmp = h.digest()
return urlsafe_b64encode(tmp).rstrip(b"=").decode("utf-8")
def _is_playable(self, vtype, vid):
auth_header = {"Authorization": "Bearer " + self.usertoken}
if vtype == "episode":
res = self.session.http.get(self._PRGM_API.format(vid),
headers=auth_header)
jsonres = self.session.http.json(res, schema=self._PRGM_SCHEMA)
playable = False
for item in jsonres["terms"]:
if item.get("onDemandType", False) == 3:
playable = True
return playable
elif vtype == "slots":
res = self.session.http.get(self._SLOTS_API.format(vid),
headers=auth_header)
jsonres = self.session.http.json(res, schema=self._SLOT_SCHEMA)
return jsonres["slot"]["flags"].get("timeshiftFree", False) is True
def _get_streams(self):
deviceid = str(uuid.uuid4())
appkeysecret = self._generate_applicationkeysecret(deviceid)
json_data = {"deviceId": deviceid,
"applicationKeySecret": appkeysecret}
res = self.session.http.post(self._USER_API, json=json_data)
jsonres = self.session.http.json(res, schema=self._USER_SCHEMA)
        self.usertoken = jsonres['token']  # for authorization
matchresult = self.match
if matchresult.group("onair"):
onair = matchresult.group("onair")
if onair == "news-global":
self._CHANNEL = update_qsd(self._CHANNEL, {"division": "1"})
res = self.session.http.get(self._CHANNEL)
jsonres = self.session.http.json(res, schema=self._CHANNEL_SCHEMA)
channels = jsonres["channels"]
for channel in channels:
if onair == channel["id"]:
break
else:
raise NoStreamsError(self.url)
playlisturl = channel["playback"]["hls"]
elif matchresult.group("episode"):
episode = matchresult.group("episode")
if not self._is_playable("episode", episode):
log.error("Premium stream is not playable")
return {}
playlisturl = self._PRGM3U8.format(episode)
elif matchresult.group("slots"):
slots = matchresult.group("slots")
if not self._is_playable("slots", slots):
log.error("Premium stream is not playable")
return {}
playlisturl = self._SLOTM3U8.format(slots)
log.debug("URL={0}".format(playlisturl))
        # hook AbemaTV's private abematv-license:// protocol
self.session.http.mount("abematv-license://",
AbemaTVLicenseAdapter(self.session, deviceid,
self.usertoken))
return AbemaTVHLSStream.parse_variant_playlist(self.session, playlisturl)
__plugin__ = AbemaTV
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SimpleRNN layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras._impl import keras
from tensorflow.python.keras._impl.keras import testing_utils
from tensorflow.python.platform import test
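# A standalone usage sketch (illustrative, not one of the tests below) of
# the SimpleRNN pattern these tests exercise: a batch of sequences in, one
# output vector per sequence out. All sizes are arbitrary example values.
def _simple_rnn_demo(num_samples=2, timesteps=3, embedding_dim=4, units=2):
  model = keras.models.Sequential()
  model.add(keras.layers.SimpleRNN(units, input_shape=(timesteps, embedding_dim)))
  model.compile('sgd', 'mse')
  x = np.random.random((num_samples, timesteps, embedding_dim))
  return model.predict(x)  # shape: (num_samples, units)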
class SimpleRNNLayerTest(test.TestCase):
def test_return_sequences_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
with self.test_session():
testing_utils.layer_test(
keras.layers.SimpleRNN,
kwargs={'units': units,
'return_sequences': True},
input_shape=(num_samples, timesteps, embedding_dim))
def test_dynamic_behavior_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
with self.test_session():
layer = keras.layers.SimpleRNN(units, input_shape=(None, embedding_dim))
model = keras.models.Sequential()
model.add(layer)
model.compile('sgd', 'mse')
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
def test_dropout_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
with self.test_session():
testing_utils.layer_test(
keras.layers.SimpleRNN,
kwargs={'units': units,
'dropout': 0.1,
'recurrent_dropout': 0.1},
input_shape=(num_samples, timesteps, embedding_dim))
def test_implementation_mode_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
with self.test_session():
for mode in [0, 1, 2]:
testing_utils.layer_test(
keras.layers.SimpleRNN,
kwargs={'units': units,
'implementation': mode},
input_shape=(num_samples, timesteps, embedding_dim))
def test_statefulness_SimpleRNN(self):
num_samples = 2
timesteps = 3
embedding_dim = 4
units = 2
layer_class = keras.layers.SimpleRNN
with self.test_session():
model = keras.models.Sequential()
model.add(
keras.layers.Embedding(
4,
embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(
units, return_sequences=False, stateful=True, weights=None)
model.add(layer)
model.compile(optimizer='sgd', loss='mse')
out1 = model.predict(np.ones((num_samples, timesteps)))
self.assertEqual(out1.shape, (num_samples, units))
# train once so that the states change
model.train_on_batch(
np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
self.assertNotEqual(out1.max(), out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out2.max(), out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
np.testing.assert_allclose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
self.assertNotEqual(out4.max(), out5.max())
# Check masking
layer.reset_states()
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
layer.reset_states()
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
np.testing.assert_allclose(out7, out6, atol=1e-5)
def test_regularizers_SimpleRNN(self):
embedding_dim = 4
layer_class = keras.layers.SimpleRNN
with self.test_session():
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_regularizer=keras.regularizers.l1(0.01),
recurrent_regularizer=keras.regularizers.l1(0.01),
bias_regularizer='l2',
activity_regularizer='l1')
layer.build((None, None, 2))
self.assertEqual(len(layer.losses), 3)
layer(keras.backend.variable(np.ones((2, 3, 2))))
self.assertEqual(len(layer.losses), 4)
def test_constraints_SimpleRNN(self):
embedding_dim = 4
layer_class = keras.layers.SimpleRNN
with self.test_session():
k_constraint = keras.constraints.max_norm(0.01)
r_constraint = keras.constraints.max_norm(0.01)
b_constraint = keras.constraints.max_norm(0.01)
layer = layer_class(
5,
return_sequences=False,
weights=None,
input_shape=(None, embedding_dim),
kernel_constraint=k_constraint,
recurrent_constraint=r_constraint,
bias_constraint=b_constraint)
layer.build((None, None, embedding_dim))
self.assertEqual(layer.kernel.constraint, k_constraint)
self.assertEqual(layer.recurrent_kernel.constraint, r_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
def test_with_masking_layer_SimpleRNN(self):
layer_class = keras.layers.SimpleRNN
with self.test_session():
inputs = np.random.random((2, 3, 4))
targets = np.abs(np.random.random((2, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = keras.models.Sequential()
model.add(keras.layers.Masking(input_shape=(3, 4)))
model.add(layer_class(units=5, return_sequences=True, unroll=False))
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)
def test_from_config_SimpleRNN(self):
layer_class = keras.layers.SimpleRNN
for stateful in (False, True):
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
if __name__ == '__main__':
test.main()
|
|
"""
Copyright (c) 2016 Stepan Fedorko-Bartos, Ceegan Hale
Under MIT License - https://github.com/Step7750/ScheduleStorm/blob/master/LICENSE.md
This file is a resource for Schedule Storm - https://github.com/Step7750/ScheduleStorm
"""
from .University import University
import requests
import json
import time
import threading
from datetime import datetime
# Custom UWaterlooAPI request class
class UWaterlooAPI:
def __init__(self, log, api_key=None, output='.json'):
self.log = log
self.api_key = '?key=' + api_key
self.baseURL = "https://api.uwaterloo.ca/v2"
self.format = output
def request(self, path):
"""
General UWaterloo request function
:param path: **string** path for get request
:return: **dict** all info for parsing
"""
r = requests.get(self.baseURL + path + self.format + self.api_key, timeout=20)
        # Checks if the request was successful and returns info to be parsed
if r.status_code == requests.codes.ok:
return json.loads(r.text)['data']
else:
self.log.debug('Get request failed | ' + self.baseURL + path + self.format + self.api_key)
def term_subject_schedule(self, term, subject):
"""
Gets all UWaterloo courses based on term and subject
:param term: **string** term id
:param subject: **string** subject abbreviation
:return: **dict** all info for parsing
"""
path = '/terms/' + term + '/' + subject + '/schedule'
return self.request(path)
def terms(self):
"""
Gets a list of all UWaterloo terms
:return: **dict** all info for parsing
"""
path = '/terms/list'
return self.request(path)
def subject_codes(self):
"""
Gets all UWaterloo subjects
:return: **dict** all info for parsing
"""
path = '/codes/subjects'
return self.request(path)
def group_codes(self):
"""
Gets all UWaterloo faculties
:return: **dict** all info for parsing
"""
path = '/codes/groups'
return self.request(path)
def course_id(self, course_id):
"""
Gets UWaterloo course descriptions based on unique course_id
:param course_id: **string** unique number assigned to a course
:return: **dict** all info for parsing
"""
path = '/courses/' + course_id
return self.request(path)
def courses(self, subject):
"""
Gets UWaterloo class descriptions based on subject and unique catalog_number
:param subject: **string** subject abbreviation
:return: **dict** all info for parsing
"""
path = '/courses/' + subject
return self.request(path)
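# Illustrative usage of the wrapper above (assumes a valid API key and a
# configured logger; the term id and subject are placeholder values):
#
#   uw = UWaterlooAPI(log, api_key="...")
#   terms = uw.terms()                             # listings plus term ids
#   schedule = uw.term_subject_schedule("1189", "CS")
#
# Each call returns the parsed 'data' payload, or None when the request
# fails (request() logs the failure and falls through).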
class UWaterloo(University):
def __init__(self, settings):
super().__init__(settings)
def scrapeCourseList(self, uw, term, subjectList):
"""
        Scrapes and parses courses
:param uw: **class object** UWaterlooapi class object
:param term: **string** term id
:param subjectList: **list** list of all subjects
:return:
"""
self.log.info('Scraping classes')
prevClass = ''
startType = ''
courseList = []
# For each subject scrape courses
for subject in subjectList:
# Gets all courses based on term and subject
for course in uw.term_subject_schedule(term, subject['subject']):
if course['catalog_number'] != prevClass or prevClass == '':
group = []
# initialize course dictionary
courseDict = {'coursenum': course['catalog_number'],
'subject': subject['subject'],
'term': term,
'id': course['class_number'],
'type': course['section'][:3],
'group': [],
'location': course['campus'],
                                  'curEnroll': course['enrollment_total'],
                                  'capEnroll': course['enrollment_capacity'],
'capwaitEnroll': course['waiting_capacity'],
'section': course['section'][4:],
'rooms': ['N/A'],
'times': ['N/A']}
                    # For each scheduled class meeting, fill in the course
                    # dictionary to be upserted
for date in course['classes']:
                        # If a location and building exist in the course info, append them to rooms
if 'location' in date and date['location']['building']:
if courseDict['rooms'][0] == 'N/A':
courseDict['rooms'].pop()
if 'room' in date['location']:
courseDict['rooms'].append(date['location']['building'] + " " + date['location']['room'])
else:
courseDict['rooms'].append(date['location']['building'])
# Checks if class is open, closed, or has a waiting list
if course['enrollment_capacity'] != 0 and course['enrollment_total']/course['enrollment_capacity'] >= 1:
# Checks waiting list and closed status
if course['waiting_capacity'] != 0:
courseDict['status'] = 'Wait List'
courseDict['waitEnroll'] = course['waiting_total']
else:
courseDict['status'] = 'Closed'
else:
courseDict['status'] = 'Open'
# Checks to see if class has a start and end time
if date['date']['start_time']:
if courseDict['times'][0] == 'N/A':
courseDict['times'].pop()
course_start_time = datetime.strptime(date['date']['start_time'], '%H:%M')
course_end_time = datetime.strptime(date['date']['end_time'], '%H:%M')
courseDict['times'].append(date['date']['weekdays'] + " " +
course_start_time.strftime('%I:%M%p') + ' - ' +
course_end_time.strftime('%I:%M%p'))
# Checks for assigned teacher
if date['instructors']:
teacher = date['instructors'][0].split(',')
courseDict['teachers'] = [teacher[1] + ' ' + teacher[0]]
else:
courseDict['teachers'] = ['N/A']
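                # Section grouping: consecutive rows of the same course are
                # tied together via 'associated_class'; a value of 99 is
                # treated as belonging to every group, so it inherits the
                # accumulated group list.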
if prevClass != courseDict['coursenum'] or (prevClass == courseDict['coursenum'] and courseDict['type'] == startType):
startType = courseDict['type']
group.append(course['associated_class'])
courseDict['group'].append(course['associated_class'])
else:
if int(course['associated_class']) != 99:
courseDict['group'].append(course['associated_class'])
else:
courseDict['group'] = group
prevClass = courseDict['coursenum']
courseList.append(courseDict)
# Upserts class list
self.updateClasses(courseList)
def scrapeCourseDesc(self, subjectList, uw):
"""
Cycles through the subjectlist scraping course descriptions
:param subjectList: **List** list of all subjects
:param uw: **Class Object** UWaterlooapi class object
:return:
"""
self.log.info("Scraping course descriptions")
for subject in subjectList:
for course in uw.courses(subject['subject']):
if not self.getCourseDescription(course['catalog_number'], course['subject']):
threadm = CourseDescriptions(course['course_id'], super(), uw)
threadm.setDaemon(True)
threadm.start()
def scrapeTerms(self, uw):
"""
Scrapes and parses terms
:param uw: **class object** UWaterlooapi class object
:return:
"""
self.log.info("Scraping terms")
termDictList = []
# Gets all terms recorded in UWaterlooAPI
terms = uw.terms()
termList = terms['listings']
# For each term find previous, current, and next term
for term in termList:
for x in range(len(termList[term])):
# Checks for previous term
if termList[term][x]['id'] == terms['previous_term']:
termDict = {'id': terms['previous_term'], 'name': termList[term][x]['name']}
termDictList.append(termDict)
# Checks for current term
elif termList[term][x]['id'] == terms['current_term']:
termDict = {'id': terms['current_term'], 'name': termList[term][x]['name']}
termDictList.append(termDict)
# Checks for next term
elif termList[term][x]['id'] == terms['next_term']:
termDict = {'id': terms['next_term'], 'name': termList[term][x]['name']}
termDictList.append(termDict)
# Upserts all terms to be scraped
self.updateTerms(termDictList)
self.log.info('Finished scraping terms')
def updateFaculties(self, uw):
"""
Scrapes and parses faculties
:param uw: **class object** UWaterlooapi class object
:return subjectList: **list** list of all UWaterloo subjects
"""
self.log.info("Getting faculty list")
# Gets all faculty info
faculties = uw.group_codes()
subjectList = []
# For each subject match faculty
for subject in uw.subject_codes():
subjectDict = {'subject': subject['subject'], 'faculty': '', 'name': subject['description']}
for faculty in faculties:
if subject['group'] == faculty['group_code']:
subjectDict['faculty'] = faculty['group_full_name']
subjectList.append(subjectDict)
# Upserts all subjects at once
self.updateSubjects(subjectList)
self.log.info('Finished updating faculties')
# Returns a list of all subjects for future use
return subjectList
def scrape(self):
"""
Scraping function that obtains updated course info
:return:
"""
# Initializes UWaterlooAPI class
uw = UWaterlooAPI(self.log, api_key=self.settings['api_key'])
# Scrapes faculties and terms
subjectList = self.updateFaculties(uw)
self.scrapeTerms(uw)
terms = self.getTerms()
# For each term scrape course info
for term in terms:
self.log.info('Obtaining ' + terms[term] + ' course data with id ' + term)
self.scrapeCourseList(uw, term, subjectList)
self.scrapeCourseDesc(subjectList, uw)
self.log.info('Finished scraping for UWaterloo data')
class CourseDescriptions(threading.Thread):
"""
    Fetches a course description from the UWaterloo API given its unique course_id
"""
def __init__(self, course, parent, uw):
threading.Thread.__init__(self)
self.super = parent
self.course = course
self.uw = uw
def run(self):
# Gets class description
courseDesc = self.uw.course_id(self.course)
if len(courseDesc) != 0:
courseDict = {'coursenum': courseDesc['catalog_number'], 'subject': courseDesc['subject'],
'name': courseDesc['title'], 'desc': courseDesc['description'],
'units': courseDesc['units'], 'prereq': courseDesc['prerequisites'],
'coreq': courseDesc['corequisites'], 'antireq': courseDesc['antirequisites']}
if courseDesc['notes']:
note = courseDesc['notes'][7:]
note = note[:-1]
courseDict['notes'] = note
else:
courseDict = {'coursenum': courseDesc['catalog_number'], 'subject': courseDesc['subject']}
# Upserts class descriptions
self.super.updateCourseDesc(courseDict)
|
|
# Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Props made of a single primitive MuJoCo geom."""
import itertools
from dm_control import composer
from dm_control import mjcf
from dm_control.composer import define
from dm_control.composer.observation import observable
import numpy as np
_DEFAULT_HALF_LENGTHS = [0.05, 0.1, 0.15]
class Primitive(composer.Entity):
"""A primitive MuJoCo geom prop."""
def _build(self, geom_type, size, mass=None, name=None):
"""Initializes this prop.
Args:
geom_type: a string, one of the types supported by MuJoCo.
size: a list or numpy array of up to 3 numbers, depending on the type.
mass: The mass for the primitive geom.
name: (optional) A string, the name of this prop.
"""
size = np.reshape(np.asarray(size), -1)
self._mjcf_root = mjcf.element.RootElement(model=name)
self._geom = self._mjcf_root.worldbody.add(
'geom', name='body_geom', type=geom_type, size=size, mass=mass)
touch_sensor = self._mjcf_root.worldbody.add(
'site', type=geom_type, name='touch_sensor', size=size*1.05,
rgba=[1, 1, 1, 0.1], # touch sensor site is almost transparent
group=composer.SENSOR_SITES_GROUP)
self._touch = self._mjcf_root.sensor.add(
'touch', site=touch_sensor)
self._position = self._mjcf_root.sensor.add(
'framepos', name='position', objtype='geom', objname=self.geom)
self._orientation = self._mjcf_root.sensor.add(
'framequat', name='orientation', objtype='geom',
objname=self.geom)
self._linear_velocity = self._mjcf_root.sensor.add(
'framelinvel', name='linear_velocity', objtype='geom',
objname=self.geom)
self._angular_velocity = self._mjcf_root.sensor.add(
'frameangvel', name='angular_velocity', objtype='geom',
objname=self.geom)
self._name = name
def _build_observables(self):
return PrimitiveObservables(self)
@property
def geom(self):
"""Returns the primitive's geom, e.g., to change color or friction."""
return self._geom
@property
def touch(self):
"""Exposing the touch sensor for observations and reward."""
return self._touch
@property
def position(self):
"""Ground truth pos sensor."""
return self._position
@property
def orientation(self):
"""Ground truth angular position sensor."""
return self._orientation
@property
def linear_velocity(self):
"""Ground truth velocity sensor."""
return self._linear_velocity
@property
def angular_velocity(self):
"""Ground truth angular velocity sensor."""
return self._angular_velocity
@property
def mjcf_model(self):
return self._mjcf_root
@property
def name(self):
return self._name
class PrimitiveObservables(composer.Observables,
composer.FreePropObservableMixin):
"""Primitive entity's observables."""
@define.observable
def position(self):
return observable.MJCFFeature('sensordata', self._entity.position)
@define.observable
def orientation(self):
return observable.MJCFFeature('sensordata', self._entity.orientation)
@define.observable
def linear_velocity(self):
return observable.MJCFFeature('sensordata', self._entity.linear_velocity)
@define.observable
def angular_velocity(self):
return observable.MJCFFeature('sensordata', self._entity.angular_velocity)
@define.observable
def touch(self):
return observable.MJCFFeature('sensordata', self._entity.touch)
class Sphere(Primitive):
"""A class representing a sphere prop."""
def _build(self, radius=0.05, mass=None, name='sphere'):
super(Sphere, self)._build(
geom_type='sphere', size=radius, mass=mass, name=name)
class Box(Primitive):
"""A class representing a box prop."""
def _build(self, half_lengths=None, mass=None, name='box'):
half_lengths = half_lengths or _DEFAULT_HALF_LENGTHS
super(Box, self)._build(geom_type='box',
size=half_lengths,
mass=mass,
name=name)
class BoxWithSites(Box):
"""A class representing a box prop with sites on the corners."""
def _build(self, half_lengths=None, mass=None, name='box'):
half_lengths = half_lengths or _DEFAULT_HALF_LENGTHS
super(BoxWithSites, self)._build(half_lengths=half_lengths, mass=mass,
name=name)
corner_positions = itertools.product([half_lengths[0], -half_lengths[0]],
[half_lengths[1], -half_lengths[1]],
[half_lengths[2], -half_lengths[2]])
corner_sites = []
for i, corner_pos in enumerate(corner_positions):
corner_sites.append(
self._mjcf_root.worldbody.add(
'site',
type='sphere',
name='corner_{}'.format(i),
size=[0.1],
pos=corner_pos,
rgba=[1, 0, 0, 1.0],
group=composer.SENSOR_SITES_GROUP))
self._corner_sites = tuple(corner_sites)
@property
def corner_sites(self):
return self._corner_sites
class Ellipsoid(Primitive):
"""A class representing an ellipsoid prop."""
def _build(self, radii=None, mass=None, name='ellipsoid'):
radii = radii or _DEFAULT_HALF_LENGTHS
super(Ellipsoid, self)._build(geom_type='ellipsoid',
size=radii,
mass=mass,
name=name)
class Cylinder(Primitive):
"""A class representing a cylinder prop."""
def _build(self, radius=0.05, half_length=0.15, mass=None, name='cylinder'):
super(Cylinder, self)._build(geom_type='cylinder',
size=[radius, half_length],
mass=mass,
name=name)
class Capsule(Primitive):
"""A class representing a capsule prop."""
def _build(self, radius=0.05, half_length=0.15, mass=None, name='capsule'):
super(Capsule, self)._build(geom_type='capsule',
size=[radius, half_length],
mass=mass,
name=name)
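# A minimal usage sketch for the props above (illustrative, not part of the
# library). `composer.Entity` forwards constructor arguments to `_build`, so
# props are created directly; the name "ball" is an assumption.
def _example_sphere_prop():
  ball = Sphere(radius=0.03, mass=0.1, name='ball')
  # The exposed geom can be customized, e.g. recolored via its rgba attribute.
  ball.geom.rgba = [0.8, 0.2, 0.2, 1.0]
  # Each prop owns a self-contained MJCF model that can be attached to an
  # arena or inspected directly.
  return ball.mjcf_model.to_xml_string()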
|
|
# -*- coding: utf-8 -*-
"""
celery.app.defaults
~~~~~~~~~~~~~~~~~~~
Configuration introspection and defaults.
:copyright: (c) 2009 - 2012 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import sys
from collections import deque
from datetime import timedelta
from ..utils.functional import memoize
is_jython = sys.platform.startswith("java")
is_pypy = hasattr(sys, "pypy_version_info")
DEFAULT_POOL = "processes"
if is_jython:
DEFAULT_POOL = "threads"
elif is_pypy:
if sys.pypy_version_info[0:3] < (1, 5, 0):
DEFAULT_POOL = "solo"
else:
DEFAULT_POOL = "processes"
DEFAULT_PROCESS_LOG_FMT = """
[%(asctime)s: %(levelname)s/%(processName)s] %(message)s
""".strip()
DEFAULT_LOG_FMT = '[%(asctime)s: %(levelname)s] %(message)s'
DEFAULT_TASK_LOG_FMT = """[%(asctime)s: %(levelname)s/%(processName)s] \
%(task_name)s[%(task_id)s]: %(message)s"""
def str_to_bool(term, table={"false": False, "no": False, "0": False,
"true": True, "yes": True, "1": True}):
try:
return table[term.lower()]
except KeyError:
raise TypeError("Can't coerce %r to type bool" % (term, ))
class Option(object):
alt = None
deprecate_by = None
remove_by = None
typemap = dict(string=str, int=int, float=float, any=lambda v: v,
bool=str_to_bool, dict=dict, tuple=tuple)
def __init__(self, default=None, *args, **kwargs):
self.default = default
self.type = kwargs.get("type") or "string"
for attr, value in kwargs.iteritems():
setattr(self, attr, value)
def to_python(self, value):
return self.typemap[self.type](value)
NAMESPACES = {
"BROKER": {
"URL": Option(None, type="string"),
"HOST": Option(None, type="string"),
"PORT": Option(type="int"),
"USER": Option(None, type="string"),
"PASSWORD": Option(None, type="string"),
"VHOST": Option(None, type="string"),
"CONNECTION_TIMEOUT": Option(4, type="float"),
"CONNECTION_RETRY": Option(True, type="bool"),
"CONNECTION_MAX_RETRIES": Option(100, type="int"),
"POOL_LIMIT": Option(10, type="int"),
"INSIST": Option(False, type="bool",
deprecate_by="2.4", remove_by="3.0"),
"USE_SSL": Option(False, type="bool"),
"TRANSPORT": Option(None, type="string"),
"TRANSPORT_OPTIONS": Option({}, type="dict"),
},
"CASSANDRA": {
"COLUMN_FAMILY": Option(None, type="string"),
"DETAILED_MODE": Option(False, type="bool"),
"KEYSPACE": Option(None, type="string"),
"READ_CONSISTENCY": Option(None, type="string"),
"SERVERS": Option(None, type="list"),
"WRITE_CONSISTENCY": Option(None, type="string"),
},
"CELERY": {
"ACKS_LATE": Option(False, type="bool"),
"ALWAYS_EAGER": Option(False, type="bool"),
"AMQP_TASK_RESULT_EXPIRES": Option(type="float",
deprecate_by="2.5", remove_by="3.0",
alt="CELERY_TASK_RESULT_EXPIRES"),
"AMQP_TASK_RESULT_CONNECTION_MAX": Option(1, type="int",
remove_by="2.5", alt="BROKER_POOL_LIMIT"),
"ANNOTATIONS": Option(type="any"),
"BROADCAST_QUEUE": Option("celeryctl"),
"BROADCAST_EXCHANGE": Option("celeryctl"),
"BROADCAST_EXCHANGE_TYPE": Option("fanout"),
"CACHE_BACKEND": Option(),
"CACHE_BACKEND_OPTIONS": Option({}, type="dict"),
"CREATE_MISSING_QUEUES": Option(True, type="bool"),
"DEFAULT_RATE_LIMIT": Option(type="string"),
"DISABLE_RATE_LIMITS": Option(False, type="bool"),
"DEFAULT_ROUTING_KEY": Option("celery"),
"DEFAULT_QUEUE": Option("celery"),
"DEFAULT_EXCHANGE": Option("celery"),
"DEFAULT_EXCHANGE_TYPE": Option("direct"),
"DEFAULT_DELIVERY_MODE": Option(2, type="string"),
"EAGER_PROPAGATES_EXCEPTIONS": Option(False, type="bool"),
"ENABLE_UTC": Option(False, type="bool"),
"EVENT_SERIALIZER": Option("json"),
"IMPORTS": Option((), type="tuple"),
"IGNORE_RESULT": Option(False, type="bool"),
"MAX_CACHED_RESULTS": Option(5000, type="int"),
"MESSAGE_COMPRESSION": Option(None, type="string"),
"MONGODB_BACKEND_SETTINGS": Option(None, type="dict"),
"REDIS_HOST": Option(None, type="string"),
"REDIS_PORT": Option(None, type="int"),
"REDIS_DB": Option(None, type="int"),
"REDIS_PASSWORD": Option(None, type="string"),
"REDIS_MAX_CONNECTIONS": Option(None, type="int"),
"RESULT_BACKEND": Option(None, type="string"),
"RESULT_DB_SHORT_LIVED_SESSIONS": Option(False, type="bool"),
"RESULT_DBURI": Option(),
"RESULT_ENGINE_OPTIONS": Option(None, type="dict"),
"RESULT_EXCHANGE": Option("celeryresults"),
"RESULT_EXCHANGE_TYPE": Option("direct"),
"RESULT_SERIALIZER": Option("pickle"),
"RESULT_PERSISTENT": Option(False, type="bool"),
"ROUTES": Option(None, type="any"),
"SEND_EVENTS": Option(False, type="bool"),
"SEND_TASK_ERROR_EMAILS": Option(False, type="bool"),
"SEND_TASK_SENT_EVENT": Option(False, type="bool"),
"STORE_ERRORS_EVEN_IF_IGNORED": Option(False, type="bool"),
"TASK_ERROR_WHITELIST": Option((), type="tuple",
deprecate_by="2.5", remove_by="3.0"),
"TASK_PUBLISH_RETRY": Option(True, type="bool"),
"TASK_PUBLISH_RETRY_POLICY": Option({
"max_retries": 100,
"interval_start": 0,
"interval_max": 1,
"interval_step": 0.2}, type="dict"),
"TASK_RESULT_EXPIRES": Option(timedelta(days=1), type="float"),
"TASK_SERIALIZER": Option("pickle"),
"TIMEZONE": Option(None, type="string"),
"TRACK_STARTED": Option(False, type="bool"),
"REDIRECT_STDOUTS": Option(True, type="bool"),
"REDIRECT_STDOUTS_LEVEL": Option("WARNING"),
"QUEUES": Option(None, type="dict"),
"SECURITY_KEY": Option(None, type="string"),
"SECURITY_CERTIFICATE": Option(None, type="string"),
"SECURITY_CERT_STORE": Option(None, type="string"),
},
"CELERYD": {
"AUTOSCALER": Option("celery.worker.autoscale.Autoscaler"),
"AUTORELOADER": Option("celery.worker.autoreload.Autoreloader"),
"BOOT_STEPS": Option((), type="tuple"),
"CONCURRENCY": Option(0, type="int"),
"ETA_SCHEDULER": Option(None, type="string"),
"ETA_SCHEDULER_PRECISION": Option(1.0, type="float"),
"FORCE_EXECV": Option(False, type="bool"),
"HIJACK_ROOT_LOGGER": Option(True, type="bool"),
"CONSUMER": Option("celery.worker.consumer.Consumer"),
"LOG_FORMAT": Option(DEFAULT_PROCESS_LOG_FMT),
"LOG_COLOR": Option(type="bool"),
"LOG_LEVEL": Option("WARN", deprecate_by="2.4", remove_by="3.0",
alt="--loglevel argument"),
"LOG_FILE": Option(deprecate_by="2.4", remove_by="3.0"),
"MEDIATOR": Option("celery.worker.mediator.Mediator"),
"MAX_TASKS_PER_CHILD": Option(type="int"),
"POOL": Option(DEFAULT_POOL),
"POOL_PUTLOCKS": Option(True, type="bool"),
"PREFETCH_MULTIPLIER": Option(4, type="int"),
"STATE_DB": Option(),
"TASK_LOG_FORMAT": Option(DEFAULT_TASK_LOG_FMT),
"TASK_SOFT_TIME_LIMIT": Option(type="float"),
"TASK_TIME_LIMIT": Option(type="float"),
},
"CELERYBEAT": {
"SCHEDULE": Option({}, type="dict"),
"SCHEDULER": Option("celery.beat.PersistentScheduler"),
"SCHEDULE_FILENAME": Option("celerybeat-schedule"),
"MAX_LOOP_INTERVAL": Option(5 * 60, type="float"),
"LOG_LEVEL": Option("INFO", deprecate_by="2.4", remove_by="3.0"),
"LOG_FILE": Option(deprecate_by="2.4", remove_by="3.0"),
},
"CELERYMON": {
"LOG_LEVEL": Option("INFO", deprecate_by="2.4", remove_by="3.0"),
"LOG_FILE": Option(deprecate_by="2.4", remove_by="3.0"),
"LOG_FORMAT": Option(DEFAULT_LOG_FMT),
},
"EMAIL": {
"HOST": Option("localhost"),
"PORT": Option(25, type="int"),
"HOST_USER": Option(None),
"HOST_PASSWORD": Option(None),
"TIMEOUT": Option(2, type="float"),
"USE_SSL": Option(False, type="bool"),
"USE_TLS": Option(False, type="bool"),
},
"SERVER_EMAIL": Option("celery@localhost"),
"ADMINS": Option((), type="tuple"),
"TT": {
"HOST": Option(None, type="string"),
"PORT": Option(None, type="int"),
},
}
def flatten(d, ns=""):
stack = deque([(ns, d)])
while stack:
name, space = stack.popleft()
for key, value in space.iteritems():
if isinstance(value, dict):
stack.append((name + key + '_', value))
else:
yield name + key, value
DEFAULTS = dict((key, value.default) for key, value in flatten(NAMESPACES))
def find_deprecated_settings(source):
from celery.utils import warn_deprecated
for name, opt in flatten(NAMESPACES):
if (opt.deprecate_by or opt.remove_by) and getattr(source, name, None):
warn_deprecated(description="The %r setting" % (name, ),
deprecation=opt.deprecate_by,
removal=opt.remove_by,
alternative=opt.alt)
@memoize(maxsize=None)
def find(name, namespace="celery"):
# - Try specified namespace first.
namespace = namespace.upper()
try:
return namespace, name.upper(), NAMESPACES[namespace][name.upper()]
except KeyError:
# - Try all the other namespaces.
for ns, keys in NAMESPACES.iteritems():
if ns.upper() == name.upper():
return None, ns, keys
elif isinstance(keys, dict):
try:
return ns, name.upper(), keys[name.upper()]
except KeyError:
pass
# - See if name is a qualname last.
return None, name.upper(), DEFAULTS[name.upper()]
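# A minimal sketch of how the helpers above fit together (illustrative, not
# part of the public API; the values depend on the NAMESPACES contents).
def _example_find_usage():
    # DEFAULTS maps fully qualified keys such as "BROKER_HOST" to defaults.
    broker_keys = [k for k in DEFAULTS if k.startswith("BROKER_")]
    # find() resolves a short option name to (namespace, key, Option).
    ns, key, opt = find("acks_late")
    return broker_keys, ns, key, opt.default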
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import hashlib
import hmac
import os
import socket
import urlparse
import eventlet
import httplib2
from oslo.config import cfg
from quantumclient.v2_0 import client
import webob
from quantum.common import config
from quantum.common import utils
from quantum.openstack.common import log as logging
from quantum import wsgi
LOG = logging.getLogger(__name__)
DEVICE_OWNER_ROUTER_INTF = "network:router_interface"
class MetadataProxyHandler(object):
OPTS = [
cfg.StrOpt('admin_user',
help=_("Admin user")),
cfg.StrOpt('admin_password',
help=_("Admin password"),
secret=True),
cfg.StrOpt('admin_tenant_name',
help=_("Admin tenant name")),
cfg.StrOpt('auth_url',
help=_("Authentication URL")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.StrOpt('auth_region',
help=_("Authentication region")),
cfg.StrOpt('endpoint_type',
default='adminURL',
help=_("Network service endpoint type to pull from "
"the keystone catalog")),
cfg.StrOpt('nova_metadata_ip', default='127.0.0.1',
help=_("IP address used by Nova metadata server.")),
cfg.IntOpt('nova_metadata_port',
default=8775,
help=_("TCP Port used by Nova metadata server.")),
cfg.StrOpt('metadata_proxy_shared_secret',
default='',
help=_('Shared secret to sign instance-id request'),
secret=True)
]
def __init__(self, conf):
self.conf = conf
self.auth_info = {}
def _get_quantum_client(self):
qclient = client.Client(
username=self.conf.admin_user,
password=self.conf.admin_password,
tenant_name=self.conf.admin_tenant_name,
auth_url=self.conf.auth_url,
auth_strategy=self.conf.auth_strategy,
region_name=self.conf.auth_region,
auth_token=self.auth_info.get('auth_token'),
endpoint_url=self.auth_info.get('endpoint_url'),
endpoint_type=self.conf.endpoint_type
)
return qclient
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
try:
LOG.debug(_("Request: %s"), req)
instance_id = self._get_instance_id(req)
if instance_id:
return self._proxy_request(instance_id, req)
else:
return webob.exc.HTTPNotFound()
except Exception:
LOG.exception(_("Unexpected error."))
msg = _('An unknown error has occurred. '
'Please try your request again.')
return webob.exc.HTTPInternalServerError(explanation=unicode(msg))
def _get_instance_id(self, req):
qclient = self._get_quantum_client()
remote_address = req.headers.get('X-Forwarded-For')
network_id = req.headers.get('X-Quantum-Network-ID')
router_id = req.headers.get('X-Quantum-Router-ID')
if network_id:
networks = [network_id]
else:
internal_ports = qclient.list_ports(
device_id=router_id,
device_owner=DEVICE_OWNER_ROUTER_INTF)['ports']
networks = [p['network_id'] for p in internal_ports]
ports = qclient.list_ports(
network_id=networks,
fixed_ips=['ip_address=%s' % remote_address])['ports']
self.auth_info = qclient.get_auth_info()
if len(ports) == 1:
return ports[0]['device_id']
def _proxy_request(self, instance_id, req):
headers = {
'X-Forwarded-For': req.headers.get('X-Forwarded-For'),
'X-Instance-ID': instance_id,
'X-Instance-ID-Signature': self._sign_instance_id(instance_id)
}
url = urlparse.urlunsplit((
'http',
'%s:%s' % (self.conf.nova_metadata_ip,
self.conf.nova_metadata_port),
req.path_info,
req.query_string,
''))
h = httplib2.Http()
resp, content = h.request(url, method=req.method, headers=headers,
body=req.body)
if resp.status == 200:
LOG.debug(str(resp))
return content
elif resp.status == 403:
msg = _(
'The remote metadata server responded with Forbidden. This '
'response usually occurs when shared secrets do not match.'
)
LOG.warn(msg)
return webob.exc.HTTPForbidden()
elif resp.status == 404:
return webob.exc.HTTPNotFound()
elif resp.status == 409:
return webob.exc.HTTPConflict()
elif resp.status == 500:
msg = _(
'Remote metadata server experienced an internal server error.'
)
LOG.warn(msg)
return webob.exc.HTTPInternalServerError(explanation=unicode(msg))
else:
raise Exception(_('Unexpected response code: %s') % resp.status)
def _sign_instance_id(self, instance_id):
return hmac.new(self.conf.metadata_proxy_shared_secret,
instance_id,
hashlib.sha256).hexdigest()
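    # A minimal, self-contained sketch (not used by the agent) of the check
    # the receiving metadata service performs on the X-Instance-ID-Signature
    # header; the argument names are illustrative.
    def _example_verify_signature(self, shared_secret, instance_id, signature):
        expected = hmac.new(shared_secret, instance_id,
                            hashlib.sha256).hexdigest()
        # A constant-time comparison would be preferable in production code.
        return expected == signature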
class UnixDomainHttpProtocol(eventlet.wsgi.HttpProtocol):
def __init__(self, request, client_address, server):
if client_address == '':
client_address = ('<local>', 0)
        # The base class is old-style, so calling super() does not work properly.
eventlet.wsgi.HttpProtocol.__init__(self, request, client_address,
server)
class UnixDomainWSGIServer(wsgi.Server):
def start(self, application, file_socket, backlog=128):
sock = eventlet.listen(file_socket,
family=socket.AF_UNIX,
backlog=backlog)
self.pool.spawn_n(self._run, application, sock)
def _run(self, application, socket):
"""Start a WSGI service in a new green thread."""
logger = logging.getLogger('eventlet.wsgi.server')
eventlet.wsgi.server(socket,
application,
custom_pool=self.pool,
protocol=UnixDomainHttpProtocol,
log=logging.WritableLogger(logger))
class UnixDomainMetadataProxy(object):
OPTS = [
cfg.StrOpt('metadata_proxy_socket',
default='$state_path/metadata_proxy',
help=_('Location for Metadata Proxy UNIX domain socket'))
]
def __init__(self, conf):
self.conf = conf
dirname = os.path.dirname(cfg.CONF.metadata_proxy_socket)
if os.path.isdir(dirname):
try:
os.unlink(cfg.CONF.metadata_proxy_socket)
except OSError:
if os.path.exists(cfg.CONF.metadata_proxy_socket):
raise
else:
os.makedirs(dirname, 0755)
def run(self):
server = UnixDomainWSGIServer('quantum-metadata-agent')
server.start(MetadataProxyHandler(self.conf),
self.conf.metadata_proxy_socket)
server.wait()
def main():
eventlet.monkey_patch()
cfg.CONF.register_opts(UnixDomainMetadataProxy.OPTS)
cfg.CONF.register_opts(MetadataProxyHandler.OPTS)
cfg.CONF(project='quantum')
config.setup_logging(cfg.CONF)
utils.log_opt_values(LOG)
proxy = UnixDomainMetadataProxy(cfg.CONF)
proxy.run()
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to create TensorProtos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import six
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.util import compat
# TODO(opensource): Add support for pyx_library in the open-source build.
# For now, we use the slow versions that fast_tensor_util replaces.
# pylint: disable=g-import-not-at-top
try:
from tensorflow.python.framework import fast_tensor_util
_FAST_TENSOR_UTIL_AVAILABLE = True
except ImportError:
_FAST_TENSOR_UTIL_AVAILABLE = False
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
# pylint: enable=g-import-not-at-top
if _FAST_TENSOR_UTIL_AVAILABLE:
_NP_TO_APPEND_FN = {
np.float32: fast_tensor_util.AppendFloat32ArrayToTensorProto,
np.float64: fast_tensor_util.AppendFloat64ArrayToTensorProto,
np.int32: fast_tensor_util.AppendInt32ArrayToTensorProto,
np.int64: fast_tensor_util.AppendInt64ArrayToTensorProto,
np.uint8: fast_tensor_util.AppendUInt8ArrayToTensorProto,
np.int16: fast_tensor_util.AppendInt16ArrayToTensorProto,
np.int8: fast_tensor_util.AppendInt8ArrayToTensorProto,
np.complex64: fast_tensor_util.AppendComplex64ArrayToTensorProto,
np.complex128: fast_tensor_util.AppendComplex128ArrayToTensorProto,
np.object: fast_tensor_util.AppendObjectArrayToTensorProto,
np.bool: fast_tensor_util.AppendBoolArrayToTensorProto,
dtypes.qint8.as_numpy_dtype:
fast_tensor_util.AppendInt8ArrayToTensorProto,
dtypes.quint8.as_numpy_dtype:
fast_tensor_util.AppendUInt8ArrayToTensorProto,
dtypes.qint32.as_numpy_dtype:
fast_tensor_util.AppendInt32ArrayToTensorProto,
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
}
else:
def SlowAppendFloat32ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.float_val.extend([np.asscalar(x) for x in proto_values])
def SlowAppendFloat64ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.double_val.extend([np.asscalar(x) for x in proto_values])
def SlowAppendIntArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.int_val.extend([np.asscalar(x) for x in proto_values])
def SlowAppendInt64ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.int64_val.extend([np.asscalar(x) for x in proto_values])
def SlowAppendComplexArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.scomplex_val.extend([np.asscalar(v)
for x in proto_values
for v in [x.real, x.imag]])
def SlowAppendObjectArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])
def SlowAppendBoolArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.bool_val.extend([np.asscalar(x) for x in proto_values])
_NP_TO_APPEND_FN = {
np.float32: SlowAppendFloat32ArrayToTensorProto,
np.float64: SlowAppendFloat64ArrayToTensorProto,
np.int32: SlowAppendIntArrayToTensorProto,
np.int64: SlowAppendInt64ArrayToTensorProto,
np.uint8: SlowAppendIntArrayToTensorProto,
np.int16: SlowAppendIntArrayToTensorProto,
np.int8: SlowAppendIntArrayToTensorProto,
np.complex64: SlowAppendComplexArrayToTensorProto,
np.complex128: SlowAppendComplexArrayToTensorProto,
np.object: SlowAppendObjectArrayToTensorProto,
np.bool: SlowAppendBoolArrayToTensorProto,
dtypes.qint8.as_numpy_dtype: SlowAppendIntArrayToTensorProto,
dtypes.quint8.as_numpy_dtype: SlowAppendIntArrayToTensorProto,
dtypes.qint32.as_numpy_dtype: SlowAppendIntArrayToTensorProto,
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
}
def GetFromNumpyDTypeDict(dtype_dict, dtype):
# NOTE: dtype_dict.get(dtype) always returns None.
for key, val in six.iteritems(dtype_dict):
if key == dtype:
return val
return None
def GetNumpyAppendFn(dtype):
  # numpy dtypes for strings are variable length. We cannot compare a
  # dtype against a single constant (np.string does not exist) to decide
  # whether it is a "string" type. We need to compare dtype.type to be
  # sure it's a string type.
if dtype.type == np.string_ or dtype.type == np.unicode_:
if _FAST_TENSOR_UTIL_AVAILABLE:
return fast_tensor_util.AppendObjectArrayToTensorProto
else:
return SlowAppendObjectArrayToTensorProto
return GetFromNumpyDTypeDict(_NP_TO_APPEND_FN, dtype)
def MakeTensorShapeProto(shape):
"""Create a TensorShapeProto.
Args:
shape: List of integers representing the dimensions of the tensor.
Returns:
A TensorShapeProto.
"""
return tensor_shape_pb2.TensorShapeProto(
dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=x) for x in shape])
def TensorShapeProtoToList(shape):
"""Convert a TensorShape to a list.
Args:
shape: A TensorShapeProto.
Returns:
List of integers representing the dimensions of the tensor.
"""
return [dim.size for dim in shape.dim]
def _GetDenseDimensions(list_of_lists):
"""Returns the inferred dense dimensions of a list of lists."""
if not isinstance(list_of_lists, (list, tuple)):
return []
elif not list_of_lists:
return [0]
else:
return [len(list_of_lists)] + _GetDenseDimensions(list_of_lists[0])
def _FlattenToStrings(nested_strings):
if isinstance(nested_strings, list):
for inner in nested_strings:
for flattened_string in _FlattenToStrings(inner):
yield flattened_string
else:
yield nested_strings
_TENSOR_CONTENT_TYPES = frozenset([
dtypes.float32, dtypes.float64, dtypes.int32, dtypes.uint8, dtypes.int16,
dtypes.int8, dtypes.int64
])
class _Message(object):
def __init__(self, message):
self._message = message
def __repr__(self):
return self._message
def _FirstNotNone(l):
for x in l:
if x is not None:
if isinstance(x, ops.Tensor):
return _Message("list containing Tensors")
else:
return x
return None
def _NotNone(v):
if v is None:
return _Message("None")
else:
return v
def _FilterInt(v):
if isinstance(v, (list, tuple)):
return _FirstNotNone([_FilterInt(x) for x in v])
return None if isinstance(v, compat.integral_types) else _NotNone(v)
def _FilterFloat(v):
if isinstance(v, (list, tuple)):
return _FirstNotNone([_FilterFloat(x) for x in v])
return None if isinstance(v, compat.real_types) else _NotNone(v)
def _FilterComplex(v):
if isinstance(v, (list, tuple)):
return _FirstNotNone([_FilterComplex(x) for x in v])
return None if isinstance(v, compat.complex_types) else _NotNone(v)
def _FilterStr(v):
if isinstance(v, (list, tuple)):
return _FirstNotNone([_FilterStr(x) for x in v])
if isinstance(v, compat.bytes_or_text_types):
return None
else:
return _NotNone(v)
def _FilterBool(v):
if isinstance(v, (list, tuple)):
return _FirstNotNone([_FilterBool(x) for x in v])
return None if isinstance(v, bool) else _NotNone(v)
def _FilterNotTensor(v):
if isinstance(v, (list, tuple)):
return _FirstNotNone([_FilterNotTensor(x) for x in v])
return str(v) if isinstance(v, ops.Tensor) else None
_TF_TO_IS_OK = {
dtypes.float32: _FilterFloat,
dtypes.float64: _FilterFloat,
dtypes.int32: _FilterInt,
dtypes.uint8: _FilterInt,
dtypes.int16: _FilterInt,
dtypes.int8: _FilterInt,
dtypes.string: _FilterStr,
dtypes.complex64: _FilterComplex,
dtypes.int64: _FilterInt,
dtypes.bool: _FilterBool,
dtypes.qint32: _FilterInt,
dtypes.quint8: _FilterInt,
dtypes.qint8: _FilterInt,
}
def _AssertCompatible(values, dtype):
fn = _TF_TO_IS_OK.get(dtype, _FilterNotTensor)
mismatch = fn(values)
if mismatch is not None:
if dtype is None:
raise TypeError("List of Tensors when single Tensor expected")
else:
raise TypeError("Expected %s, got %s of type '%s' instead." %
(dtype.name, repr(mismatch), type(mismatch).__name__))
def make_tensor_proto(values, dtype=None, shape=None):
"""Create a TensorProto.
Args:
values: Values to put in the TensorProto.
dtype: Optional tensor_pb2 DataType value.
shape: List of integers representing the dimensions of tensor.
Returns:
A TensorProto. Depending on the type, it may contain data in the
"tensor_content" attribute, which is not directly useful to Python programs.
To access the values you should convert the proto back to a numpy ndarray
with tensor_util.MakeNdarray(proto).
Raises:
TypeError: if unsupported types are provided.
ValueError: if arguments have inappropriate values.
make_tensor_proto accepts "values" of a python scalar, a python list, a
numpy ndarray, or a numpy scalar.
If "values" is a python scalar or a python list, make_tensor_proto
first convert it to numpy ndarray. If dtype is None, the
conversion tries its best to infer the right numpy data
type. Otherwise, the resulting numpy array has a compatible data
type with the given dtype.
In either case above, the numpy ndarray (either the caller provided
or the auto converted) must have the compatible type with dtype.
make_tensor_proto then converts the numpy array to a tensor proto.
If "shape" is None, the resulting tensor proto represents the numpy
array precisely.
Otherwise, "shape" specifies the tensor's shape and the numpy array
can not have more elements than what "shape" specifies.
"""
if dtype:
dtype = dtypes.as_dtype(dtype)
# We first convert value to a numpy array or scalar.
if isinstance(values, (np.ndarray, np.generic)):
if dtype:
nparray = values.astype(dtype.as_numpy_dtype)
else:
nparray = values
else:
if values is None:
raise ValueError("None values not supported.")
# if dtype is provided, forces numpy array to be the type
# provided if possible.
np_dt = dtype.as_numpy_dtype if dtype else None
if np.prod(shape) == 0:
nparray = np.empty(shape, dtype=np_dt)
else:
_AssertCompatible(values, dtype)
nparray = np.array(values, dtype=np_dt)
if list(nparray.shape) != _GetDenseDimensions(values):
raise ValueError("Argument must be a dense tensor: %s" % values)
# python/numpy default float type is float64. We prefer float32 instead.
if (nparray.dtype == np.float64) and dtype is None:
nparray = nparray.astype(np.float32)
# python/numpy default int type is int64. We prefer int32 instead.
elif (nparray.dtype == np.int64) and dtype is None:
nparray = nparray.astype(np.int32)
# if dtype is provided, it must be compatible with what numpy
# conversion says.
numpy_dtype = dtypes.as_dtype(nparray.dtype)
if numpy_dtype is None:
raise TypeError("Unrecognized data type: %s" % nparray.dtype)
# If dtype was specified and is a quantized type, we convert
# numpy_dtype back into the quantized version.
if dtype in [dtypes.qint8, dtypes.quint8, dtypes.qint32]:
numpy_dtype = dtype
if dtype is not None and not dtype.base_dtype == numpy_dtype.base_dtype:
raise TypeError("Incompatible types: %s vs. %s" % (dtype, nparray.dtype))
# If shape is not given, get the shape from the numpy array.
if shape is None:
shape = nparray.shape
is_same_size = True
shape_size = nparray.size
else:
shape = [int(dim) for dim in shape]
shape_size = np.prod(shape)
is_same_size = shape_size == nparray.size
if nparray.size > shape_size:
raise ValueError(
"Too many elements provided. Needed at most %d, but received %d" %
(shape_size, nparray.size))
tensor_proto = tensor_pb2.TensorProto(
dtype=numpy_dtype.as_datatype_enum,
tensor_shape=MakeTensorShapeProto(shape))
if is_same_size and numpy_dtype in _TENSOR_CONTENT_TYPES and shape_size > 1:
tensor_proto.tensor_content = nparray.tostring()
return tensor_proto
# If we were not given values as a numpy array, compute the proto_values
# from the given values directly, to avoid numpy trimming nulls from the
# strings. Since values could be a list of strings, or a multi-dimensional
# list of lists that might or might not correspond to the given shape,
# we flatten it conservatively.
if numpy_dtype == dtypes.string and not isinstance(values, np.ndarray):
proto_values = _FlattenToStrings(values)
tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])
return tensor_proto
# TensorFlow expects C order (a.k.a., eigen row major).
proto_values = nparray.ravel()
append_fn = GetNumpyAppendFn(proto_values.dtype)
if append_fn is None:
raise TypeError("Element type not supported in TensorProto: %s" %
numpy_dtype.name)
append_fn(tensor_proto, proto_values)
return tensor_proto
def MakeNdarray(tensor):
"""Create a numpy ndarray from a tensor.
Create a numpy ndarray with the same shape and data as the tensor.
Args:
tensor: A TensorProto.
Returns:
A numpy array with the tensor contents.
Raises:
TypeError: if tensor has unsupported type.
"""
shape = [d.size for d in tensor.tensor_shape.dim]
num_elements = np.prod(shape)
tensor_dtype = dtypes.as_dtype(tensor.dtype)
dtype = tensor_dtype.as_numpy_dtype
if tensor.tensor_content:
return np.fromstring(tensor.tensor_content, dtype=dtype).reshape(shape)
elif tensor_dtype == dtypes.float32:
if len(tensor.float_val) == 1:
return np.repeat(np.array(tensor.float_val[0], dtype=dtype),
num_elements).reshape(shape)
else:
return np.fromiter(tensor.float_val, dtype=dtype).reshape(shape)
elif tensor_dtype == dtypes.float64:
if len(tensor.double_val) == 1:
return np.repeat(np.array(tensor.double_val[0], dtype=dtype),
num_elements).reshape(shape)
else:
return np.fromiter(tensor.double_val, dtype=dtype).reshape(shape)
elif tensor_dtype in [dtypes.int32, dtypes.uint8, dtypes.int16, dtypes.int8,
dtypes.qint32, dtypes.quint8, dtypes.qint8,
dtypes.bfloat16]:
if len(tensor.int_val) == 1:
return np.repeat(np.array(tensor.int_val[0], dtype=dtype),
num_elements).reshape(shape)
else:
return np.fromiter(tensor.int_val, dtype=dtype).reshape(shape)
elif tensor_dtype == dtypes.int64:
if len(tensor.int64_val) == 1:
return np.repeat(np.array(tensor.int64_val[0], dtype=dtype),
num_elements).reshape(shape)
else:
return np.fromiter(tensor.int64_val, dtype=dtype).reshape(shape)
elif tensor_dtype == dtypes.string:
if len(tensor.string_val) == 1:
return np.repeat(np.array(tensor.string_val[0], dtype=dtype),
num_elements).reshape(shape)
else:
return np.array([x for x in tensor.string_val],
dtype=dtype).reshape(shape)
elif tensor_dtype == dtypes.complex64:
it = iter(tensor.scomplex_val)
if len(tensor.scomplex_val) == 2:
return np.repeat(np.array(complex(tensor.scomplex_val[0],
tensor.scomplex_val[1]), dtype=dtype),
num_elements).reshape(shape)
else:
return np.array([complex(x[0], x[1]) for x in zip(it, it)],
dtype=dtype).reshape(shape)
elif tensor_dtype == dtypes.bool:
if len(tensor.bool_val) == 1:
return np.repeat(np.array(tensor.bool_val[0], dtype=dtype),
num_elements).reshape(shape)
else:
return np.fromiter(tensor.bool_val, dtype=dtype).reshape(shape)
else:
raise TypeError("Unsupported tensor type: %s" % tensor.dtype)
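def _ExampleRoundTrip():
  """A minimal round-trip sketch; illustrative only, not part of the API.

  make_tensor_proto serializes a numpy array into a TensorProto, and
  MakeNdarray recovers an equivalent ndarray from that proto.
  """
  original = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
  proto = make_tensor_proto(original)
  recovered = MakeNdarray(proto)
  assert recovered.dtype == np.float32
  assert recovered.shape == (2, 2)
  return recovered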
def ShapeEquals(tensor_proto, shape):
"""Returns True if "tensor_proto" has the given "shape".
Args:
tensor_proto: A TensorProto.
shape: A tensor shape, expressed as a TensorShape, list, or tuple.
Returns:
True if "tensor_proto" has the given "shape", otherwise False.
Raises:
TypeError: If "tensor_proto" is not a TensorProto, or shape is not a
TensorShape, list, or tuple.
"""
if not isinstance(tensor_proto, tensor_pb2.TensorProto):
raise TypeError("tensor_proto is not a tensor_pb2.TensorProto object")
if isinstance(shape, tensor_shape_pb2.TensorShapeProto):
shape = [d.size for d in shape.dim]
elif not isinstance(shape, (list, tuple)):
raise TypeError("shape is not a list or tuple")
tensor_shape_list = [d.size for d in tensor_proto.tensor_shape.dim]
return all(x == y for x, y in zip(tensor_shape_list, shape))
def ConstantValue(tensor):
"""Returns the constant value of the given tensor, if efficiently calculable.
This function attempts to partially evaluate the given tensor, and
returns its value as a numpy ndarray if this succeeds.
TODO(mrry): Consider whether this function should use a registration
mechanism like gradients and ShapeFunctions, so that it is easily
extensible.
Args:
tensor: The Tensor to be evaluated.
Returns:
A numpy ndarray containing the constant value of the given `tensor`,
or None if it cannot be calculated.
Raises:
TypeError: if tensor is not an ops.Tensor.
"""
# TODO(touts): Support Variables?
if not isinstance(tensor, ops.Tensor):
raise TypeError("tensor is not a Tensor")
if tensor.op.type == "Const":
return MakeNdarray(tensor.op.get_attr("value"))
elif tensor.op.type == "Shape":
input_shape = tensor.op.inputs[0].get_shape()
if input_shape.is_fully_defined():
return np.array([dim.value for dim in input_shape.dims])
else:
return None
elif tensor.op.type == "Size":
input_shape = tensor.op.inputs[0].get_shape()
if input_shape.is_fully_defined():
return np.array([np.prod([dim.value for dim in input_shape.dims])])
else:
return None
elif tensor.op.type == "Rank":
input_shape = tensor.op.inputs[0].get_shape()
if input_shape.ndims is not None:
return np.array([input_shape.ndims])
else:
return None
elif tensor.op.type == "Range":
start = ConstantValue(tensor.op.inputs[0])
if start is None:
return None
limit = ConstantValue(tensor.op.inputs[1])
if limit is None:
return None
delta = ConstantValue(tensor.op.inputs[2])
if delta is None:
return None
return np.arange(start, limit, delta, dtype=tensor.dtype.as_numpy_dtype)
else:
return None
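# A minimal sketch of ConstantValue in action (illustrative; assumes a tensor
# built elsewhere, e.g. with tf.constant):
#
#   c = tf.constant([1, 2, 3])
#   ConstantValue(c)            # -> np.array([1, 2, 3], dtype=np.int32)
#   ConstantValue(tf.shape(c))  # -> np.array([3]) when the shape is fully known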
|
|
"""Tests for the HomeKit component."""
from unittest.mock import patch, ANY, Mock
import pytest
from homeassistant import setup
from homeassistant.components.homekit import (
generate_aid, HomeKit, MAX_DEVICES, STATUS_READY,
STATUS_RUNNING, STATUS_STOPPED, STATUS_WAIT)
from homeassistant.components.homekit.accessories import HomeBridge
from homeassistant.components.homekit.const import (
CONF_AUTO_START, CONF_SAFE_MODE, BRIDGE_NAME, DEFAULT_PORT,
DEFAULT_SAFE_MODE, DOMAIN, HOMEKIT_FILE, SERVICE_HOMEKIT_START)
from homeassistant.const import (
CONF_NAME, CONF_IP_ADDRESS, CONF_PORT,
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP)
from homeassistant.core import State
from homeassistant.helpers.entityfilter import generate_filter
from tests.components.homekit.common import patch_debounce
IP_ADDRESS = '127.0.0.1'
PATH_HOMEKIT = 'homeassistant.components.homekit'
@pytest.fixture(scope='module')
def debounce_patcher():
"""Patch debounce method."""
patcher = patch_debounce()
yield patcher.start()
patcher.stop()
def test_generate_aid():
"""Test generate aid method."""
aid = generate_aid('demo.entity')
assert isinstance(aid, int)
assert aid >= 2 and aid <= 18446744073709551615
with patch(PATH_HOMEKIT + '.adler32') as mock_adler32:
mock_adler32.side_effect = [0, 1]
assert generate_aid('demo.entity') is None
async def test_setup_min(hass):
"""Test async_setup with min config options."""
with patch(PATH_HOMEKIT + '.HomeKit') as mock_homekit:
assert await setup.async_setup_component(
hass, DOMAIN, {DOMAIN: {}})
mock_homekit.assert_any_call(hass, BRIDGE_NAME, DEFAULT_PORT, None, ANY,
{}, DEFAULT_SAFE_MODE)
assert mock_homekit().setup.called is True
# Test auto start enabled
mock_homekit.reset_mock()
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
mock_homekit().start.assert_called_with(ANY)
async def test_setup_auto_start_disabled(hass):
"""Test async_setup with auto start disabled and test service calls."""
config = {DOMAIN: {CONF_AUTO_START: False, CONF_NAME: 'Test Name',
CONF_PORT: 11111, CONF_IP_ADDRESS: '172.0.0.0',
CONF_SAFE_MODE: DEFAULT_SAFE_MODE}}
with patch(PATH_HOMEKIT + '.HomeKit') as mock_homekit:
mock_homekit.return_value = homekit = Mock()
assert await setup.async_setup_component(
hass, DOMAIN, config)
mock_homekit.assert_any_call(hass, 'Test Name', 11111, '172.0.0.0', ANY,
{}, DEFAULT_SAFE_MODE)
assert mock_homekit().setup.called is True
# Test auto_start disabled
homekit.reset_mock()
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert homekit.start.called is False
# Test start call with driver is ready
homekit.reset_mock()
homekit.status = STATUS_READY
await hass.services.async_call(
DOMAIN, SERVICE_HOMEKIT_START, blocking=True)
assert homekit.start.called is True
# Test start call with driver started
homekit.reset_mock()
homekit.status = STATUS_STOPPED
await hass.services.async_call(
DOMAIN, SERVICE_HOMEKIT_START, blocking=True)
assert homekit.start.called is False
async def test_homekit_setup(hass, hk_driver):
"""Test setup of bridge and driver."""
homekit = HomeKit(hass, BRIDGE_NAME, DEFAULT_PORT, None, {}, {},
DEFAULT_SAFE_MODE)
with patch(PATH_HOMEKIT + '.accessories.HomeDriver',
return_value=hk_driver) as mock_driver, \
patch('homeassistant.util.get_local_ip') as mock_ip:
mock_ip.return_value = IP_ADDRESS
await hass.async_add_job(homekit.setup)
path = hass.config.path(HOMEKIT_FILE)
assert isinstance(homekit.bridge, HomeBridge)
mock_driver.assert_called_with(
hass, address=IP_ADDRESS, port=DEFAULT_PORT, persist_file=path)
assert homekit.driver.safe_mode is False
# Test if stop listener is setup
assert hass.bus.async_listeners().get(EVENT_HOMEASSISTANT_STOP) == 1
async def test_homekit_setup_ip_address(hass, hk_driver):
"""Test setup with given IP address."""
homekit = HomeKit(hass, BRIDGE_NAME, DEFAULT_PORT, '172.0.0.0', {}, {},
None)
with patch(PATH_HOMEKIT + '.accessories.HomeDriver',
return_value=hk_driver) as mock_driver:
await hass.async_add_job(homekit.setup)
mock_driver.assert_called_with(
hass, address='172.0.0.0', port=DEFAULT_PORT, persist_file=ANY)
async def test_homekit_setup_safe_mode(hass, hk_driver):
"""Test if safe_mode flag is set."""
homekit = HomeKit(hass, BRIDGE_NAME, DEFAULT_PORT, None, {}, {}, True)
with patch(PATH_HOMEKIT + '.accessories.HomeDriver',
return_value=hk_driver):
await hass.async_add_job(homekit.setup)
assert homekit.driver.safe_mode is True
async def test_homekit_add_accessory():
"""Add accessory if config exists and get_acc returns an accessory."""
homekit = HomeKit('hass', None, None, None, lambda entity_id: True, {},
None)
homekit.driver = 'driver'
homekit.bridge = mock_bridge = Mock()
with patch(PATH_HOMEKIT + '.get_accessory') as mock_get_acc:
mock_get_acc.side_effect = [None, 'acc', None]
homekit.add_bridge_accessory(State('light.demo', 'on'))
mock_get_acc.assert_called_with('hass', 'driver', ANY, 363398124, {})
assert not mock_bridge.add_accessory.called
homekit.add_bridge_accessory(State('demo.test', 'on'))
mock_get_acc.assert_called_with('hass', 'driver', ANY, 294192020, {})
assert mock_bridge.add_accessory.called
homekit.add_bridge_accessory(State('demo.test_2', 'on'))
mock_get_acc.assert_called_with('hass', 'driver', ANY, 429982757, {})
mock_bridge.add_accessory.assert_called_with('acc')
async def test_homekit_entity_filter(hass):
"""Test the entity filter."""
entity_filter = generate_filter(['cover'], ['demo.test'], [], [])
homekit = HomeKit(hass, None, None, None, entity_filter, {}, None)
with patch(PATH_HOMEKIT + '.get_accessory') as mock_get_acc:
mock_get_acc.return_value = None
homekit.add_bridge_accessory(State('cover.test', 'open'))
assert mock_get_acc.called is True
mock_get_acc.reset_mock()
homekit.add_bridge_accessory(State('demo.test', 'on'))
assert mock_get_acc.called is True
mock_get_acc.reset_mock()
homekit.add_bridge_accessory(State('light.demo', 'light'))
assert mock_get_acc.called is False
async def test_homekit_start(hass, hk_driver, debounce_patcher):
"""Test HomeKit start method."""
pin = b'123-45-678'
homekit = HomeKit(hass, None, None, None, {}, {'cover.demo': {}}, None)
homekit.bridge = Mock()
homekit.bridge.accessories = []
homekit.driver = hk_driver
hass.states.async_set('light.demo', 'on')
state = hass.states.async_all()[0]
with patch(PATH_HOMEKIT + '.HomeKit.add_bridge_accessory') as \
mock_add_acc, \
patch(PATH_HOMEKIT + '.show_setup_message') as mock_setup_msg, \
patch('pyhap.accessory_driver.AccessoryDriver.add_accessory') as \
hk_driver_add_acc, \
patch('pyhap.accessory_driver.AccessoryDriver.start') as \
hk_driver_start:
await hass.async_add_job(homekit.start)
mock_add_acc.assert_called_with(state)
mock_setup_msg.assert_called_with(hass, pin)
hk_driver_add_acc.assert_called_with(homekit.bridge)
assert hk_driver_start.called
assert homekit.status == STATUS_RUNNING
# Test start() if already started
hk_driver_start.reset_mock()
await hass.async_add_job(homekit.start)
assert not hk_driver_start.called
async def test_homekit_stop(hass):
"""Test HomeKit stop method."""
homekit = HomeKit(hass, None, None, None, None, None, None)
homekit.driver = Mock()
assert homekit.status == STATUS_READY
await hass.async_add_job(homekit.stop)
homekit.status = STATUS_WAIT
await hass.async_add_job(homekit.stop)
homekit.status = STATUS_STOPPED
await hass.async_add_job(homekit.stop)
assert homekit.driver.stop.called is False
# Test if driver is started
homekit.status = STATUS_RUNNING
await hass.async_add_job(homekit.stop)
assert homekit.driver.stop.called is True
async def test_homekit_too_many_accessories(hass, hk_driver):
"""Test adding too many accessories to HomeKit."""
homekit = HomeKit(hass, None, None, None, None, None, None)
homekit.bridge = Mock()
homekit.bridge.accessories = range(MAX_DEVICES + 1)
homekit.driver = hk_driver
with patch('pyhap.accessory_driver.AccessoryDriver.start'), \
patch('pyhap.accessory_driver.AccessoryDriver.add_accessory'), \
patch('homeassistant.components.homekit._LOGGER.warning') \
as mock_warn:
await hass.async_add_job(homekit.start)
assert mock_warn.called is True
|
|
#-------------------------------------------------------------------------------
# Name: Main
# Purpose: To handle the main gui
#
# Author: ParkerMc
#
# Created: 21/05/2016
# Copyright: (c) ParkerMc 2016
# Licence: MIT
#-------------------------------------------------------------------------------
import error, sys
sys.excepthook = error.excepthook
import os, subprocess, platform, warn, close, mapstm, worldstm
from PyQt4 import QtGui, QtCore, uic
from PyQt4.QtCore import QThread
form_class = uic.loadUiType("ui/main.ui")[0]
class Main(QtGui.QMainWindow, form_class):
def __init__(self, parent=None):
self.forceClose = False
self.lmr = 0
self.worlds = []
self.worldnames = []
self.maps = []
self.mapnames = []
self.backm = []
self.backw = []
self.worldpaths = []
self.filep = ""
global sel
sel = self
QtGui.QMainWindow.__init__(self, parent)
self.setupUi(self)
self.world = QtGui.QComboBox()
self.Dimension = QtGui.QComboBox()
self.Dimension.addItem("Overworld")
self.Dimension.addItem("Nether")
self.Dimension.addItem("End")
self.rmode = QtGui.QComboBox()
self.rmode.addItem("Normal")
self.rmode.addItem("Lighting")
self.rmode.addItem("Smooth Light")
self.rmode.addItem("Night")
self.rmode.addItem("Smooth Night")
self.rmode.addItem("Cave")
self.rmoden = QtGui.QComboBox()
self.rmoden.addItem("Normal")
self.rmoden.addItem("Lighting")
self.rmoden.addItem("Smooth Light")
self.nDir = QtGui.QComboBox()
self.nDir.addItem("Upper Left")
self.nDir.addItem("Upper Right")
self.nDir.addItem("Lower Left")
self.nDir.addItem("Lower Right")
self.imgfor = QtGui.QComboBox()
self.imgfor.addItem("Png")
self.imgfor.addItem("Jpg")
self.imgfor.addItem("Jpeg")
self.outputb.clicked.connect(self.outputbf)
self.packb.clicked.connect(self.packbf)
self.actionOpen.triggered.connect(self.actionOpenf)
self.actionSave.triggered.connect(self.save)
self.actionRun.triggered.connect(self.srun)
self.Worlds.cellDoubleClicked.connect(self.path)
self.Worlds.cellClicked.connect(self.path)
self.Worlds.cellEntered.connect(self.path)
self.Worlds.cellChanged.connect(self.onchangeworld)
self.Maps.cellChanged.connect(self.oncchangemap)
self.Maps.currentCellChanged.connect(self.onchangemap)
self.Maps.cellClicked.connect(self.onchangemap)
self.Maps.cellEntered.connect(self.onchangemap)
self.Maps.cellDoubleClicked.connect(self.onchangemap)
self.Dimension.currentIndexChanged.connect(self.Dimensionc)
self.nDir.currentIndexChanged.connect(self.nDirc)
self.imgfor.currentIndexChanged.connect(self.imgforc)
self.rmode.currentIndexChanged.connect(self.rmodec)
self.rmoden.currentIndexChanged.connect(self.rmodenc)
self.run.clicked.connect(self.srun)
self.actionWebsite.triggered.connect(self.actionWebsitef)
QtGui.QShortcut(QtGui.QKeySequence("Ctrl+S"), self, self.savekey)
QtGui.QShortcut(QtGui.QKeySequence("Ctrl+R"), self, self.runkey)
self.dump()
def actionWebsitef(self):
msgbox = QtGui.QMessageBox(self)
msgbox.setWindowTitle("Minecraft Map Gen")
msgbox.setText("<a href='http://ParkerMc.ddns.net'> ParkerMc.ddns.net </a>")
msgbox.exec_()
def runkey(self,no="no"):
if no == "no":
self.srun()
def savekey(self,no="no"):
if no == "no":
self.save()
def dump(self):
self.backm = []
self.backw = []
for i in range(1,self.Worlds.rowCount()):
try: self.backw.append(str(self.Worlds.item(i,0).text()))
except: self.backw.append("")
for i in range(1,self.Maps.rowCount()-1):
try: self.backm.append((self.Maps.item(i,0).text()))
except: self.backm.append("")
def srun(self):
self.save()
try:
if str(self.outputt.text()).replace(" ","") == "" or not os.path.exists(str(self.outputt.text())):
self.warn("You must provide a valid output path.")
return False
except:
self.warn("You must provide a valid output path.")
return False
try:
if str(self.Worlds.item(1,0).text()).replace(" ","")=="" or str(self.Worlds.item(1,1).text()).replace(" ","")=="":
self.warn("You must have at least 1 world.")
return False
except:
self.warn("You must have at least 1 world.")
return False
try:
if str(self.Maps.item(1,0).text()).replace(" ","")=="" or str(self.Maps.item(1,1).text()).replace(" ","")=="":
self.warn("You must have at least 1 map.")
return False
except:
self.warn("You must have at least 1 map.")
return False
global fileo
fileo = self.filep
run()
self.tabWidget.setFixedWidth(0)
self.run.setFixedWidth(0)
self.output.setFixedWidth(781)
self.cancel.setFixedWidth(91)
def imgforc(self,a):
mapstm.imgforc(self,a)
def nDirc(self,a):
mapstm.nDirc(self,a)
def rmodec(self,a):
mapstm.rmodec(self,a)
def rmodenc(self,a):
mapstm.rmodenc(self,a)
def Dimensionc(self,a):
mapstm.Dimensionc(self,a)
def worldc(self,a):
mapstm.worldc(self,a)
def oncchangemap(self,a,b):
mapstm.oncchangemap(self,a,b)
def mapsgen(self):
mapstm.mapsgen(self)
def newrowmap(self):
mapstm.newrowmap(self)
def onchangemap(self,a,b,c=0,d=0):
mapstm.onchangemap(self,a,b,c,d)
def onchangeworld(self,a,b):
worldstm.onchangeworld(self,a,b)
def path(self,a,b):
worldstm.path(self,a,b)
def actionOpenf(self):
ofile = QtGui.QFileDialog.getOpenFileName(filter="Config File (*.cfg)")
try:
f = open(ofile,"r")
except: return
while self.Maps.rowCount() > 2:
self.Maps.removeRow(self.Maps.rowCount()-1)
while self.Worlds.rowCount() > 2:
self.Worlds.removeRow(self.Worlds.rowCount()-1)
rawfile = f.readlines()
f.close()
self.outputt.setText(str(rawfile[2]).replace("#","").replace("\n","").strip())
self.packt.setText(str(rawfile[3]).replace("#","").replace("\n","").strip())
self.processes.setValue(int(str(rawfile[4]).replace("#","").replace("\n","").strip()))
j = 0
k = 1
for i in str(rawfile[5]).replace("#","").replace("\n","").split(","):
item = QtGui.QTableWidgetItem()
item.setText(i.strip())
self.Worlds.setItem(k,j,item)
if j >= 1:
j = -1
k += 1
j+=1
j = 0
k = 1
for i in str(rawfile[6]).replace("#","").replace("\n","").split(","):
item = QtGui.QTableWidgetItem()
item.setText(i.strip())
self.Maps.setItem(k,j,item)
if j >= 6:
j = -1
k += 1
j+=1
item = QtGui.QTableWidgetItem()
item.setText("Overworld")
self.Maps.setItem(self.Maps.rowCount()-1,2,item)
item = QtGui.QTableWidgetItem()
item.setText("Normal")
self.Maps.setItem(self.Maps.rowCount()-1,3,item)
item = QtGui.QTableWidgetItem()
item.setText("Upper Left")
self.Maps.setItem(self.Maps.rowCount()-1,4,item)
item = QtGui.QTableWidgetItem()
item.setText("Png")
self.Maps.setItem(self.Maps.rowCount()-1,5,item)
item = QtGui.QTableWidgetItem()
item.setText("95")
self.Maps.setItem(self.Maps.rowCount()-1,6,item)
self.dump()
self.filep = ofile
def outputbf(self):
self.outputt.setText(QtGui.QFileDialog.getExistingDirectory(directory=self.outputt.text()))
def packbf(self):
self.packt.setText(QtGui.QFileDialog.getOpenFileName(directory=self.packt.text(),filter="Texture Pack (*.zip; *.jar)"))
def save(self):
if self.filep == "":
try:
self.filep = ""
self.filep = QtGui.QFileDialog.getSaveFileName(filter="Config File (*.cfg)")
except: return False
if self.filep != "":
rsave()
def warncell(self, a, b, i):
print str(a)+"-"+str(b)+"-"+str(i)
def warn(self, i):
self.wdialog = warn.warn(i)
self.wdialog.exec_()
def closeEvent(self, event):
closen = True
if self.filep == "":
if str(self.outputt.text()).strip() != "" or str(self.packt.text()).strip() != "" or str(self.processes.value()).strip() != "1" or len(self.worlds) != 0 or len(self.maps) != 0:
closen = False
else:
f = open(self.filep,"r")
lines = f.readlines()
f.close()
"\n#"+str(self.maps).replace("[","").replace("]","").replace("(","").replace(")","").replace("'","")+"\n \n"
if lines[2].strip() != "#"+str(self.outputt.text()).strip() or lines[3].strip() != "#"+str(self.packt.text()).strip() or lines[4].strip() != "#"+str(self.processes.value()).strip() or lines[5].strip() != "#"+str(self.worlds).replace("[","").replace("]","").replace("(","").replace(")","").replace("'","").strip() or lines[6].strip() != "#"+str(self.maps).replace("[","").replace("]","").replace("(","").replace(")","").replace("'","").strip():
closen = False
if closen:
stopn()
else:
self.cdialog = close.close(self)
self.cdialog.exec_()
if not self.forceClose:
event.ignore()
class Worker(QThread):
def genOut(self):
global sel
output = "#Made with a generator by ParkerMc\n####Do NOT edit####\n"
output += "#"+sel.outputt.text()+"\n"+ "#"+sel.packt.text()+"\n#"+str(sel.processes.value())+"\n#"+str(sel.worlds).replace("[","").replace("]","").replace("(","").replace(")","").replace("'","")+"\n#"+str(sel.maps).replace("[","").replace("]","").replace("(","").replace(")","").replace("'","")+"\n \n"
for i, j in sel.worlds:
output += 'worlds["world_'+i+'"] = "'+ j.replace("level.dat","") +'"\n'
for i, j, k, l, m, n, o in sel.maps:
output += 'renders["'+str(i)+'"] = {\n "world": "world_'+str(j)+'",\n "title": "'+str(i)+'",\n "rendermode": "'
if str(k) == "Nether":
output += str(l).replace("Normal","normal").replace("lighting","lighting").replace("Smooth Light","smooth_lighting")
elif str(k) != "Nether":
output += str(l).replace("Normal","normal").replace("lighting","lighting").replace("Smooth Light","smooth_lighting").replace("Night","night").replace("Smooth Night","smooth_night").replace("Cave","cave")
output += '",\n "dimension": "'+str(k).replace("Overworld","overworld").replace("Nether","nether").replace("End","end")+'",\n "northdirection" : "'+str(m).replace("Upper Left","upper-left").replace("Upper Right","upper-right").replace("Lower Left","lower-left").replace("Lower Right","lower-right")+'",\n "imgformat" : "'+str(n).replace("Png","png").replace("Jpg","jpg").replace("Jpeg","jpeg")+'",\n "imgquality" : "'+str(o)+'",\n } \n \n'
output += 'outputdir = "'+str(sel.outputt.text()).replace("\\","/")+'"'
        if str(sel.packt.text()).replace(" ","") != "":
output += '\ntexturepath = "'+str(sel.packt.text()).replace("\\","/")+'"'
return output
def run(self):
        global go
        global sel
        global fileo
        global stop
        global genoutput
while not stop:
if genoutput:
output = self.genOut()
f = open(str(sel.filep).replace("\\","/"),"w")
f.write(output)
f.close()
genoutput = False
if go:
if platform.system() == "Windows" and platform.architecture() == "32Bit":
proc = subprocess.Popen('32bit\\overviewer.exe --config="'+str(fileo)+'"', shell=True, stdout=subprocess.PIPE)
if platform.system() == "Windows" and platform.architecture() == "64Bit":
proc = subprocess.Popen('64bit\\overviewer.exe --config="'+str(fileo)+'"', shell=True, stdout=subprocess.PIPE)
if platform.system() == "Linux":
proc = subprocess.Popen('overviewer.py --config="'+str(fileo)+'"', shell=True, stdout=subprocess.PIPE)
while not stop:
line = proc.stdout.readline()
if line.strip() == "":
pass
else:
print line.strip()
sel.output.append(line.strip())
if not line: break
if stop: break
proc.wait()
go = False
thread = Worker()
thread.start()
global genoutput
genoutput = False
global stop
stop = False
global go
go = False
def run():
global go
go = True
def rsave():
global genoutput
genoutput = True
def stopn():
global stop
run()
stop = True
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
myWindow = Main()
myWindow.show()
app.exec_()
thread.terminate()
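# For reference, a config file produced by Worker.genOut() above has roughly
# this shape (paths and names below are illustrative):
#
#   #Made with a generator by ParkerMc
#   ####Do NOT edit####
#   #C:/output
#   #C:/packs/pack.zip
#   #2
#   #MyWorld, C:/saves/MyWorld/level.dat
#   #Day, MyWorld, Overworld, Smooth Light, Upper Left, Png, 95
#
#   worlds["world_MyWorld"] = "C:/saves/MyWorld/"
#   renders["Day"] = {
#       "world": "world_MyWorld",
#       "title": "Day",
#       "rendermode": "smooth_lighting",
#       "dimension": "overworld",
#       "northdirection" : "upper-left",
#       "imgformat" : "png",
#       "imgquality" : "95",
#   }
#   outputdir = "C:/output"
#   texturepath = "C:/packs/pack.zip"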
|
|
# Natural Language Toolkit: Logic
#
# Author: Peter Wang
# Updated by: Dan Garrette <dhgarrette@gmail.com>
#
# Copyright (C) 2001-2015 NLTK Project
# URL: <http://nltk.org>
# For license information, see LICENSE.TXT
"""
An implementation of the Hole Semantics model, following Blackburn and Bos,
Representation and Inference for Natural Language (CSLI, 2005).
The semantic representations are built by the grammar hole.fcfg.
This module contains driver code to read in sentences and parse them
according to a hole semantics grammar.
After parsing, the semantic representation is in the form of an underspecified
representation that is not easy to read. We use a "plugging" algorithm to
convert that representation into first-order logic formulas.
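
A hedged usage sketch (mirroring the demo at the bottom of this module;
it requires the sample hole.fcfg grammar):

    >>> for r in hole_readings('every girl chases a dog'):  # doctest: +SKIP
    ...     print(r)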
"""
from __future__ import print_function, unicode_literals
from functools import reduce
from nltk import compat
from nltk.parse import load_parser
from nltk.sem.skolemize import skolemize
from nltk.sem.logic import (AllExpression, AndExpression, ApplicationExpression,
ExistsExpression, IffExpression, ImpExpression,
LambdaExpression, NegatedExpression, OrExpression)
# Note that in this code there may be multiple types of trees being referred to:
#
# 1. parse trees
# 2. the underspecified representation
# 3. first-order logic formula trees
# 4. the search space when plugging (search tree)
#
class Constants(object):
ALL = 'ALL'
EXISTS = 'EXISTS'
NOT = 'NOT'
AND = 'AND'
OR = 'OR'
IMP = 'IMP'
IFF = 'IFF'
PRED = 'PRED'
LEQ = 'LEQ'
HOLE = 'HOLE'
LABEL = 'LABEL'
MAP = {ALL: lambda v, e: AllExpression(v.variable, e),
EXISTS: lambda v, e: ExistsExpression(v.variable, e),
NOT: NegatedExpression,
AND: AndExpression,
OR: OrExpression,
IMP: ImpExpression,
IFF: IffExpression,
PRED: ApplicationExpression}
class HoleSemantics(object):
"""
This class holds the broken-down components of a hole semantics, i.e. it
extracts the holes, labels, logic formula fragments and constraints out of
    a big conjunction of such terms, as produced by the hole semantics grammar. It
then provides some operations on the semantics dealing with holes, labels
and finding legal ways to plug holes with labels.
"""
def __init__(self, usr):
"""
Constructor. `usr' is a ``sem.Expression`` representing an
Underspecified Representation Structure (USR). A USR has the following
special predicates:
ALL(l,v,n),
EXISTS(l,v,n),
AND(l,n,n),
OR(l,n,n),
IMP(l,n,n),
IFF(l,n,n),
PRED(l,v,n,v[,v]*) where the brackets and star indicate zero or more repetitions,
LEQ(n,n),
HOLE(n),
LABEL(n)
where l is the label of the node described by the predicate, n is either
a label or a hole, and v is a variable.
"""
self.holes = set()
self.labels = set()
self.fragments = {} # mapping of label -> formula fragment
self.constraints = set() # set of Constraints
self._break_down(usr)
self.top_most_labels = self._find_top_most_labels()
self.top_hole = self._find_top_hole()
def is_node(self, x):
"""
Return true if x is a node (label or hole) in this semantic
representation.
"""
return x in (self.labels | self.holes)
def _break_down(self, usr):
"""
Extract holes, labels, formula fragments and constraints from the hole
semantics underspecified representation (USR).
"""
if isinstance(usr, AndExpression):
self._break_down(usr.first)
self._break_down(usr.second)
elif isinstance(usr, ApplicationExpression):
func, args = usr.uncurry()
if func.variable.name == Constants.LEQ:
self.constraints.add(Constraint(args[0], args[1]))
elif func.variable.name == Constants.HOLE:
self.holes.add(args[0])
elif func.variable.name == Constants.LABEL:
self.labels.add(args[0])
else:
label = args[0]
assert label not in self.fragments
self.fragments[label] = (func, args[1:])
else:
raise ValueError(usr.label())
def _find_top_nodes(self, node_list):
top_nodes = node_list.copy()
for f in compat.itervalues(self.fragments):
# the label is the first argument of the predicate
args = f[1]
for arg in args:
if arg in node_list:
top_nodes.discard(arg)
return top_nodes
def _find_top_most_labels(self):
"""
Return the set of labels which are not referenced directly as part of
another formula fragment. These will be the top-most labels for the
subtree that they are part of.
"""
return self._find_top_nodes(self.labels)
def _find_top_hole(self):
"""
Return the hole that will be the top of the formula tree.
"""
top_holes = self._find_top_nodes(self.holes)
assert len(top_holes) == 1 # it must be unique
return top_holes.pop()
def pluggings(self):
"""
Calculate and return all the legal pluggings (mappings of labels to
holes) of this semantics given the constraints.
"""
record = []
self._plug_nodes([(self.top_hole, [])], self.top_most_labels, {}, record)
return record
def _plug_nodes(self, queue, potential_labels, plug_acc, record):
"""
Plug the nodes in `queue' with the labels in `potential_labels'.
Each element of `queue' is a tuple of the node to plug and the list of
ancestor holes from the root of the graph to that node.
`potential_labels' is a set of the labels which are still available for
plugging.
`plug_acc' is the incomplete mapping of holes to labels made on the
current branch of the search tree so far.
`record' is a list of all the complete pluggings that we have found in
total so far. It is the only parameter that is destructively updated.
"""
if queue != []:
(node, ancestors) = queue[0]
if node in self.holes:
# The node is a hole, try to plug it.
self._plug_hole(node, ancestors, queue[1:], potential_labels, plug_acc, record)
else:
assert node in self.labels
# The node is a label. Replace it in the queue by the holes and
# labels in the formula fragment named by that label.
args = self.fragments[node][1]
head = [(a, ancestors) for a in args if self.is_node(a)]
self._plug_nodes(head + queue[1:], potential_labels, plug_acc, record)
else:
raise Exception('queue empty')
def _plug_hole(self, hole, ancestors0, queue, potential_labels0,
plug_acc0, record):
"""
Try all possible ways of plugging a single hole.
See _plug_nodes for the meanings of the parameters.
"""
# Add the current hole we're trying to plug into the list of ancestors.
assert hole not in ancestors0
ancestors = [hole] + ancestors0
# Try each potential label in this hole in turn.
for l in potential_labels0:
# Is the label valid in this hole?
if self._violates_constraints(l, ancestors):
continue
plug_acc = plug_acc0.copy()
plug_acc[hole] = l
potential_labels = potential_labels0.copy()
potential_labels.remove(l)
if len(potential_labels) == 0:
# No more potential labels. That must mean all the holes have
# been filled so we have found a legal plugging so remember it.
#
# Note that the queue might not be empty because there might
# be labels on there that point to formula fragments with
# no holes in them. _sanity_check_plugging will make sure
# all holes are filled.
self._sanity_check_plugging(plug_acc, self.top_hole, [])
record.append(plug_acc)
else:
                # Recursively try to fill in the rest of the holes in the
                # queue. The label we just plugged into the hole could have
                # holes of its own, so we put it on the end of the queue.
                # Putting it on the end of the queue gives us a breadth-first
                # search, so that all the holes at level i of the formula
                # tree are filled before filling level i+1.
                # A depth-first search would work as well since the trees must
                # be finite but the bookkeeping would be harder.
self._plug_nodes(queue + [(l, ancestors)], potential_labels, plug_acc, record)
def _violates_constraints(self, label, ancestors):
"""
Return True if the `label' cannot be placed underneath the holes given
by the set `ancestors' because it would violate the constraints imposed
on it.
"""
for c in self.constraints:
if c.lhs == label:
if c.rhs not in ancestors:
return True
return False
def _sanity_check_plugging(self, plugging, node, ancestors):
"""
Make sure that a given plugging is legal. We recursively go through
each node and make sure that no constraints are violated.
We also check that all holes have been filled.
"""
if node in self.holes:
ancestors = [node] + ancestors
label = plugging[node]
else:
label = node
assert label in self.labels
for c in self.constraints:
if c.lhs == label:
assert c.rhs in ancestors
args = self.fragments[label][1]
for arg in args:
if self.is_node(arg):
self._sanity_check_plugging(plugging, arg, [label] + ancestors)
def formula_tree(self, plugging):
"""
Return the first-order logic formula tree for this underspecified
representation using the plugging given.
"""
return self._formula_tree(plugging, self.top_hole)
def _formula_tree(self, plugging, node):
if node in plugging:
return self._formula_tree(plugging, plugging[node])
elif node in self.fragments:
pred, args = self.fragments[node]
children = [self._formula_tree(plugging, arg) for arg in args]
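            # Constants.MAP maps predicate names to expression constructors;
            # reduce left-folds the (usually binary) constructor over the
            # children, so e.g. AND(l, n1, n2) becomes AndExpression(n1, n2).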
return reduce(Constants.MAP[pred.variable.name], children)
else:
return node
@compat.python_2_unicode_compatible
class Constraint(object):
"""
This class represents a constraint of the form (L =< N),
where L is a label and N is a node (a label or a hole).
"""
def __init__(self, lhs, rhs):
self.lhs = lhs
self.rhs = rhs
def __eq__(self, other):
if self.__class__ == other.__class__:
return self.lhs == other.lhs and self.rhs == other.rhs
else:
return False
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(repr(self))
def __repr__(self):
return '(%s < %s)' % (self.lhs, self.rhs)
def hole_readings(sentence, grammar_filename=None, verbose=False):
if not grammar_filename:
grammar_filename = 'grammars/sample_grammars/hole.fcfg'
if verbose:
print('Reading grammar file', grammar_filename)
parser = load_parser(grammar_filename)
# Parse the sentence.
tokens = sentence.split()
trees = list(parser.parse(tokens))
if verbose:
print('Got %d different parses' % len(trees))
all_readings = []
for tree in trees:
# Get the semantic feature from the top of the parse tree.
sem = tree.label()['SEM'].simplify()
# Print the raw semantic representation.
if verbose:
print('Raw: ', sem)
# Skolemize away all quantifiers. All variables become unique.
while isinstance(sem, LambdaExpression):
sem = sem.term
skolemized = skolemize(sem)
if verbose:
print('Skolemized:', skolemized)
# Break the hole semantics representation down into its components
# i.e. holes, labels, formula fragments and constraints.
hole_sem = HoleSemantics(skolemized)
# Maybe show the details of the semantic representation.
if verbose:
print('Holes: ', hole_sem.holes)
print('Labels: ', hole_sem.labels)
print('Constraints: ', hole_sem.constraints)
print('Top hole: ', hole_sem.top_hole)
print('Top labels: ', hole_sem.top_most_labels)
print('Fragments:')
for l, f in hole_sem.fragments.items():
print('\t%s: %s' % (l, f))
# Find all the possible ways to plug the formulas together.
pluggings = hole_sem.pluggings()
# Build FOL formula trees using the pluggings.
readings = list(map(hole_sem.formula_tree, pluggings))
# Print out the formulas in a textual format.
if verbose:
for i, r in enumerate(readings):
print()
print('%d. %s' % (i, r))
print()
all_readings.extend(readings)
return all_readings
if __name__ == '__main__':
for r in hole_readings('a dog barks'):
print(r)
print()
for r in hole_readings('every girl chases a dog'):
print(r)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_canframe
----------------------------------
Tests for `canframe` module.
"""
import sys
import unittest
assert sys.version_info >= (3, 3, 0), "Python version 3.3 or later required!"
from can4python import exceptions
from can4python import cansignal
from can4python import canframe_definition
from can4python import canframe
class TestCanFrame(unittest.TestCase):
def setUp(self):
self.frame = canframe.CanFrame(1, b'\x00\x02\x00\x08\x00\x00\x00\xff')
self.testsig1 = cansignal.CanSignalDefinition('testsignal1', 56, 1) # Least significant bit in last byte
self.testsig2 = cansignal.CanSignalDefinition('testsignal2', 8, 16, endianness='big') # Two leftmost bytes
self.testsig3 = cansignal.CanSignalDefinition('testsignal3', 24, 16, endianness='little',
maxvalue=1200) # Two center bytes
self.testsig4 = cansignal.CanSignalDefinition('testsignal4', 48, 8, signaltype='signed') # Second last byte
self.frame_def = canframe_definition.CanFrameDefinition(1, 'testmessage')
self.frame_def.signaldefinitions.append(self.testsig1)
self.frame_def.signaldefinitions.append(self.testsig2)
self.frame_def.signaldefinitions.append(self.testsig3)
self.frame_def.signaldefinitions.append(self.testsig4)
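        # Layout sketch for the 8-byte test frame (derived from the signal
        # definitions above; byte 0 is transmitted first):
        #   bytes 0-1: testsignal2 (big-endian, 16 bits)
        #   bytes 3-4: testsignal3 (little-endian, 16 bits, max 1200)
        #   byte 6:    testsignal4 (signed, 8 bits)
        #   byte 7:    testsignal1 (1 bit, least significant bit)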
def testConstructor(self):
frame1 = canframe.CanFrame(0x7FF, b'\x02', 'standard')
self.assertEqual(frame1.frame_id, 0x7FF)
self.assertEqual(frame1.frame_data, b'\x02')
self.assertEqual(frame1.frame_format, 'standard')
frame2 = canframe.CanFrame(0x1FFFFFFF, b'\x02\x03', 'extended')
self.assertEqual(frame2.frame_id, 0x1FFFFFFF)
self.assertEqual(frame2.frame_data, b'\x02\x03')
self.assertEqual(frame2.frame_format, 'extended')
def testConstructorNamedArguments(self):
frame = canframe.CanFrame(frame_id=3, frame_data=b'\x04', frame_format='extended')
self.assertEqual(frame.frame_id, 3)
self.assertEqual(frame.frame_data, b'\x04')
self.assertEqual(frame.frame_format, 'extended')
def testConstructorFromEmptyBytes(self):
frame = canframe.CanFrame.from_empty_bytes(5, 6, 'extended')
self.assertEqual(frame.frame_id, 5)
self.assertEqual(frame.frame_data, b'\x00\x00\x00\x00\x00\x00')
self.assertEqual(frame.frame_format, 'extended')
def testConstructorFromRawframes(self):
frame1 = canframe.CanFrame.from_rawframe(b'\x07\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
self.assertEqual(frame1.frame_id, 7)
self.assertEqual(frame1.frame_format, 'standard')
self.assertEqual(frame1.frame_data, b'\x00\x00\x00\x00\x00\x00\x00\x00')
frame2 = canframe.CanFrame.from_rawframe(b'\x03\x00\x00\x80\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
self.assertEqual(frame2.frame_id, 3)
self.assertEqual(frame2.frame_format, 'extended')
self.assertEqual(frame2.frame_data, b'\x00\x00\x00\x00\x00\x00')
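        # Raw frame layout exercised above (SocketCAN-style, 16 bytes with
        # little-endian fields; an interpretation of the test vectors):
        #   bytes 0-3:  frame id, with bit 31 flagging the extended format
        #   byte 4:     data length code
        #   bytes 5-7:  padding
        #   bytes 8-15: frame data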
def testWrongConstructor(self):
self.assertRaises(exceptions.CanException, canframe.CanFrame, -1, b'\x01')
self.assertRaises(exceptions.CanException, canframe.CanFrame, None, b'\x01')
self.assertRaises(exceptions.CanException, canframe.CanFrame, 0x800, b'\x01')
self.assertRaises(exceptions.CanException, canframe.CanFrame, 0x800, b'\x01', 'standard')
self.assertRaises(exceptions.CanException, canframe.CanFrame, 0x20000000, b'\x01', 'extended')
self.assertRaises(exceptions.CanException, canframe.CanFrame, "1,0", b'\x01')
self.assertRaises(exceptions.CanException, canframe.CanFrame, 1, "ABC")
self.assertRaises(exceptions.CanException, canframe.CanFrame, 1, "123")
self.assertRaises(exceptions.CanException, canframe.CanFrame, 1, None)
self.assertRaises(exceptions.CanException, canframe.CanFrame, 1, b'\x01\x02\x03\x04\x05\x06\x07\x08\x09')
self.assertRaises(exceptions.CanException, canframe.CanFrame, 1, b'\x01', "ABC")
self.assertRaises(exceptions.CanException, canframe.CanFrame, 1, b'\x01', 0)
self.assertRaises(exceptions.CanException, canframe.CanFrame, 1, b'\x01', None)
def testWrongConstructorFromEmptyBytes(self):
self.assertRaises(exceptions.CanException, canframe.CanFrame.from_empty_bytes, "ABC", 8)
self.assertRaises(exceptions.CanException, canframe.CanFrame.from_empty_bytes, "ABC", 8, 'standard')
self.assertRaises(exceptions.CanException, canframe.CanFrame.from_empty_bytes, "ABC", 8, 'extended')
self.assertRaises(exceptions.CanException, canframe.CanFrame.from_empty_bytes, -1, 8)
self.assertRaises(exceptions.CanException, canframe.CanFrame.from_empty_bytes, None, 8)
self.assertRaises(exceptions.CanException, canframe.CanFrame.from_empty_bytes, 1, 9)
self.assertRaises(exceptions.CanException, canframe.CanFrame.from_empty_bytes, 1, -1)
self.assertRaises(exceptions.CanException, canframe.CanFrame.from_empty_bytes, 1, None)
self.assertRaises(exceptions.CanException, canframe.CanFrame.from_empty_bytes, 1, "ABC")
self.assertRaises(exceptions.CanException, canframe.CanFrame.from_empty_bytes, 1, 8, "ABC")
self.assertRaises(exceptions.CanException, canframe.CanFrame.from_empty_bytes, 1, 8, None)
def testWrongConstructorFromRawframe(self):
self.assertRaises(exceptions.CanException, canframe.CanFrame.from_rawframe,
b'\x01\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xFF')
def testFrameidGet(self):
self.assertEqual(self.frame.frame_id, 1)
def testFrameidSet(self):
known_values = [0, 1, 100, 1000]
for value in known_values:
self.frame.frame_id = value
self.assertEqual(self.frame.frame_id, value)
def testFrameidSetWrongValue(self):
self.assertRaises(exceptions.CanException, setattr, self.frame, 'frame_id', -1)
self.assertRaises(exceptions.CanException, setattr, self.frame, 'frame_id', 0x800)
self.assertRaises(exceptions.CanException, setattr, self.frame, 'frame_id', "1,0")
self.assertRaises(exceptions.CanException, setattr, self.frame, 'frame_id', None)
def testFrameFormatGet(self):
self.assertEqual(self.frame.frame_format, 'standard')
def testFrameFormatSet(self):
known_values = ['standard', 'extended']
for value in known_values:
self.frame.frame_format = value
self.assertEqual(self.frame.frame_format, value)
def testFrameFormatSetWrongValue(self):
self.assertRaises(exceptions.CanException, setattr, self.frame, 'frame_format', 7)
self.assertRaises(exceptions.CanException, setattr, self.frame, 'frame_format', 'ABC')
def testFramedataGet(self):
self.assertEqual(self.frame.frame_data, b'\x00\x02\x00\x08\x00\x00\x00\xff')
def testFramedataSet(self):
known_values = [b'', b'\x00', b'\x01\x02\x03\x04\x05\x06\x07\x08']
for value in known_values:
self.frame.frame_data = value
self.assertEqual(self.frame.frame_data, value)
def testFramedataSetWrongValue(self):
self.assertRaises(exceptions.CanException, setattr, self.frame, 'frame_data',
b'\x01\x02\x03\x04\x05\x06\x07\x08\x09')
self.assertRaises(exceptions.CanException, setattr, self.frame, 'frame_data', None)
self.assertRaises(exceptions.CanException, setattr, self.frame, 'frame_data', "7")
self.assertRaises(exceptions.CanException, setattr, self.frame, 'frame_data', "\x01")
def testSignalvalueSet(self):
self.frame.set_signalvalue(self.testsig1)
self.frame.set_signalvalue(self.testsig1, 0)
self.frame.set_signalvalue(self.testsig1, 1)
self.frame.set_signalvalue(self.testsig1, 0)
self.frame.set_signalvalue(self.testsig2, 1000)
self.frame.set_signalvalue(self.testsig2, 27.3)
self.frame.set_signalvalue(self.testsig2, 0)
self.frame.set_signalvalue(self.testsig3, 1000)
self.frame.set_signalvalue(self.testsig3, 0)
self.assertEqual(self.frame.frame_data, b'\x00\x00\x00\x00\x00\x00\x00\xfe')
self.frame.set_signalvalue(self.testsig1, 1)
self.frame.set_signalvalue(self.testsig2, 16)
self.frame.set_signalvalue(self.testsig3, 512)
self.assertEqual(self.frame.frame_data, b'\x00\x10\x00\x00\x02\x00\x00\xff')
self.frame.set_signalvalue(self.testsig3, 1500) # Limited to 1200
self.assertEqual(self.frame.get_signalvalue(self.testsig3), 1200)
def testSignalvalueSetSigned(self):
self.frame.frame_data = b'\x00\x00\x00\x00\x00\x00\x00\x00'
self.frame.set_signalvalue(self.testsig4, -1)
self.assertEqual(self.frame.frame_data, b'\x00\x00\x00\x00\x00\x00\xff\x00')
self.frame.set_signalvalue(self.testsig4, -128)
self.assertEqual(self.frame.frame_data, b'\x00\x00\x00\x00\x00\x00\x80\x00')
def testSignalvalueSetSingle(self):
testsig1 = cansignal.CanSignalDefinition('testsignal1', 56, 32, endianness='big', signaltype='single')
frame1 = canframe.CanFrame(1, b'\x00\x00\x00\x00\x00\x00\x00\x00')
# Example from https://en.wikipedia.org/wiki/Single-precision_floating-point_format
frame1.set_signalvalue(testsig1, 0.15625)
self.assertEqual(frame1.frame_data, b'\x00\x00\x00\x00\x3E\x20\x00\x00')
def testSignalvalueSetSingleLittle(self):
testsig1 = cansignal.CanSignalDefinition('testsignal1', 32, 32, endianness='little', signaltype='single')
print(testsig1.get_descriptive_ascii_art())
frame1 = canframe.CanFrame(1, b'\x00\x00\x00\x00\x00\x00\x00\x00')
# Example from https://en.wikipedia.org/wiki/Single-precision_floating-point_format
frame1.set_signalvalue(testsig1, 0.15625)
self.assertEqual(frame1.frame_data, b'\x00\x00\x00\x00\x00\x00\x20\x3E')
def testSignalvalueGetSingle(self):
testsig1 = cansignal.CanSignalDefinition('testsignal1', 56, 32, endianness='big', signaltype='single')
# Example from https://en.wikipedia.org/wiki/Single-precision_floating-point_format
frame1 = canframe.CanFrame(1, b'\x00\x00\x00\x00\x3E\x20\x00\x00')
self.assertAlmostEqual(frame1.get_signalvalue(testsig1), 0.15625)
def testSignalvalueGetSingleLittle(self):
testsig1 = cansignal.CanSignalDefinition('testsignal1', 32, 32, endianness='little', signaltype='single')
# Example from https://en.wikipedia.org/wiki/Single-precision_floating-point_format
frame1 = canframe.CanFrame(1, b'\x00\x00\x00\x00\x00\x00\x20\x3E')
self.assertAlmostEqual(frame1.get_signalvalue(testsig1), 0.15625)
def testSignalvalueSetDouble(self):
testsig1 = cansignal.CanSignalDefinition('testsignal1', 56, 64, endianness='big', signaltype='double')
frame1 = canframe.CanFrame(1, b'\x00\x00\x00\x00\x00\x00\x00\x00')
# Example from https://en.wikipedia.org/wiki/Double-precision_floating-point_format
frame1.set_signalvalue(testsig1, 2.0)
self.assertEqual(frame1.frame_data, b'\x40\x00\x00\x00\x00\x00\x00\x00')
frame1.set_signalvalue(testsig1, 1.0)
self.assertEqual(frame1.frame_data, b'\x3F\xF0\x00\x00\x00\x00\x00\x00')
def testSignalvalueSetDoubleLittle(self):
testsig1 = cansignal.CanSignalDefinition('testsignal1', 0, 64, endianness='little', signaltype='double')
frame1 = canframe.CanFrame(1, b'\x00\x00\x00\x00\x00\x00\x00\x00')
# Example from https://en.wikipedia.org/wiki/Double-precision_floating-point_format
frame1.set_signalvalue(testsig1, 2.0)
self.assertEqual(frame1.frame_data, b'\x00\x00\x00\x00\x00\x00\x00\x40')
def testSignalvalueGetDouble(self):
testsig1 = cansignal.CanSignalDefinition('testsignal1', 56, 64, endianness='big', signaltype='double')
# Example from https://en.wikipedia.org/wiki/Single-precision_floating-point_format
frame1 = canframe.CanFrame(1, b'\x40\x00\x00\x00\x00\x00\x00\x00')
self.assertAlmostEqual(frame1.get_signalvalue(testsig1), 2.0)
frame1 = canframe.CanFrame(1, b'\x3F\xF0\x00\x00\x00\x00\x00\x00')
self.assertAlmostEqual(frame1.get_signalvalue(testsig1), 1.0)
frame1 = canframe.CanFrame(1, b'\x3F\xF0\x00\x00\x00\x00\x00\x01')
self.assertAlmostEqual(frame1.get_signalvalue(testsig1), 1.0)
frame1 = canframe.CanFrame(1, b'\x7F\xEF\xFF\xFF\xFF\xFF\xFF\xFF')
self.assertAlmostEqual(frame1.get_signalvalue(testsig1), 1.7976931348623157e308)
def testSignalvalueGetDoubleLittle(self):
testsig1 = cansignal.CanSignalDefinition('testsignal1', 0, 64, endianness='little', signaltype='double')
# Example from https://en.wikipedia.org/wiki/Single-precision_floating-point_format
frame1 = canframe.CanFrame(1, b'\x00\x00\x00\x00\x00\x00\xF0\x3F')
self.assertAlmostEqual(frame1.get_signalvalue(testsig1), 1.0)
def testSignalvalueSetTooShortFrame(self):
self.frame.frame_data = b'\00'
self.assertRaises(exceptions.CanException, self.frame.set_signalvalue, self.testsig1)
def testSignalvalueGetSetMin(self):
self.testsig3.minvalue = 0
self.frame.set_signalvalue(self.testsig3, 0)
self.assertEqual(self.frame.get_signalvalue(self.testsig3), 0)
self.testsig3.minvalue = 100
self.assertEqual(self.frame.get_signalvalue(self.testsig3), 100) # Minvalue for get
self.testsig3.minvalue = 400
self.frame.set_signalvalue(self.testsig3, 60) # Minvalue for set
self.testsig3.minvalue = 10
self.assertEqual(self.frame.get_signalvalue(self.testsig3), 400)
def testSignalvalueGetSetMax(self):
self.testsig3.maxvalue = 800
self.frame.set_signalvalue(self.testsig3, 900) # Maxvalue for set
self.assertEqual(self.frame.get_signalvalue(self.testsig3), 800)
self.testsig3.maxvalue = 100
self.assertEqual(self.frame.get_signalvalue(self.testsig3), 100) # Maxvalue for get
def testSignalvalueSetWrongValue(self):
self.assertRaises(exceptions.CanException, self.frame.set_signalvalue, self.testsig1, -1)
self.assertRaises(exceptions.CanException, self.frame.set_signalvalue, self.testsig1, 2)
def testSignalvalueGet(self):
self.assertEqual(self.frame.get_signalvalue(self.testsig1), 1)
self.assertEqual(self.frame.get_signalvalue(self.testsig2), 2)
self.assertEqual(self.frame.get_signalvalue(self.testsig3), 8)
def testSignalvalueGetSigned(self):
self.frame.frame_data = b'\x00\x00\x00\x00\x00\x00\x80\x00'
self.assertEqual(self.frame.get_signalvalue(self.testsig4), -128)
def testGetRawFrameStandard(self):
self.assertEqual(self.frame.get_rawframe(),
b'\x01\x00\x00\x00\x08\x00\x00\x00\x00\x02\x00\x08\x00\x00\x00\xff')
def testGetRawFrameExtended(self):
self.frame.frame_format = 'extended'
self.assertEqual(self.frame.get_rawframe(),
b'\x01\x00\x00\x80\x08\x00\x00\x00\x00\x02\x00\x08\x00\x00\x00\xff')
def testUnpack(self):
frame_defs = {self.frame_def.frame_id: self.frame_def}
result = self.frame.unpack(frame_defs)
self.assertEqual(len(result), 4)
self.assertEqual(result['testsignal1'], 1)
self.assertEqual(result['testsignal2'], 2)
self.assertEqual(result['testsignal3'], 8)
self.assertEqual(result['testsignal4'], 0)
def testUnpackWrongFrameId(self):
self.frame.frame_id = 2
frame_defs = {self.frame_def.frame_id: self.frame_def}
result = self.frame.unpack(frame_defs)
self.assertEqual(len(result), 0)
def testUnpackWrongFramelength(self):
self.frame.frame_data = b'\x00\x02'
frame_defs = {self.frame_def.frame_id: self.frame_def}
self.assertRaises(exceptions.CanException, self.frame.unpack, frame_defs)
def testRepr(self):
result = repr(self.frame)
known_result = "CAN frame ID: 1 (0x001, standard) data: 00 02 00 08 00 00 00 FF (8 bytes)"
self.assertEqual(result.strip(), known_result.strip())
def testLen(self):
self.assertEqual(len(self.frame), 8)
self.assertEqual(len(self.frame.frame_data), 8)
def testGetDescriptiveAsciiArt(self):
result = self.frame.get_descriptive_ascii_art()
print('\n\n' + result) # Check the output manually
if __name__ == '__main__':
# Run all tests #
unittest.main()
# Run a single test #
# suite = unittest.TestSuite()
# suite.addTest(TestCanFrame("testGetDescriptiveAsciiArt"))
# unittest.TextTestRunner(verbosity=2).run(suite)
|
|
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Library to make common google storage operations more reliable.
"""
import logging
import os
from chromite.buildbot import constants
from chromite.lib import cache
from chromite.lib import cros_build_lib
from chromite.lib import osutils
# Default path; stored here rather than in the usual buildbot.constants since
# we don't want to import buildbot code from here.
# Note that this value is set at the bottom of this module via
# GSContext.GetDefaultGSUtilBin; we set it initially here just for the sake
# of making clear it exists.
GSUTIL_BIN = None
PUBLIC_BASE_HTTPS_URL = 'https://commondatastorage.googleapis.com/'
PRIVATE_BASE_HTTPS_URL = 'https://sandbox.google.com/storage/'
BASE_GS_URL = 'gs://'
def CanonicalizeURL(url, strict=False):
"""Convert provided URL to gs:// URL, if it follows a known format.
Arguments:
url: URL to canonicalize.
strict: Raises exception if URL cannot be canonicalized.
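
  Example (illustrative):
    CanonicalizeURL(PUBLIC_BASE_HTTPS_URL + 'foo/bar') -> 'gs://foo/bar'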
"""
for prefix in (PUBLIC_BASE_HTTPS_URL, PRIVATE_BASE_HTTPS_URL):
if url.startswith(prefix):
return url.replace(prefix, BASE_GS_URL)
if not url.startswith(BASE_GS_URL) and strict:
raise ValueError('Url %r cannot be canonicalized.' % url)
return url
def GetGsURL(bucket, for_gsutil=False, public=True, suburl=''):
"""Construct a Google Storage URL
Args:
bucket: The Google Storage bucket to use
for_gsutil: Do you want a URL for passing to `gsutil`?
public: Do we want the public or private url
suburl: A url fragment to tack onto the end
Returns:
The fully constructed URL
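
  Example (illustrative):
    GetGsURL('my-bucket', for_gsutil=True, suburl='some/obj') returns
    'gs://my-bucket/some/obj'.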
"""
if for_gsutil:
urlbase = BASE_GS_URL
else:
urlbase = PUBLIC_BASE_HTTPS_URL if public else PRIVATE_BASE_HTTPS_URL
return '%s%s/%s' % (urlbase, bucket, suburl)
class GSContextException(Exception):
"""Thrown when expected google storage preconditions are not met."""
class GSContextPreconditionFailed(GSContextException):
"""Thrown when google storage returns code=PreconditionFailed."""
class GSNoSuchKey(GSContextException):
"""Thrown when google storage returns code=NoSuchKey."""
class GSContext(object):
"""A class to wrap common google storage operations."""
DEFAULT_BOTO_FILE = os.path.expanduser('~/.boto')
# This is set for ease of testing.
DEFAULT_GSUTIL_BIN = None
DEFAULT_GSUTIL_BUILDER_BIN = '/b/build/third_party/gsutil/gsutil'
# How many times to retry uploads.
DEFAULT_RETRIES = 10
# Multiplier for how long to sleep (in seconds) between retries; will delay
# (1*sleep) the first time, then (2*sleep), continuing via attempt * sleep.
DEFAULT_SLEEP_TIME = 60
GSUTIL_TAR = 'gsutil-3.10.tar.gz'
GSUTIL_URL = PUBLIC_BASE_HTTPS_URL + 'chromeos-public/%s' % GSUTIL_TAR
@classmethod
def GetDefaultGSUtilBin(cls):
if cls.DEFAULT_GSUTIL_BIN is None:
gsutil_bin = cls.DEFAULT_GSUTIL_BUILDER_BIN
if not os.path.exists(gsutil_bin):
gsutil_bin = osutils.Which('gsutil')
cls.DEFAULT_GSUTIL_BIN = gsutil_bin
return cls.DEFAULT_GSUTIL_BIN
@classmethod
def Cached(cls, cache_dir, *args, **kwargs):
"""Reuses previously fetched GSUtil, performing the fetch if necessary.
Arguments:
cache_dir: The toplevel cache dir.
*args, **kwargs: Arguments that are passed through to the GSContext()
constructor.
Returns:
An initialized GSContext() object.
"""
common_path = os.path.join(cache_dir, constants.COMMON_CACHE)
tar_cache = cache.TarballCache(common_path)
key = (cls.GSUTIL_TAR,)
# The common cache will not be LRU, removing the need to hold a read
# lock on the cached gsutil.
ref = tar_cache.Lookup(key)
if ref.Exists():
logging.debug('Reusing cached gsutil.')
else:
logging.debug('Fetching gsutil.')
with osutils.TempDirContextManager(
base_dir=tar_cache.staging_dir) as tempdir:
gsutil_tar = os.path.join(tempdir, cls.GSUTIL_TAR)
cros_build_lib.RunCurl([cls.GSUTIL_URL, '-o', gsutil_tar],
debug_level=logging.DEBUG)
ref.SetDefault(gsutil_tar)
gsutil_bin = os.path.join(ref.path, 'gsutil', 'gsutil')
return cls(*args, gsutil_bin=gsutil_bin, **kwargs)
def __init__(self, boto_file=None, acl_file=None, dry_run=False,
gsutil_bin=None, init_boto=False, retries=None, sleep=None):
"""Constructor.
Args:
boto_file: Fully qualified path to user's .boto credential file.
acl_file: A permission file capable of setting different permissions
for different sets of users.
dry_run: Testing mode that prints commands that would be run.
gsutil_bin: If given, the absolute path to the gsutil binary. Else
the default fallback will be used.
init_boto: If set to True, GSContext will check during __init__ if a
valid boto config is configured, and if not, will attempt to ask the
user to interactively set up the boto config.
retries: Number of times to retry a command before failing.
sleep: Amount of time to sleep between failures.
"""
if gsutil_bin is None:
gsutil_bin = self.GetDefaultGSUtilBin()
self._CheckFile('gsutil not found', gsutil_bin)
self.gsutil_bin = gsutil_bin
# Prefer boto_file if specified, else prefer the env then the default.
if boto_file is None:
boto_file = os.environ.get('BOTO_CONFIG', self.DEFAULT_BOTO_FILE)
self.boto_file = boto_file
if acl_file is not None:
self._CheckFile('Not a valid permissions file', acl_file)
self.acl_file = acl_file
self.dry_run = dry_run
self._retries = self.DEFAULT_RETRIES if retries is None else int(retries)
self._sleep_time = self.DEFAULT_SLEEP_TIME if sleep is None else int(sleep)
if init_boto:
self._InitBoto()
self._CheckFile('Boto credentials not found', self.boto_file)
def _CheckFile(self, errmsg, afile):
"""Pre-flight check for valid inputs.
Args:
errmsg: Error message to display.
      afile: Fully qualified path of the file to test for existence.
"""
if not os.path.isfile(afile):
raise GSContextException('%s, %s is not a file' % (errmsg, afile))
def _TestGSLs(self):
"""Quick test of gsutil functionality."""
result = self._DoCommand(['ls'], retries=0, debug_level=logging.DEBUG,
redirect_stderr=True, error_code_ok=True)
return not (result.returncode == 1 and
'no configured credentials' in result.error)
def _ConfigureBotoConfig(self):
"""Make sure we can access protected bits in GS."""
print('Configuring gsutil. **Please use your @google.com account.**')
try:
self._DoCommand(['config'], retries=0, debug_level=logging.CRITICAL,
print_cmd=False)
finally:
if (os.path.exists(self.boto_file) and not
os.path.getsize(self.boto_file)):
os.remove(self.boto_file)
raise GSContextException('GS config could not be set up.')
def _InitBoto(self):
if not self._TestGSLs():
self._ConfigureBotoConfig()
def Cat(self, path):
"""Returns the contents of a GS object."""
return self._DoCommand(['cat', path], redirect_stdout=True)
def CopyInto(self, local_path, remote_dir, filename=None, acl=None,
version=None):
"""Upload a local file into a directory in google storage.
Args:
local_path: Local file path to copy.
remote_dir: Full gs:// url of the directory to transfer the file into.
filename: If given, the filename to place the content at; if not given,
it's discerned from basename(local_path).
acl: If given, a canned ACL.
version: If given, the generation; essentially the timestamp of the last
update. Note this is not the same as sequence-number; it's
monotonically increasing bucket wide rather than reset per file.
        The usage of this is if we intend to replace/update only if the
        version is what we expect. This is useful in distributed settings;
        for example, to ensure you don't overwrite someone else's creation,
        a version of 0 states "only update if no version exists".
"""
filename = filename if filename is not None else local_path
# Basename it even if an explicit filename was given; we don't want
# people using filename as a multi-directory path fragment.
return self.Copy(local_path,
'%s/%s' % (remote_dir, os.path.basename(filename)),
acl=acl, version=version)
def _RunCommand(self, cmd, **kwargs):
try:
return cros_build_lib.RunCommand(cmd, **kwargs)
# gsutil uses the same exit code for any failure, so we are left to
# parse the output as needed.
except cros_build_lib.RunCommandError as e:
error = e.result.error
if error and 'GSResponseError' in error:
if 'code=PreconditionFailed' in error:
raise GSContextPreconditionFailed(e)
if 'code=NoSuchKey' in error:
raise GSNoSuchKey(e)
raise
def _DoCommand(self, gsutil_cmd, headers=(), retries=None, **kwargs):
"""Run a gsutil command, suppressing output, and setting retry/sleep.
Returns:
A RunCommandResult object.
"""
cmd = [self.gsutil_bin]
for header in headers:
cmd += ['-h', header]
cmd.extend(gsutil_cmd)
if retries is None:
retries = self._retries
extra_env = kwargs.pop('extra_env', {})
extra_env.setdefault('BOTO_CONFIG', self.boto_file)
if self.dry_run:
logging.debug("%s: would've ran %r", self.__class__.__name__, cmd)
else:
return cros_build_lib.RetryCommand(
self._RunCommand, retries, cmd, sleep=self._sleep_time,
extra_env=extra_env, **kwargs)
def Copy(self, src_path, dest_path, acl=None, version=None, **kwargs):
"""Copy to/from GS bucket.
Canned ACL permissions can be specified on the gsutil cp command line.
More info:
https://developers.google.com/storage/docs/accesscontrol#applyacls
Args:
src_path: Fully qualified local path or full gs:// path of the src file.
dest_path: Fully qualified local path or full gs:// path of the dest
file.
acl: One of the google storage canned_acls to apply.
version: If given, the generation; essentially the timestamp of the last
update. Note this is not the same as sequence-number; it's
monotonically increasing bucket wide rather than reset per file.
        The usage of this is if we intend to replace/update only if the
        version is what we expect. This is useful in distributed settings;
        for example, to ensure you don't overwrite someone else's creation,
        a version of 0 states "only update if no version exists".
Raises:
RunCommandError if the command failed despite retries.
Returns:
Return the CommandResult from the run.
"""
cmd, headers = [], []
if version is not None:
headers = ['x-goog-if-generation-match:%d' % version]
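    # gsutil forwards this header to google storage, which fails the write
    # with code=PreconditionFailed (surfaced as GSContextPreconditionFailed
    # by _RunCommand) unless the object's current generation matches.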
cmd.append('cp')
acl = self.acl_file if acl is None else acl
if acl is not None:
cmd += ['-a', acl]
cmd += ['--', src_path, dest_path]
# For ease of testing, only pass headers if we got some.
if headers:
kwargs['headers'] = headers
return self._DoCommand(cmd, redirect_stderr=True, **kwargs)
def LS(self, path):
"""Does a directory listing of the given gs path."""
return self._DoCommand(['ls', '--', path], redirect_stdout=True)
def SetACL(self, upload_url, acl=None):
"""Set access on a file already in google storage.
Args:
upload_url: gs:// url that will have acl applied to it.
acl: An ACL permissions file or canned ACL.
"""
if acl is None:
if not self.acl_file:
raise GSContextException(
"SetAcl invoked w/out a specified acl, nor a default acl.")
acl = self.acl_file
self._DoCommand(['setacl', acl, upload_url])
def Exists(self, path):
"""Checks whether the given object exists.
Args:
path: Full gs:// url of the path to check.
Returns:
True if the path exists; otherwise returns False.
"""
try:
self._DoCommand(['getacl', path], redirect_stdout=True,
redirect_stderr=True)
except GSNoSuchKey:
return False
return True
# Set GSUTIL_BIN now.
GSUTIL_BIN = GSContext.GetDefaultGSUtilBin()
|
|
"""Transformed histograms.
These histograms use a transformation from input values to bins
in a different coordinate system.
There are three basic classes:
* PolarHistogram
* CylindricalHistogram
* SphericalHistogram
Apart from these, there are their projections into lower dimensions.
And of course, it is possible to re-use the general transforming functionality
by adding `TransformedHistogramMixin` among the custom histogram
class superclasses.
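
A hedged usage sketch (using the facade functions defined in this module;
exact construction details may vary):

    >>> import numpy as np
    >>> x, y = np.random.normal(size=(2, 1000))
    >>> hist = polar(x, y)          # doctest: +SKIP
    >>> hist.projection("phi")      # doctest: +SKIP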
"""
from __future__ import annotations
import abc
from functools import reduce
from typing import TYPE_CHECKING, overload
import numpy as np
from physt.histogram1d import Histogram1D
from physt.histogram_nd import HistogramND
from physt.util import deprecation_alias
from . import binnings, histogram_nd
if TYPE_CHECKING:
from typing import Dict, Optional, Tuple, Type, Union
from physt.typing_aliases import ArrayLike, Axis, RangeTuple
FULL_PHI_RANGE: RangeTuple = (0, 2 * np.pi)
FULL_THETA_RANGE: RangeTuple = (0, np.pi)
DEFAULT_PHI_BINS: int = 16
DEFAULT_THETA_BINS: int = 16
class TransformedHistogramMixin(abc.ABC):
"""Histogram with non-cartesian (or otherwise transformed) axes.
This is a mixin, providing transform-aware find_bin, fill and fill_n.
    When implementing, you are required to provide the following:
    - `_transform_correct_dimension` classmethod that converts values from
      rectangular (cartesian) coordinates to the transformed ones
- `bin_sizes` property
In certain cases, you may want to have default axis names + projections.
Look at PolarHistogram / SphericalHistogram / CylindricalHistogram as
an example.
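
    A minimal sketch of a custom subclass (hypothetical, for illustration;
    the name and transform are assumptions, not part of physt):

        class ManhattanRadialHistogram(TransformedHistogramMixin, Histogram1D):
            source_ndim = 2

            @classmethod
            def _transform_correct_dimension(cls, value):
                # L1 "radius" |x| + |y| of each (x, y) pair
                return np.abs(value[..., 0]) + np.abs(value[..., 1])

            @property
            def bin_sizes(self):
                return self.bin_widths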
"""
@classmethod
@abc.abstractmethod
def _transform_correct_dimension(cls, value: np.ndarray):
...
def find_bin(self, value: ArrayLike, axis: Optional[Axis] = None, transformed: bool = False):
"""
Parameters
----------
value : array_like
Value with dimensionality equal to histogram.
transformed : bool
If true, the value is already transformed and has same axes as the bins.
"""
if axis is None and not transformed:
value = self.transform(value)
return super().find_bin(value, axis=axis) # type: ignore
@property
@abc.abstractmethod
def bin_sizes(self):
...
def fill(
self,
value: ArrayLike,
weight: Optional[ArrayLike] = 1,
*,
transformed: bool = False,
**kwargs,
):
if not transformed:
value = self.transform(value)
return super().fill(value=value, weight=weight, **kwargs) # type: ignore
def fill_n(
self,
values: ArrayLike,
weights: Optional[ArrayLike] = None,
*,
dropna: bool = True,
transformed: bool = False,
**kwargs,
):
if not transformed:
values = self.transform(values)
super().fill_n(values=values, weights=weights, dropna=dropna, **kwargs) # type: ignore
_projection_class_map: Dict[Tuple[int, ...], type] = {}
source_ndim: Union[int, Tuple[int, ...]]
def projection(self, *axes, **kwargs):
"""Projection to lower-dimensional histogram.
The inheriting class should implement the _projection_class_map
class attribute to suggest class for the projection. If the
arguments don't match any of the map keys, HistogramND is used.
"""
axes, _ = self._get_projection_axes(*axes)
axes = tuple(sorted(axes))
if axes in self._projection_class_map:
klass = self._projection_class_map[axes]
return HistogramND.projection(self, *axes, type=klass, **kwargs)
return HistogramND.projection(self, *axes, **kwargs)
@classmethod
def _validate_source_dimension(cls, value: np.ndarray) -> None:
source_ndims = [cls.source_ndim] if isinstance(cls.source_ndim, int) else cls.source_ndim
if not len(value.shape) <= 2 or value.shape[-1] not in source_ndims:
raise ValueError(
f"{cls.__name__} can transform only arrays with shape (N, {cls.source_ndim})"
f" or ({cls.source_ndim},), {value.shape} given."
)
@classmethod
def transform(cls, value: ArrayLike) -> Union[np.ndarray, float]:
"""Convert cartesian (general) coordinates into internal ones.
Parameters
----------
value : array_like
This method should accept both scalars and numpy arrays.
            If multiple values are to be transformed, it should be of
            (nvalues, ndim) shape.

        Note: Implement `_transform_correct_dimension` in subclasses; this
        method validates the input shape and delegates to it.
"""
value = np.atleast_1d(np.asarray(value, dtype=np.float64))
cls._validate_source_dimension(value)
return cls._transform_correct_dimension(value)
class RadialHistogram(TransformedHistogramMixin, Histogram1D):
"""Projection of polar histogram to 1D with respect to radius.
This is a special case of a 1D histogram with transformed coordinates.
"""
default_axis_names = [
"r",
]
source_ndim = (2, 3)
@property
def bin_sizes(self):
return (self.bin_right_edges ** 2 - self.bin_left_edges ** 2) * np.pi
@classmethod
def _transform_correct_dimension(cls, value):
if value.shape[-1] == 2:
return np.hypot(value[..., 1], value[..., 0])
else:
return np.hypot(np.hypot(value[..., 1], value[..., 0]), value[..., 2])
class AzimuthalHistogram(TransformedHistogramMixin, Histogram1D):
"""Projection of polar histogram to 1D with respect to phi.
This is a special case of a 1D histogram with transformed coordinates.
"""
default_axis_names = ["phi"]
default_init_values = {"radius": 1}
source_ndim = 2
@classmethod
def _transform_correct_dimension(cls, value):
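        # arctan2 yields angles in (-pi, pi]; the modulo remaps them into
        # the [0, 2*pi) range conventionally used for the phi axis.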
return np.arctan2(value[..., 1], value[..., 0]) % (2 * np.pi)
@property
def bin_sizes(self):
return self.bin_widths
@property
def radius(self):
"""Radius of the surface.
Useful for calculating densities.
"""
return self._meta_data.get("radius", 1)
@radius.setter
def radius(self, value):
self._meta_data["radius"] = value
class PolarHistogram(TransformedHistogramMixin, HistogramND):
"""2D histogram in polar coordinates.
This is a special case of a 2D histogram with transformed coordinates:
- r as radius in the (0, +inf) range
- phi as azimuthal angle in the (0, 2*pi) range
"""
default_axis_names = ["r", "phi"]
source_ndim = 2
@property
def bin_sizes(self):
sizes = 0.5 * (self.get_bin_right_edges(0) ** 2 - self.get_bin_left_edges(0) ** 2)
sizes = np.outer(sizes, self.get_bin_widths(1))
return sizes
@classmethod
def _transform_correct_dimension(cls, value):
result = np.empty_like(value)
result[..., 0] = np.hypot(value[..., 1], value[..., 0])
result[..., 1] = np.arctan2(value[..., 1], value[..., 0]) % (2 * np.pi)
return result
_projection_class_map = {(0,): RadialHistogram, (1,): AzimuthalHistogram}
class SphericalSurfaceHistogram(TransformedHistogramMixin, HistogramND):
"""2D histogram in spherical coordinates.
This is a special case of a 2D histogram with transformed coordinates:
    - theta as angle between z axis and the vector, in the (0, pi) range
- phi as azimuthal angle (in the xy projection) in the (0, 2*pi) range
"""
@property
def bin_sizes(self):
sizes1 = np.cos(self.get_bin_left_edges(0)) - np.cos(self.get_bin_right_edges(0))
sizes2 = self.get_bin_widths(1)
return reduce(np.multiply, np.ix_(sizes1, sizes2))
default_axis_names = ["theta", "phi"]
default_init_values = {"radius": 1}
source_ndim = 3
@property
def radius(self) -> float:
"""Radius of the surface.
Useful for calculating densities.
"""
return self._meta_data.get("radius", 1)
@radius.setter
def radius(self, value: float):
self._meta_data["radius"] = value
@classmethod
def _transform_correct_dimension(cls, value):
result = np.ndarray((*value.shape[:-1], 2))
x, y, z = value.T
xy = np.hypot(x, y) # pylint: disable=invalid-name
result[..., 0] = np.arctan2(xy, z) % (2 * np.pi)
result[..., 1] = np.arctan2(y, x) % (2 * np.pi)
return result
class SphericalHistogram(TransformedHistogramMixin, HistogramND):
"""3D histogram in spherical coordinates.
This is a special case of a 3D histogram with transformed coordinates:
- r as radius in the (0, +inf) range
    - theta as angle between z axis and the vector, in the (0, pi) range
- phi as azimuthal angle (in the xy projection) in the (0, 2*pi) range
"""
default_axis_names = ["r", "theta", "phi"]
source_ndim = 3
@classmethod
def _transform_correct_dimension(cls, value):
result = np.empty_like(value)
x, y, z = value.T
xy = np.hypot(x, y)
result[..., 0] = np.hypot(xy, z)
result[..., 1] = np.arctan2(xy, z) % (2 * np.pi)
result[..., 2] = np.arctan2(y, x) % (2 * np.pi)
return result
@property
def bin_sizes(self):
sizes1 = (self.get_bin_right_edges(0) ** 3 - self.get_bin_left_edges(0) ** 3) / 3
sizes2 = np.cos(self.get_bin_left_edges(1)) - np.cos(self.get_bin_right_edges(1))
sizes3 = self.get_bin_widths(2)
        # Volume element r^2 dr * sin(theta) dtheta * dphi, integrated over
        # each bin and combined into an n-d array via an outer product.
        return reduce(np.multiply, np.ix_(sizes1, sizes2, sizes3))
_projection_class_map = {(1, 2): SphericalSurfaceHistogram, (0,): RadialHistogram}
class CylindricalSurfaceHistogram(TransformedHistogramMixin, HistogramND):
"""2D histogram in coordinates on cylinder surface.
This is a special case of a 2D histogram with transformed coordinates:
- phi as azimuthal angle (in the xy projection) in the (0, 2*pi) range
- z as the last direction without modification, in (-inf, +inf) range
Attributes
----------
radius: float
The radius of the surface. Useful for plotting
"""
default_axis_names = ["rho", "phi", "z"]
default_init_values = {"radius": 1}
source_ndim = 3
@classmethod
def _transform_correct_dimension(cls, value):
        result = np.ndarray((*value.shape[:-1], 2))
x, y, z = value.T
result[..., 0] = np.arctan2(y, x) % (2 * np.pi) # phi
result[..., 1] = z
return result
@property
def radius(self) -> float:
"""Radius of the cylindrical surface.
Useful for calculating densities.
"""
return self._meta_data.get("radius", 1)
@radius.setter
def radius(self, value: float):
self._meta_data["radius"] = float(value)
@property
def bin_sizes(self) -> np.ndarray:
sizes1 = self.get_bin_widths(0)
sizes2 = self.get_bin_widths(1)
return reduce(np.multiply, np.ix_(sizes1, sizes2))
_projection_class_map = {(0,): AzimuthalHistogram}
class CylindricalHistogram(TransformedHistogramMixin, HistogramND):
"""3D histogram in cylindrical coordinates.
This is a special case of a 3D histogram with transformed coordinates:
- r as radius projection to xy plane in the (0, +inf) range
- phi as azimuthal angle (in the xy projection) in the (0, 2*pi) range
- z as the last direction without modification, in (-inf, +inf) range
"""
default_axis_names = ["rho", "phi", "z"]
source_ndim = 3
@classmethod
def _transform_correct_dimension(cls, value):
result = np.empty_like(value)
x, y, z = value.T
        result[..., 0] = np.hypot(x, y)  # rho
result[..., 1] = np.arctan2(y, x) % (2 * np.pi) # phi
result[..., 2] = z
return result
@property
def bin_sizes(self):
sizes1 = 0.5 * (self.get_bin_right_edges(0) ** 2 - self.get_bin_left_edges(0) ** 2)
sizes2 = self.get_bin_widths(1)
sizes3 = self.get_bin_widths(2)
return reduce(np.multiply, np.ix_(sizes1, sizes2, sizes3))
_projection_class_map = {
(0,): RadialHistogram,
(1,): AzimuthalHistogram,
(0, 1): PolarHistogram,
(1, 2): CylindricalSurfaceHistogram,
}
def projection(self, *axes, **kwargs):
result = TransformedHistogramMixin.projection(self, *axes, **kwargs)
if isinstance(result, CylindricalSurfaceHistogram):
result.radius = self.get_bin_right_edges(0)[-1]
return result
def polar(
xdata: ArrayLike,
ydata: ArrayLike,
*,
radial_bins="numpy",
radial_range: Optional[RangeTuple] = None,
phi_bins=DEFAULT_PHI_BINS,
phi_range: RangeTuple = (0, 2 * np.pi),
dropna: bool = False,
weights: Optional[ArrayLike] = None,
transformed: bool = False,
**kwargs,
) -> PolarHistogram:
"""Facade construction function for the PolarHistogram."""
if "range" in kwargs:
raise ValueError("Please, use `radial_range` and `phi_range` arguments instead of `range`")
xdata = np.asarray(xdata)
ydata = np.asarray(ydata)
data: np.ndarray = np.concatenate([xdata[:, np.newaxis], ydata[:, np.newaxis]], axis=1)
data = _prepare_data(data, transformed=transformed, klass=PolarHistogram, dropna=dropna)
if isinstance(phi_bins, int):
phi_bins = np.linspace(*phi_range, phi_bins + 1)
bin_schemas = binnings.calculate_bins_nd(
data,
[radial_bins, phi_bins],
range=[radial_range, None],
check_nan=not dropna,
**kwargs,
)
return PolarHistogram.from_calculate_frequencies(
data, binnings=bin_schemas, weights=weights, **kwargs
)
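# A usage sketch for this facade (illustrative; the bin count and the
# projection-by-name call are assumptions, not prescriptions):
#
#     import numpy as np
#     x, y = np.random.normal(size=(2, 1000))
#     hist = polar(x, y, phi_bins=8)   # PolarHistogram with 8 phi bins
#     radial = hist.projection("r")    # -> RadialHistogram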
def azimuthal(
xdata: ArrayLike,
ydata: Optional[ArrayLike] = None,
*,
bins=DEFAULT_PHI_BINS,
range: RangeTuple = (0, 2 * np.pi),
dropna: bool = False,
weights=None,
transformed: bool = False,
**kwargs,
) -> AzimuthalHistogram:
"""Facade function to create an AzimuthalHistogram."""
if transformed:
data: np.ndarray = np.asarray(xdata)
if ydata is not None:
raise ValueError(
"With `transformed==True`, you can provide only one positional argument (xdata)."
)
else:
data = np.concatenate(
[np.asarray(xdata)[:, np.newaxis], np.asarray(ydata)[:, np.newaxis]], axis=1
)
    data = _prepare_data(data, transformed=transformed, klass=AzimuthalHistogram, dropna=dropna)
if isinstance(bins, int):
bins = np.linspace(*range, bins + 1)
bin_schema = binnings.calculate_bins(data, bins, range=range, check_nan=not dropna, **kwargs)
return AzimuthalHistogram.from_calculate_frequencies(
data=data, binning=bin_schema, weights=weights
)
def radial(
xdata: ArrayLike,
ydata: Optional[ArrayLike] = None,
zdata: Optional[ArrayLike] = None,
*,
bins="numpy",
range: Optional[RangeTuple] = None,
dropna: bool = False,
weights: Optional[ArrayLike] = None,
transformed: bool = False,
**kwargs,
) -> RadialHistogram:
"""Facade function to create a radial histogram."""
    # Construct source data
xdata = np.asarray(xdata)
if transformed:
data = np.asarray(xdata)
if ydata is not None or zdata is not None:
raise ValueError(
"With `transformed==True`, you can provide only one positional argument (xdata)."
)
elif xdata.ndim > 1 and xdata.shape[-1] == 3:
data = xdata
if ydata is not None or zdata is not None:
raise ValueError(
"With 3D first argument (`xdata`), you cannot provide other positional arguments."
)
else:
ydata = np.asarray(ydata)
if zdata is None:
data = np.concatenate([xdata[:, np.newaxis], ydata[:, np.newaxis]], axis=1)
else:
zdata = np.asarray(zdata)
data = np.concatenate(
[xdata[:, np.newaxis], ydata[:, np.newaxis], zdata[:, np.newaxis]], axis=1
)
data = _prepare_data(data, transformed=transformed, klass=RadialHistogram, dropna=dropna)
bin_schema = binnings.calculate_bins(data, bins, range=range, check_nan=not dropna, **kwargs)
return RadialHistogram.from_calculate_frequencies(
data=data, binning=bin_schema, weights=weights
)
def spherical(
data: ArrayLike,
*,
radial_bins="numpy",
theta_bins=DEFAULT_THETA_BINS,
phi_bins=DEFAULT_PHI_BINS,
dropna: bool = True,
transformed: bool = False,
theta_range: RangeTuple = (0, np.pi),
phi_range: RangeTuple = (0, 2 * np.pi),
radial_range: Optional[RangeTuple] = None,
weights=None,
**kwargs,
) -> SphericalHistogram:
"""Facade function to create a speherical histogram."""
if "range" in kwargs:
raise ValueError(
"Please, use `radial_range`, `theta_range` and `phi_range` arguments instead of `range`"
)
data = _prepare_data(data, transformed=transformed, klass=SphericalHistogram, dropna=dropna)
if isinstance(theta_bins, int):
theta_bins = np.linspace(*theta_range, theta_bins + 1)
if isinstance(phi_bins, int):
phi_bins = np.linspace(*phi_range, phi_bins + 1)
try:
bin_schemas = binnings.calculate_bins_nd(
data,
[radial_bins, theta_bins, phi_bins],
range=[radial_range, None, None],
check_nan=not dropna,
**kwargs,
)
except ValueError as err:
if "Bins not in rising order" in str(err):
if data is not None and np.isclose(data[:, 0].min(), data[:, 0].max()):
raise ValueError(
f"All radii seem to be the same: {data[:,0].min():,.4f}. "
"Perhaps you wanted to use `spherical_surface_histogram` instead or set radius bins explicitly?"
)
raise
return SphericalHistogram.from_calculate_frequencies(
data, binnings=bin_schemas, weights=weights
)
def spherical_surface(
data: ArrayLike,
*,
theta_bins=DEFAULT_THETA_BINS,
phi_bins=DEFAULT_PHI_BINS,
transformed: bool = False,
radius: Optional[float] = None,
dropna: bool = False,
weights: Optional[ArrayLike] = None,
theta_range: RangeTuple = FULL_THETA_RANGE,
phi_range: RangeTuple = FULL_PHI_RANGE,
**kwargs,
) -> SphericalSurfaceHistogram:
"""Facade construction function for the SphericalSurfaceHistogram."""
transformed_data = _prepare_data(
data, transformed=transformed, klass=SphericalSurfaceHistogram, dropna=dropna
)
if "range" in kwargs:
raise ValueError("Please, use `theta_range` and `phi_range` arguments instead of `range`")
if radius is None:
radius = 1
if isinstance(theta_bins, int):
theta_bins = np.linspace(*theta_range, theta_bins + 1)
if isinstance(phi_bins, int):
phi_bins = np.linspace(*phi_range, phi_bins + 1)
bin_schemas = binnings.calculate_bins_nd(
transformed_data, [theta_bins, phi_bins], check_nan=not dropna, **kwargs
)
return SphericalSurfaceHistogram.from_calculate_frequencies(
transformed_data, binnings=bin_schemas, weights=weights, radius=radius, **kwargs
)
def cylindrical(
data: Optional[ArrayLike] = None,
*,
rho_bins="numpy",
phi_bins=16,
z_bins="numpy",
transformed: bool = False,
dropna: bool = True,
rho_range: Optional[RangeTuple] = None,
phi_range: RangeTuple = FULL_PHI_RANGE,
weights: Optional[ArrayLike] = None,
z_range: Optional[RangeTuple] = None,
**kwargs,
) -> CylindricalHistogram:
"""Facade function to create a cylindrical histogram."""
if "range" in kwargs:
raise ValueError(
"Please, use `rho_range`, `phi_range` and `z_range` arguments instead of `range`"
)
data = _prepare_data(data, transformed=transformed, klass=CylindricalHistogram, dropna=dropna)
if isinstance(phi_bins, int):
phi_bins = np.linspace(*phi_range, phi_bins + 1)
bin_schemas = binnings.calculate_bins_nd(
data,
[rho_bins, phi_bins, z_bins],
range=[rho_range, None, z_range],
check_nan=not dropna,
**kwargs,
)
return CylindricalHistogram.from_calculate_frequencies(
data, binnings=bin_schemas, weights=weights, **kwargs
)
def cylindrical_surface(
data=None,
*,
phi_bins=16,
z_bins="numpy",
transformed: bool = False,
radius: Optional[float] = None,
dropna: bool = False,
weights=None,
phi_range: RangeTuple = FULL_PHI_RANGE,
z_range: Optional[RangeTuple] = None,
**kwargs,
) -> CylindricalSurfaceHistogram:
"""Facade function to create a cylindrical surface histogram."""
if "range" in kwargs:
raise ValueError("Please, use `phi_range` and `z_range` arguments instead of `range`")
transformed_data = _prepare_data(
data, transformed=transformed, klass=CylindricalHistogram, dropna=dropna
)
if transformed_data is not None:
if not transformed and radius is None:
            radius = np.hypot(data[:, 0], data[:, 1]).mean()
if radius is None:
radius = 1
if isinstance(phi_bins, int):
phi_bins = np.linspace(*phi_range, phi_bins + 1)
bin_schemas = binnings.calculate_bins_nd(
transformed_data,
[phi_bins, z_bins],
range=[None, z_range],
check_nan=not dropna,
**kwargs,
)
frequencies, errors2, missed = histogram_nd.calculate_frequencies(
        transformed_data, binnings=bin_schemas, weights=weights
)
return CylindricalSurfaceHistogram(
binnings=bin_schemas,
frequencies=frequencies,
errors2=errors2,
radius=radius,
missed=missed,
)
azimuthal_histogram = deprecation_alias(azimuthal, "azimuthal_histogram")
radial_histogram = deprecation_alias(radial, "radial_histogram")
polar_histogram = deprecation_alias(polar, "polar_histogram")
spherical_histogram = deprecation_alias(spherical, "spherical_histogram")
spherical_surface_histogram = deprecation_alias(spherical_surface, "spherical_surface_histogram")
cylindrical_histogram = deprecation_alias(cylindrical, "cylindrical_histogram")
cylindrical_surface_histogram = deprecation_alias(
cylindrical_surface, "cylindrical_surface_histogram"
)
@overload
def _prepare_data(
data: ArrayLike,
transformed: bool,
klass: Type[TransformedHistogramMixin],
*,
dropna: bool = False,
) -> np.ndarray:
...
@overload
def _prepare_data(
data: None, transformed: bool, klass: Type[TransformedHistogramMixin], *, dropna: bool = False
) -> None:
...
def _prepare_data(
data: Optional[ArrayLike],
transformed: bool,
klass: Type[TransformedHistogramMixin],
*,
dropna: bool = False,
) -> Optional[np.ndarray]:
"""Transform data for binning."""
if data is None:
return None
data_: np.ndarray = np.asarray(data)
if dropna:
data_ = data_[~np.isnan(data_).any(axis=1)]
if not transformed:
# TODO: Perhaps we should be able to disi
data_ = klass.transform(data_) # type: ignore
return data_
|