gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to volumes + cinder.
"""
import collections
import copy
import functools
import sys
from cinderclient import client as cinder_client
from cinderclient import exceptions as cinder_exception
from keystoneauth1 import exceptions as keystone_exception
from keystoneauth1 import loading as ks_loading
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import strutils
import six
from nova import availability_zones as az
import nova.conf
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova import service_auth
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
_SESSION = None
def reset_globals():
    """Reset module-level state (used by tests).

    Clears the cached keystone session so the next ``cinderclient()``
    call rebuilds it from the current configuration.
    """
    global _SESSION
    _SESSION = None
def cinderclient(context):
    """Build a cinderclient Client for the given request context.

    The keystone session is created once and cached in the module-level
    ``_SESSION``; the auth plugin is derived per-request from ``context``
    so the correct user/service credentials are used.

    :param context: nova request context, used for auth and (optionally)
        for interpolating the operator-configured endpoint template.
    :returns: a versioned ``cinderclient.Client`` instance.
    :raises exception.UnsupportedCinderAPIVersion: if the endpoint URL
        maps to the unsupported v1 volume API.
    """
    global _SESSION

    if not _SESSION:
        # Lazily create and cache the keystone session from the [cinder]
        # config group; shared across all requests.
        _SESSION = ks_loading.load_session_from_conf_options(
            CONF, nova.conf.cinder.cinder_group.name)

    url = None
    endpoint_override = None

    auth = service_auth.get_auth_plugin(context)

    # catalog_info is formatted as "service_type:service_name:interface".
    service_type, service_name, interface = CONF.cinder.catalog_info.split(':')

    service_parameters = {'service_type': service_type,
                          'service_name': service_name,
                          'interface': interface,
                          'region_name': CONF.cinder.os_region_name}

    if CONF.cinder.endpoint_template:
        # An operator-supplied endpoint template takes precedence over the
        # catalog lookup; it is interpolated with fields from the context.
        url = CONF.cinder.endpoint_template % context.to_dict()
        endpoint_override = url
    else:
        url = _SESSION.get_endpoint(auth, **service_parameters)

    # TODO(jamielennox): This should be using proper version discovery from
    # the cinder service rather than just inspecting the URL for certain string
    # values.
    version = cinder_client.get_volume_api_from_url(url)

    if version == '1':
        raise exception.UnsupportedCinderAPIVersion(version=version)

    return cinder_client.Client(version,
                                session=_SESSION,
                                auth=auth,
                                endpoint_override=endpoint_override,
                                connect_retries=CONF.cinder.http_retries,
                                **service_parameters)
def _untranslate_volume_summary_view(context, vol):
    """Map a cinderclient volume object onto a nova-style summary dict."""
    d = {
        'id': vol.id,
        'status': vol.status,
        'size': vol.size,
        'availability_zone': vol.availability_zone,
        'created_at': vol.created_at,
        # TODO(jdg): The calling code expects attach_time and mountpoint
        # to be set; drop these once the callers are more defensive.
        'attach_time': "",
        'mountpoint': "",
        'multiattach': getattr(vol, 'multiattach', False),
    }

    if vol.attachments:
        attachments = collections.OrderedDict()
        for attachment in vol.attachments:
            attachments[attachment['server_id']] = {
                'attachment_id': attachment.get('attachment_id'),
                'mountpoint': attachment.get('device'),
            }
        d['attachments'] = attachments
        d['attach_status'] = 'attached'
    else:
        d['attach_status'] = 'detached'

    d['display_name'] = vol.name
    d['display_description'] = vol.description
    # TODO(jdg): Information may be lost in this translation
    d['volume_type_id'] = vol.volume_type
    d['snapshot_id'] = vol.snapshot_id
    d['bootable'] = strutils.bool_from_string(vol.bootable)
    d['volume_metadata'] = dict(vol.metadata)

    if hasattr(vol, 'volume_image_metadata'):
        d['volume_image_metadata'] = copy.deepcopy(vol.volume_image_metadata)

    return d
def _untranslate_snapshot_summary_view(context, snapshot):
"""Maps keys for snapshots summary view."""
d = {}
d['id'] = snapshot.id
d['status'] = snapshot.status
d['progress'] = snapshot.progress
d['size'] = snapshot.size
d['created_at'] = snapshot.created_at
d['display_name'] = snapshot.name
d['display_description'] = snapshot.description
d['volume_id'] = snapshot.volume_id
d['project_id'] = snapshot.project_id
d['volume_size'] = snapshot.size
return d
def translate_cinder_exception(method):
    """Transforms a cinder exception but keeps its traceback intact.

    Maps cinderclient/keystoneauth connection, bad-request and forbidden
    errors onto the corresponding nova exceptions so callers only have to
    handle nova exception types.
    """
    @functools.wraps(method)
    def wrapper(self, ctx, *args, **kwargs):
        try:
            res = method(self, ctx, *args, **kwargs)
        except (cinder_exception.ConnectionError,
                keystone_exception.ConnectionError) as exc:
            err_msg = encodeutils.exception_to_unicode(exc)
            _reraise(exception.CinderConnectionFailed(reason=err_msg))
        except (keystone_exception.BadRequest,
                cinder_exception.BadRequest) as exc:
            err_msg = encodeutils.exception_to_unicode(exc)
            _reraise(exception.InvalidInput(reason=err_msg))
        except (keystone_exception.Forbidden,
                cinder_exception.Forbidden) as exc:
            err_msg = encodeutils.exception_to_unicode(exc)
            _reraise(exception.Forbidden(err_msg))
        # Only reached when no exception fired; _reraise always raises.
        return res
    return wrapper
def translate_volume_exception(method):
    """Transforms the exception for the volume but keeps its traceback
    intact.

    Maps cinder/keystone NotFound onto ``exception.VolumeNotFound`` and
    OverLimit onto ``exception.OverQuota``; everything else is handled by
    ``translate_cinder_exception``.
    """
    # FIX: wrap the inner function with the original method's metadata.
    # Without this, the outer translate_cinder_exception copies *wrapper*'s
    # name/docstring onto the final callable, so decorated API methods all
    # showed up as "wrapper" in logs and introspection.
    @functools.wraps(method)
    def wrapper(self, ctx, volume_id, *args, **kwargs):
        try:
            res = method(self, ctx, volume_id, *args, **kwargs)
        except (keystone_exception.NotFound, cinder_exception.NotFound):
            _reraise(exception.VolumeNotFound(volume_id=volume_id))
        except cinder_exception.OverLimit:
            _reraise(exception.OverQuota(overs='volumes'))
        return res
    return translate_cinder_exception(wrapper)
def translate_snapshot_exception(method):
    """Transforms the exception for the snapshot but keeps its traceback
    intact.

    Maps cinder/keystone NotFound onto ``exception.SnapshotNotFound``;
    everything else is handled by ``translate_cinder_exception``.
    """
    # FIX: preserve the decorated method's metadata (name/docstring).
    # Previously the final callable advertised itself as "wrapper" because
    # translate_cinder_exception wrapped the inner function, not ``method``.
    @functools.wraps(method)
    def wrapper(self, ctx, snapshot_id, *args, **kwargs):
        try:
            res = method(self, ctx, snapshot_id, *args, **kwargs)
        except (keystone_exception.NotFound, cinder_exception.NotFound):
            _reraise(exception.SnapshotNotFound(snapshot_id=snapshot_id))
        return res
    return translate_cinder_exception(wrapper)
def translate_mixed_exceptions(method):
    """Transforms exceptions that can come from both volumes and snapshots.

    NotFound is reported as ``exception.VolumeNotFound`` and OverLimit as
    an over-quota on snapshots; everything else is handled by
    ``translate_cinder_exception``.
    """
    # FIX: preserve the decorated method's metadata (name/docstring).
    # Previously the final callable advertised itself as "wrapper" because
    # translate_cinder_exception wrapped the inner function, not ``method``.
    @functools.wraps(method)
    def wrapper(self, ctx, res_id, *args, **kwargs):
        try:
            res = method(self, ctx, res_id, *args, **kwargs)
        except (keystone_exception.NotFound, cinder_exception.NotFound):
            _reraise(exception.VolumeNotFound(volume_id=res_id))
        except cinder_exception.OverLimit:
            _reraise(exception.OverQuota(overs='snapshots'))
        return res
    return translate_cinder_exception(wrapper)
def _reraise(desired_exc):
    # Re-raise ``desired_exc`` with the traceback of the exception that is
    # currently being handled, so the original failure point is preserved
    # (py2/py3-compatible via six).
    six.reraise(type(desired_exc), desired_exc, sys.exc_info()[2])
class API(object):
    """API for interacting with the volume manager."""

    @translate_volume_exception
    def get(self, context, volume_id):
        """Return a translated summary view of the volume with this id."""
        item = cinderclient(context).volumes.get(volume_id)
        return _untranslate_volume_summary_view(context, item)

    @translate_cinder_exception
    def get_all(self, context, search_opts=None):
        """List all volumes, optionally filtered by ``search_opts``."""
        search_opts = search_opts or {}
        items = cinderclient(context).volumes.list(detailed=True,
                                                   search_opts=search_opts)
        rval = []
        for item in items:
            rval.append(_untranslate_volume_summary_view(context, item))
        return rval

    def check_attached(self, context, volume):
        """Raise InvalidVolume unless the volume status is 'in-use'."""
        if volume['status'] != "in-use":
            msg = _("volume '%(vol)s' status must be 'in-use'. Currently in "
                    "'%(status)s' status") % {"vol": volume['id'],
                                              "status": volume['status']}
            raise exception.InvalidVolume(reason=msg)

    def check_attach(self, context, volume, instance=None):
        """Validate that the volume can be attached (status, AZ)."""
        # TODO(vish): abstract status checking?
        if volume['status'] != "available":
            msg = _("volume '%(vol)s' status must be 'available'. Currently "
                    "in '%(status)s'") % {'vol': volume['id'],
                                          'status': volume['status']}
            raise exception.InvalidVolume(reason=msg)
        if volume['attach_status'] == "attached":
            msg = _("volume %s already attached") % volume['id']
            raise exception.InvalidVolume(reason=msg)
        self.check_availability_zone(context, volume, instance)

    def check_availability_zone(self, context, volume, instance=None):
        """Ensure that the availability zone is the same."""
        # TODO(walter-boring): move this check to Cinder as part of
        # the reserve call.
        if instance and not CONF.cinder.cross_az_attach:
            instance_az = az.get_instance_availability_zone(context, instance)
            if instance_az != volume['availability_zone']:
                msg = _("Instance %(instance)s and volume %(vol)s are not in "
                        "the same availability_zone. Instance is in "
                        "%(ins_zone)s. Volume is in %(vol_zone)s") % {
                            "instance": instance['id'],
                            "vol": volume['id'],
                            'ins_zone': instance_az,
                            'vol_zone': volume['availability_zone']}
                raise exception.InvalidVolume(reason=msg)

    def check_detach(self, context, volume, instance=None):
        """Validate that the volume can be detached (from ``instance``)."""
        # TODO(vish): abstract status checking?
        if volume['status'] == "available":
            msg = _("volume %s already detached") % volume['id']
            raise exception.InvalidVolume(reason=msg)
        if volume['attach_status'] == 'detached':
            msg = _("Volume must be attached in order to detach.")
            raise exception.InvalidVolume(reason=msg)
        # NOTE(ildikov):Preparation for multiattach support, when a volume
        # can be attached to multiple hosts and/or instances,
        # so just check the attachment specific to this instance
        if instance is not None and instance.uuid not in volume['attachments']:
            # TODO(ildikov): change it to a better exception, when enable
            # multi-attach.
            raise exception.VolumeUnattached(volume_id=volume['id'])

    @translate_volume_exception
    def reserve_volume(self, context, volume_id):
        """Reserve the volume in cinder prior to attaching it."""
        cinderclient(context).volumes.reserve(volume_id)

    @translate_volume_exception
    def unreserve_volume(self, context, volume_id):
        """Undo a previous reserve on the volume."""
        cinderclient(context).volumes.unreserve(volume_id)

    @translate_volume_exception
    def begin_detaching(self, context, volume_id):
        """Tell cinder the volume is being detached."""
        cinderclient(context).volumes.begin_detaching(volume_id)

    @translate_volume_exception
    def roll_detaching(self, context, volume_id):
        """Roll back a failed detach in cinder."""
        cinderclient(context).volumes.roll_detaching(volume_id)

    @translate_volume_exception
    def attach(self, context, volume_id, instance_uuid, mountpoint, mode='rw'):
        """Record the attachment of the volume to the given instance."""
        cinderclient(context).volumes.attach(volume_id, instance_uuid,
                                             mountpoint, mode=mode)

    @translate_volume_exception
    def detach(self, context, volume_id, instance_uuid=None,
               attachment_id=None):
        """Detach the volume, resolving the attachment id for multiattach
        volumes when one was not supplied.
        """
        client = cinderclient(context)
        # NOTE(review): cinderclient() raises for version '1', so this
        # branch looks unreachable — confirm before removing.
        if client.version == '1':
            client.volumes.detach(volume_id)
            return
        if attachment_id is None:
            volume = self.get(context, volume_id)
            if volume['multiattach']:
                attachments = volume.get('attachments', {})
                if instance_uuid:
                    attachment_id = attachments.get(instance_uuid, {}).\
                        get('attachment_id')
                    if not attachment_id:
                        LOG.warning(_LW("attachment_id couldn't be retrieved "
                                        "for volume %(volume_id)s with "
                                        "instance_uuid %(instance_id)s. The "
                                        "volume has the 'multiattach' flag "
                                        "enabled, without the attachment_id "
                                        "Cinder most probably cannot perform "
                                        "the detach."),
                                    {'volume_id': volume_id,
                                     'instance_id': instance_uuid})
                else:
                    LOG.warning(_LW("attachment_id couldn't be retrieved for "
                                    "volume %(volume_id)s. The volume has the "
                                    "'multiattach' flag enabled, without the "
                                    "attachment_id Cinder most probably "
                                    "cannot perform the detach."),
                                {'volume_id': volume_id})
        client.volumes.detach(volume_id, attachment_id)

    @translate_volume_exception
    def initialize_connection(self, context, volume_id, connector):
        """Initialize the connection between the volume and a host connector.

        On failure the connection is best-effort terminated again so no
        half-initialized export is left behind on the backend.
        """
        try:
            connection_info = cinderclient(
                context).volumes.initialize_connection(volume_id, connector)
            connection_info['connector'] = connector
            return connection_info
        except cinder_exception.ClientException as ex:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Initialize connection failed for volume '
                              '%(vol)s on host %(host)s. Error: %(msg)s '
                              'Code: %(code)s. Attempting to terminate '
                              'connection.'),
                          {'vol': volume_id,
                           'host': connector.get('host'),
                           'msg': six.text_type(ex),
                           'code': ex.code})
                try:
                    self.terminate_connection(context, volume_id, connector)
                except Exception as exc:
                    LOG.error(_LE('Connection between volume %(vol)s and host '
                                  '%(host)s might have succeeded, but attempt '
                                  'to terminate connection has failed. '
                                  'Validate the connection and determine if '
                                  'manual cleanup is needed. Error: %(msg)s '
                                  'Code: %(code)s.'),
                              {'vol': volume_id,
                               'host': connector.get('host'),
                               'msg': six.text_type(exc),
                               'code': (
                                   exc.code if hasattr(exc, 'code') else None)})

    @translate_volume_exception
    def terminate_connection(self, context, volume_id, connector):
        """Tear down the connection between the volume and the connector."""
        return cinderclient(context).volumes.terminate_connection(volume_id,
                                                                  connector)

    @translate_cinder_exception
    def migrate_volume_completion(self, context, old_volume_id, new_volume_id,
                                  error=False):
        """Complete (or, when ``error`` is True, abort) a volume migration."""
        return cinderclient(context).volumes.migrate_volume_completion(
            old_volume_id, new_volume_id, error)

    @translate_volume_exception
    def create(self, context, size, name, description, snapshot=None,
               image_id=None, volume_type=None, metadata=None,
               availability_zone=None):
        """Create a new volume and return its translated summary view.

        NOTE(review): the ``translate_volume_exception`` decorator binds
        its ``volume_id`` parameter to ``size`` here — confirm whether
        ``translate_cinder_exception`` was intended instead.
        """
        client = cinderclient(context)
        if snapshot is not None:
            snapshot_id = snapshot['id']
        else:
            snapshot_id = None
        kwargs = dict(snapshot_id=snapshot_id,
                      volume_type=volume_type,
                      user_id=context.user_id,
                      project_id=context.project_id,
                      availability_zone=availability_zone,
                      metadata=metadata,
                      imageRef=image_id,
                      name=name,
                      description=description)
        item = client.volumes.create(size, **kwargs)
        return _untranslate_volume_summary_view(context, item)

    @translate_volume_exception
    def delete(self, context, volume_id):
        """Delete the given volume."""
        cinderclient(context).volumes.delete(volume_id)

    @translate_volume_exception
    def update(self, context, volume_id, fields):
        """Volume updates are not supported through this API."""
        raise NotImplementedError()

    @translate_snapshot_exception
    def get_snapshot(self, context, snapshot_id):
        """Return a translated summary view of the snapshot with this id."""
        item = cinderclient(context).volume_snapshots.get(snapshot_id)
        return _untranslate_snapshot_summary_view(context, item)

    @translate_cinder_exception
    def get_all_snapshots(self, context):
        """List all snapshots, translated to the nova summary view."""
        items = cinderclient(context).volume_snapshots.list(detailed=True)
        rvals = []
        for item in items:
            rvals.append(_untranslate_snapshot_summary_view(context, item))
        return rvals

    @translate_mixed_exceptions
    def create_snapshot(self, context, volume_id, name, description):
        """Create a snapshot of the volume (force flag False)."""
        item = cinderclient(context).volume_snapshots.create(volume_id,
                                                             False,
                                                             name,
                                                             description)
        return _untranslate_snapshot_summary_view(context, item)

    @translate_mixed_exceptions
    def create_snapshot_force(self, context, volume_id, name, description):
        """Create a snapshot of the volume (force flag True)."""
        item = cinderclient(context).volume_snapshots.create(volume_id,
                                                             True,
                                                             name,
                                                             description)
        return _untranslate_snapshot_summary_view(context, item)

    @translate_snapshot_exception
    def delete_snapshot(self, context, snapshot_id):
        """Delete the given snapshot."""
        cinderclient(context).volume_snapshots.delete(snapshot_id)

    @translate_cinder_exception
    def get_volume_encryption_metadata(self, context, volume_id):
        """Return the encryption metadata cinder stores for the volume."""
        return cinderclient(context).volumes.get_encryption_metadata(volume_id)

    @translate_snapshot_exception
    def update_snapshot_status(self, context, snapshot_id, status):
        """Update the snapshot's status in cinder, reporting 90% progress."""
        vs = cinderclient(context).volume_snapshots
        # '90%' here is used to tell Cinder that Nova is done
        # with its portion of the 'creating' state. This can
        # be removed when we are able to split the Cinder states
        # into 'creating' and a separate state of
        # 'creating_in_nova'. (Same for 'deleting' state.)
        vs.update_snapshot_status(
            snapshot_id,
            {'status': status,
             'progress': '90%'}
        )
| |
#-*- coding: utf-8 -*-
"""
Interact with the client.
"""
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
from django.template import RequestContext
from oauth2app.models import Client, AccessToken, Code, AccessRange, TimestampGenerator
from base64 import b64encode
from django.contrib.auth.decorators import login_required
from .models import OauthRequest
from django.conf import settings
import simplejson as json
import requests
import traceback
from urllib import urlencode
import logging
#=================================================================
# Prevent sql commands from being printed out for this section...
from django.conf import settings
from django.db.backends import BaseDatabaseWrapper
from django.db.backends.util import CursorWrapper
# Swap the debug cursor for a plain CursorWrapper so SQL statements are not
# collected/printed while DEBUG is enabled for this module's queries.
if settings.DEBUG:
    BaseDatabaseWrapper.make_debug_cursor = lambda self, cursor: CursorWrapper(cursor, self)
#==================================================================
log = logging.getLogger("myclient.apps.client.views")
@login_required
def client(request, client_id):
    """
    Show the client details, available codes and access tokens for a
    given client.
    """
    # XXX The client should eventually be decoupled from the user
    # account information.
    log.debug("In apps.client.views.client - entered")
    client = Client.objects.get(key=client_id)
    context = {
        "client": client,
        "basic_auth": "Basic %s" % b64encode(client.key + ":" + client.secret),
        "codes": Code.objects.filter(client=client).select_related(),
        "access_tokens": AccessToken.objects.filter(client=client).select_related(),
        "error_description": request.GET.get("error_description"),
    }
    return render_to_response('client/client.html', context,
                              RequestContext(request))
# Login not required.
def request(request, req_id):
    """
    Receive an authorization code from the server. The request object
    is used to generate the URL.

    Flow:
    # client => server : create account
    # server => client: client configuration
    # client => server: authorize request
    # server => client: respond with authorization code
    """
    # XXX Rationalize the URL. Right now it is client/request/aaa
    # it could easily be request/aaa or callback/aaa
    log.debug("In apps.client.views.request - entered")
    if request.META.has_key('HTTP_REFERRER'):
        log.debug("Referrer = %s " % (request.META['HTTP_REFERRER']))
    try:
        # The oauthRequest object is the state object for the request. It
        # keeps track of what the user asked and is updated when new
        # information is received from the server. Lookup that object
        # first.
        req = OauthRequest.objects.get(key=req_id)
        log.debug("Found request = %s " % vars(req))
        # Find the code
        code_key = request.GET.get('code')
        log.debug("Received code = %s " % code_key)
        # => Check where the code has been used. Code object has been
        # deleted. So it cant be used. This is a hack to make things
        # work with oauth2app
        if ((req.code == code_key) and (req.completed != 0)):
            log.error("Duplicate callback. Should not come here")
            raise Exception("Code already used")
        #=> Check if the code exists.
        try:
            code = Code.objects.get(key=code_key)
        except:
            # Bare except deliberately treats any lookup failure as
            # "code not seen before".
            code = None
            pass
        if code is not None:
            # Giving token refresh request triggers a callback to this
            # looks like (based on the redirect_uri specified")
            raise Exception("Duplicate code returned")
        # XXX expire is set automatically. But eventually it must be
        # transmitted from the server. For some reason the server is
        # not sending the expiration information.
        code = Code.objects.create(user=req.user,
                                   client=req.client,
                                   redirect_uri=request.path,
                                   key=code_key)
        # copy the scope information into the code from the initial
        # request. This information is not available in the callback
        # from the server
        code.scope.add(*req.scope.all())
        code.save()
        log.debug("saved code = %s " % vars(code))
        log.debug("code scope = %s " % code.scope)
        # XXX This is not being used. May be it can be removed.
        req.completed = TimestampGenerator()()
        req.code = code_key  # we dont store code object because it
                             # will be deleted
        req.save()
        log.debug("saved request = %s " % vars(req))
    except:
        # NOTE(review): failures are swallowed here, but ``req`` is used
        # below; if the OauthRequest lookup itself failed this view raises
        # NameError — confirm this is acceptable.
        traceback.print_exc()
        pass
    #client = Client.objects.all()[:1][0]
    client = req.client
    # Add this to the code objects
    template = {
        "client": client,
        "basic_auth": "Basic %s" % b64encode(client.key + ":" + client.secret),
        "codes": Code.objects.filter(client=client).select_related(),
        "access_tokens": AccessToken.objects.filter(client=client).select_related()}
    template["error_description"] = request.GET.get("error_description")
    return render_to_response(
        'client/client.html',
        template,
        RequestContext(request))
# Helper functions
def get_auth(auth_type='basic'):
    """Build the HTTP Basic ``Authorization`` header value from the
    configured resource client credentials.
    """
    if auth_type != "basic":
        raise Exception("Unknown authentication type asked for")
    key, secret = settings.RESOURCE_CLIENT_KEY, settings.RESOURCE_CLIENT_SECRET
    log.debug("Client_key = %s " % key)
    log.debug("Client secret= %s" % secret)
    header = "Basic %s" % b64encode(key + ":" + secret)
    log.debug("computed authorization = %s " % header)
    return header
@login_required
@csrf_exempt
def request_token(request):
    """
    Handle the request from the user to obtain an access token for a
    given code.
    """
    log.debug("Trying to obtain a token")
    if request.method != "GET":
        return HttpResponseRedirect("/")
    try:
        params = {}
        # extract the user-specified code
        params['code'] = request.GET.get('code')
        # Obtain the state objects
        code = Code.objects.get(key=params['code'])
        req = OauthRequest.objects.get(code=params['code'])
        client = req.client
        #client = Client.objects.all()[:1][0]
        log.debug("Client object = %s " % vars(client))
        log.debug("Code object = %s " % vars(code))
        # set the standard parameters
        params['client_id'] = settings.RESOURCE_CLIENT_KEY
        params['grant_type'] = 'authorization_code'
        params['redirect_uri'] = \
            "http://%s/client/request/%s" % (request.get_host(), req.key)
        # XXX set the scope. Not sure if this the best way.
        log.debug("type req.scope.all() = %s " % type(req.scope.all()))
        all_keys = [s.key for s in req.scope.all()]
        log.debug("all_scopes = %s " % all_keys)
        if len(all_keys) > 0:
            params['scope'] = all_keys[0]  # XXX should be a join(",") instead?
        else:
            params['scope'] = ""
        log.debug("Sending data = %s " % params)
        # Obtain the authentication
        basic_auth = get_auth()
        headers = { 'Authorization': basic_auth }
        log.debug("headers = %s " % headers)
        # Constructing the call
        url = settings.ACCESS_TOKEN_URL + "/?" + urlencode(params)
        log.debug("url = %s " % url)
        # There is nothing in the body. There is only a post - which
        # for some reason seems to turn into a GET at the other end.
        r = requests.post(url, data="", headers=headers)
        log.debug("received headers = %s " % r.headers)
        if r.headers['content-type'] != 'application/json':
            log.error("Received error from server %s" % r.content)
            raise Exception("Possible error in request to the server")
        #=> Now store the token for future access purposes
        grant_data = json.loads(r.content)
        log.debug("grant data = %s " % grant_data)
        content = grant_data
        if not grant_data.has_key('error'):
            # Create an access token. There is no place, it seems for a
            # token_type (bearer etc.)
            access_token = \
                AccessToken.objects.create(user=request.user,
                                           client=client,
                                           token=grant_data['access_token'],
                                           refresh_token=grant_data['refresh_token'],
                                           # token_type=gr['token_type'],
                                           # scope=grant_data['scope']
                                           )
            # Should be this [grant_data['scope']]?
            access_ranges = list(AccessRange.objects.filter(key__in=[grant_data['scope']]))
            access_token.scope = access_ranges
            # alternative to the above
            # access_token.scope.add(*req.scope.all())
            access_token.save()
            # Update the state
            req.refresh_token = grant_data['refresh_token']
            req.token = grant_data['access_token']
            req.save()
            log.debug("access token = %s " % vars(access_token))
            # Clean up
            code.delete()
            log.debug("Deleted code")
        # wrap if necessary...
        # NOTE(review): request.GET.get() does not raise when 'callback' is
        # absent, so a missing callback yields the literal "None(...)" —
        # verify the intended JSONP handling.
        try:
            callback = request.GET.get('callback')
            json_data = "%s(%s);" % (callback, json.dumps(content))
        except:
            json_data = json.dumps(content)
        # => Send the response back
        log.debug("Response = %s " % json_data)
        response = HttpResponse(json_data,
                                content_type="application/json")
        return response
    except:
        # NOTE(review): any failure above is swallowed and the view falls
        # through without returning an HttpResponse — confirm intended.
        traceback.print_exc()
        pass
@login_required
@csrf_exempt
def refresh_token(request):
    """
    Handle a request from the user for refreshing a token.
    """
    # Equivalent browser form, for reference:
    # <form method="post" action="http://localhost:8000/oauth2/token"
    #       class="authenticate">
    #   <input type="hidden" name="grant_type" value="refresh_token" />
    #   <input type="hidden" name="refresh_token" value="38d6122e30" />
    #   <input type="hidden" name="client_id" value="d53d90894c157ab" />
    #   <input type="hidden" name="scope" value="" />
    #   <input type="submit" value="38d6122e30"/>
    # </form>
    log.debug("in refresh_token")
    if request.method != "GET":
        return HttpResponseRedirect("/")
    # Obtain and lookup a refresh token
    refresh_token_key = request.GET.get('refresh_token')
    try:
        token = AccessToken.objects.get(refresh_token=refresh_token_key)
        req = OauthRequest.objects.get(refresh_token=refresh_token_key)
        client = req.client
        # Start constructing the request that must be sent to the resource
        # server.
        params = {}
        params['client_id'] = settings.RESOURCE_CLIENT_KEY
        params['grant_type'] = 'refresh_token'
        params['refresh_token'] = token.refresh_token
        # Dont need to specify the redirect_uri as we dont need a call
        # back from the server. Just the json response is enough.
        params['redirect_uri'] = \
            "http://%s/client/request/%s" % (request.get_host(), req.key)
        # => set the scope of the refresh. Not sure why scope is required
        # again because it has been specific while obtaining the
        # authorization. The scope cant be any different.
        log.debug("type req.scope.all() = %s " % type(req.scope.all()))
        all_keys = [s.key for s in req.scope.all()]
        log.debug("all_scopes = %s " % all_keys)
        if len(all_keys) > 0:
            params['scope'] = all_keys[0]
        else:
            params['scope'] = ""
        # => params to ready
        log.debug("params = %s " % params)
        # Obtain the authentication
        basic_auth = get_auth()
        headers = { 'Authorization': basic_auth }
        log.debug("headers = %s " % headers)
        # Constructing the call
        url = settings.ACCESS_TOKEN_URL + "/?" + urlencode(params)
        log.debug("url = %s " % url)
        # Call the server
        r = requests.post(url, data="", headers=headers)
        log.debug("received headers = %s " % r.headers)
        if r.headers['content-type'] != 'application/json':
            # Should probably delete the token
            log.error("Received error from server %s" % r.content)
            raise Exception("Possible error in request to the server")
        # => Now store the token for future access purposes
        grant_data = json.loads(r.content)
        log.debug("grant data = %s " % grant_data)
        if grant_data.has_key('error'):
            # Dont change anything
            content = grant_data
        else:
            # => Update the token state
            token.token = grant_data['access_token']
            token.refresh_token = grant_data['refresh_token']
            now = TimestampGenerator()()
            token.expire = now + int(grant_data['expire_in'])
            token.save()
            # Update the request state
            req.token = grant_data['access_token']
            req.refresh_token = grant_data['refresh_token']
            req.save()
            # response
            content = {'refresh_token': req.refresh_token, 'token': req.token}
        # wrap if necessary...
        # NOTE(review): as in request_token, a missing 'callback' parameter
        # produces the literal "None(...)" wrapper — verify.
        try:
            callback = request.GET.get('callback')
            json_data = "%s(%s);" % (callback, json.dumps(content))
        except:
            json_data = json.dumps(content)
        # XXX May be this should be the response instead
        #next = "/client/%s" % client.key
        #log.debug("Redirecting to %s " % next)
        #return HttpResponseRedirect(next)
        # => Send the response back
        log.debug("Response = %s " % json_data)
        response = HttpResponse(json_data,
                                content_type="application/json")
        return response
    except:
        traceback.print_exc()
        raise Exception("Invalid refresh token")
@login_required
def forward(request):
    """
    Request authorization from the resource server. First create a
    OauthRequest object to store local state and use when the callback
    is obtained.
    """
    # The request from the user is coming in the form a get. It must
    # specify whether aadhaar must be used for authorization.
    # XXX change /client/forward to something else. may be request_code
    if request.method != "GET":
        # FIX: the original referenced an undefined ``nexturl`` here, which
        # raised NameError for every non-GET request. Redirect home
        # instead, matching the other views in this module.
        return HttpResponseRedirect("/")
    log.debug("request parameters = %s " % request.GET)
    # Figure out if this request is aadhaar
    try:
        aadhaar = False
        aadhaar_str = request.GET.get('aadhaar')
        if aadhaar_str == "True":
            aadhaar = True
    except:
        pass
    log.debug("aadhaar = %s %s " % (aadhaar, type(aadhaar)))
    # Some dummy client. It does not matter.
    # XXX change this if you need to support multiple clients.
    client = Client.objects.all()[:1][0]
    if client is None:
        raise Exception("Could not find a suitable client")
    user = request.user
    scope_key = request.GET.get('scope')
    log.debug("scope key = %s %s " % (scope_key, type(scope_key)))
    if ((scope_key is not None) and (scope_key != "None")):
        access_ranges = list(AccessRange.objects.filter(key__in=[scope_key]))
    else:
        access_ranges = []
    log.debug("access_ranges = %s " % access_ranges)
    log.debug([r.key for r in access_ranges])
    response_type = 'code'
    # Now create the request.
    req = OauthRequest.objects.create(client=client,
                                      user=user,
                                      #aadhaar=aadhaar,
                                      response_type=response_type)
    req.scope = access_ranges
    req.save()
    log.debug("created request object %s with key %s " % (req, req.key))
    # Construct the request to be forwarded to the authorization server.
    #params = request.GET.copy()
    params = {}
    params['response_type'] = response_type
    params['client_id'] = settings.RESOURCE_CLIENT_KEY
    if scope_key is not None:
        params['scope'] = scope_key
    params['redirect_uri'] = "http://%s/client/request/%s" % (request.get_host(), req.key)
    # Construct the url
    url = urlencode(params)
    if aadhaar:
        next = "%s?%s" % (settings.AADHAAR_AUTHORIZE_URL, url)
    else:
        next = "%s?%s" % (settings.AUTHORIZE_URL, url)
    log.debug("Updated parameters = %s " % params)
    log.debug("Redirecting to %s " % next)
    return HttpResponseRedirect(next)
| |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import json
import logging
import random
import time
from datetime import date, datetime
import pytz
from babel.dates import format_datetime
from dateutil.relativedelta import relativedelta
from google.appengine.ext import webapp, deferred, db
from mcfw.properties import azzert
from mcfw.rpc import serialize_complex_value, returns, arguments
from rogerthat.bizz.communities.models import Community
from rogerthat.bizz.job import run_job
from rogerthat.consts import SCHEDULED_QUEUE
from rogerthat.dal import parent_key_unsafe, put_and_invalidate_cache
from rogerthat.dal.profile import get_profile_infos
from rogerthat.models import Message
from rogerthat.rpc import users
from rogerthat.rpc.users import set_user
from rogerthat.service.api import messaging, system
from rogerthat.to.messaging import AnswerTO
from rogerthat.to.service import UserDetailsTO
from rogerthat.translations import DEFAULT_LANGUAGE
from rogerthat.utils import now, send_mail
from rogerthat.utils.channel import send_message
from solution_server_settings import get_solution_server_settings
from solutions import translate
from solutions.common import SOLUTION_COMMON
from solutions.common.bizz import SolutionModule, get_app_info_cached
from solutions.common.bizz.inbox import create_solution_inbox_message, add_solution_inbox_message
from solutions.common.bizz.loyalty import update_user_data_admins, create_loyalty_statistics_for_service, \
send_email_to_user_for_loyalty_update
from solutions.common.bizz.messaging import send_inbox_forwarders_message
from solutions.common.bizz.settings import get_service_info
from solutions.common.dal import get_solution_settings
from solutions.common.models import SolutionInboxMessage, SolutionSettings
from solutions.common.models.loyalty import SolutionLoyaltyLottery, SolutionLoyaltySettings, \
SolutionLoyaltyVisitLottery, \
SolutionLoyaltyLotteryStatistics, SolutionCityWideLottery, SolutionCityWideLotteryStatistics, \
SolutionCityWideLotteryVisit
from solutions.common.models.properties import SolutionUserTO
from solutions.common.to import SolutionInboxMessageTO
from solutions.common.to.loyalty import ExtendedUserDetailsTO
from solutions.common.utils import create_service_identity_user_wo_default
class LootLotteryCronHandler(webapp.RequestHandler):
    """Cron entry point that schedules the loot jobs for both the regular
    and the city-wide loyalty lotteries."""

    def get(self):
        _schedule_loot_lottery()
        _schedule_loot_city_wide_lottery()
class SolutionLoyaltyExportHandler(webapp.RequestHandler):
    """Cron entry point that triggers the loyalty export PDF generation."""

    def get(self):
        # NOTE(review): create_loyalty_export_pdfs is not among the visible
        # imports; presumably defined later in this module — verify.
        create_loyalty_export_pdfs()
def _schedule_loot_lottery():
    """Fan out a _worker task per due per-service lottery (run_job over _qry)."""
    run_job(_qry, [], _worker, [])
def _qry():
    """Keys-only query for lotteries whose loot moment has passed.

    A positive ``schedule_loot_time`` marks an unprocessed lottery; the
    worker negates the value once the lottery has been handled.
    """
    due_before = now()
    query = SolutionLoyaltyLottery.all(keys_only=True)
    query.filter("schedule_loot_time <", due_before)
    query.filter("schedule_loot_time >", 0)
    return query
def _worker(sln_loyalty_lottery_key):
    """run_job worker: schedule the winner pick for one due lottery.

    Runs in a cross-group transaction. If the service still has the loyalty
    module with lottery type enabled, defers _pick_winner to fire at the
    lottery's end time; otherwise marks the lottery deleted. In all cases the
    schedule_loot_time is negated so _qry never returns this lottery again.

    Args:
        sln_loyalty_lottery_key: datastore key of a SolutionLoyaltyLottery.
    """
    def trans():
        sln_loyalty_lottery = db.get(sln_loyalty_lottery_key)
        service_user = sln_loyalty_lottery.service_user
        logging.info("loyalty lottery loot: %s", service_user)
        sls_key = SolutionLoyaltySettings.create_key(service_user)
        sln_settings_key = SolutionSettings.create_key(service_user)
        sln_loyalty_settings, sln_settings = db.get([sls_key, sln_settings_key])
        if SolutionModule.LOYALTY in sln_settings.modules:
            if sln_loyalty_settings.loyalty_type != SolutionLoyaltySettings.LOYALTY_TYPE_LOTTERY:
                # Service switched away from the lottery loyalty type.
                sln_loyalty_lottery.deleted = True
            else:
                # Convert "now" into the service's local timezone so the
                # countdown matches the locally-configured end timestamp.
                now_tz = int(time.mktime(datetime.fromtimestamp(now(), pytz.timezone(sln_settings.timezone)).timetuple()))
                logging.debug("sln_loyalty_lottery.end_timestamp: %s", sln_loyalty_lottery.end_timestamp)
                logging.debug("end: %s", now_tz)
                seconds_before = sln_loyalty_lottery.end_timestamp - now_tz
                if seconds_before < 0:
                    seconds_before = 0
                logging.debug("_schedule_loot_lottery seconds_before: %s", seconds_before)
                deferred.defer(_pick_winner, service_user, sln_loyalty_lottery_key,
                               _countdown=seconds_before, _queue=SCHEDULED_QUEUE, _transactional=True)
        else:
            sln_loyalty_lottery.deleted = True
        # Negate the schedule time: _qry only selects positive values, so the
        # lottery is never picked up by the cron again.
        sln_loyalty_lottery.schedule_loot_time = sln_loyalty_lottery.schedule_loot_time * -1
        sln_loyalty_lottery.put()
    xg_on = db.create_transaction_options(xg=True)
    db.run_in_transaction_options(xg_on, trans)
@returns()
@arguments(service_user=users.User, message_key=unicode, message_parent_key=unicode, parent_message_key=unicode, dirty_behavior=int)
def _messaging_seal(service_user, message_key, message_parent_key, parent_message_key, dirty_behavior):
    """Seal a message via the messaging API on behalf of *service_user*.

    Args:
        service_user: service account to impersonate for the API call.
        message_key: key of the message to seal.
        message_parent_key: parent key forwarded to messaging.seal.
        parent_message_key: parent message key forwarded to messaging.seal.
        dirty_behavior: dirty-behavior flag forwarded to messaging.seal.
    """
    users.set_user(service_user)
    try:
        # BUG FIX: dirty_behavior was previously ignored and a hard-coded 1 was
        # passed instead. Honor the parameter; the only visible caller passes
        # 1, so existing behavior is unchanged.
        messaging.seal(message_key, message_parent_key, parent_message_key, dirty_behavior)
    finally:
        users.clear_user()
def _pick_winner(service_user, sln_loyalty_lottery_key):
    """Pick (or re-pick) the winner of a per-service loyalty lottery.

    Skips lotteries that are already claimed/redeemed/deleted or not yet
    processed by _worker (schedule_loot_time still positive). Builds a
    weighted pool of candidates from the lottery statistics (one entry per
    recorded visit), excludes previously skipped winners and the current
    winner, then — in a cross-group transaction — notifies the previous
    winner (if any) that they lost their spot, records the new winner, sends
    the inbox/forwarder messages and schedules the 24h follow-up via
    _continue.

    Args:
        service_user: the service owning the lottery.
        sln_loyalty_lottery_key: datastore key of the SolutionLoyaltyLottery.
    Returns:
        Tuple of (transaction result: parent inbox message, lottery model),
        or None when the function bails out early.
    """
    now_ = now()
    sln_loyalty_lottery = db.get(sln_loyalty_lottery_key)
    if sln_loyalty_lottery.claimed or sln_loyalty_lottery.redeemed or sln_loyalty_lottery.deleted:
        return
    if sln_loyalty_lottery.schedule_loot_time > 0:
        # _worker negates schedule_loot_time when it schedules this task; a
        # positive value means the lottery was rescheduled/reset meanwhile.
        return
    service_identity = sln_loyalty_lottery.service_identity
    service_identity_user = create_service_identity_user_wo_default(service_user, service_identity)
    sls_key = SolutionLoyaltySettings.create_key(service_user)
    slls_key = SolutionLoyaltyLotteryStatistics.create_key(service_user, service_identity)
    sln_settings_key = SolutionSettings.create_key(service_user)
    sln_loyalty_settings, slls, sln_settings = db.get([sls_key, slls_key, sln_settings_key])
    if sln_loyalty_settings.loyalty_type != SolutionLoyaltySettings.LOYALTY_TYPE_LOTTERY:
        sln_loyalty_lottery.deleted = True
        sln_loyalty_lottery.put()
        return
    logging.info("loyalty lottery loot: %s", service_user)
    possible_winners = []
    if slls:
        # Weighted pool: each user is added once per visit (slls.count[i]),
        # so more visits mean a proportionally higher chance of winning.
        # NOTE(review): the inner loop variable shadows the enumerate index
        # `i`; harmless here because xrange(slls.count[i]) is evaluated before
        # the inner loop rebinds it, but fragile — worth renaming.
        for i, app_user in enumerate(slls.app_users):
            if app_user not in sln_loyalty_lottery.skip_winners and app_user != sln_loyalty_lottery.winner:
                for i in xrange(slls.count[i]):
                    possible_winners.append(app_user)
    logging.debug("possible winners count: %s", len(possible_winners))
    if len(possible_winners) == 0:
        if sln_loyalty_lottery.winner:
            logging.debug("can not assign winner, keep old")
        else:
            logging.debug("can not assign winner, delete lottery")
            sln_loyalty_lottery.deleted = True
            sln_loyalty_lottery.put()
        return
    else:
        winner = random.choice(possible_winners)
        logging.debug("new winner: %s", winner)
    slvl = SolutionLoyaltyVisitLottery.all() \
        .ancestor(parent_key_unsafe(service_identity_user, SOLUTION_COMMON)) \
        .filter('redeemed =', False) \
        .filter('app_user =', winner).get()
    azzert(slvl, "SolutionLoyaltyVisitLottery for app_user %s not found!" % winner)
    if slvl.get_app_user_info():
        # Preferred path: user info stored on the visit itself.
        user_detail = UserDetailsTO()
        user_detail.email = slvl.get_app_user_info().email
        user_detail.name = slvl.get_app_user_info().name
        user_detail.language = slvl.get_app_user_info().language
        user_detail.avatar_url = slvl.get_app_user_info().avatar_url
        user_detail.app_id = slvl.get_app_user_info().app_id
    else:
        # XXX: don't use get_profile_infos
        profile_info = get_profile_infos([slvl.app_user], allow_none_in_results=True)[0]
        if not profile_info or profile_info.isServiceIdentity:
            azzert(False, "profile_info for app_user %s not found!" % winner)
        else:
            user_detail = UserDetailsTO.fromUserProfile(profile_info)
    # Format the draw date and the next (24h later) draw date in the
    # service's timezone and language for the notification texts.
    loot_datetime_tz = datetime.fromtimestamp(sln_loyalty_lottery.end_timestamp, pytz.timezone(sln_settings.timezone))
    loot_date_str = format_datetime(loot_datetime_tz, format='medium', locale=sln_settings.main_language or DEFAULT_LANGUAGE)
    next_datetime_tz = datetime.fromtimestamp(now() + 24 * 3600, pytz.timezone(sln_settings.timezone))
    next_date_str = format_datetime(next_datetime_tz, format='medium', locale=sln_settings.main_language or DEFAULT_LANGUAGE)
    msg_ok = translate(sln_settings.main_language, 'loyalty-lottery-loot-ok', name=user_detail.name,
                       date_loot=loot_date_str, prize=sln_loyalty_lottery.winnings, date=next_date_str)
    msg_sorry = translate(sln_settings.main_language, 'loyalty-lottery-loot-nok')
    # Confirm button attached to the winner's message.
    btn = AnswerTO()
    btn.id = u'%s' % json.dumps({"key": unicode(sln_loyalty_lottery_key)})
    btn.type = u'button'
    btn.caption = translate(sln_settings.main_language, 'Confirm')
    btn.action = None
    btn.ui_flags = 0
    message_flags = Message.FLAG_ALLOW_DISMISS
    service_info = get_service_info(service_user, service_identity)

    def trans():
        sm_data = []
        if sln_loyalty_lottery.winner_timestamp != 0:
            # A winner was picked before but never confirmed: tell the old
            # winner they lost their prize and seal the old message.
            logging.debug("loyalty lottery loot: update winner %s", sln_loyalty_lottery.winner)
            sim_parent, _ = add_solution_inbox_message(service_user, sln_loyalty_lottery.solution_inbox_message_key, True, None, now_, msg_sorry, mark_as_read=True)
            if sim_parent.message_key_by_tag:
                message_key_by_tag = json.loads(sim_parent.message_key_by_tag)
                if message_key_by_tag.get(u"loyalty_lottery_loot", None):
                    deferred.defer(_messaging_seal, service_user, message_key_by_tag[u"loyalty_lottery_loot"], sim_parent.message_key, sim_parent.message_key, 1, _transactional=True)
            send_inbox_forwarders_message(service_user, service_identity, None, msg_sorry, {
                'if_name': user_detail.name,
                'if_email': user_detail.email
            }, message_key=sim_parent.solution_inbox_message_key, reply_enabled=sim_parent.reply_enabled, send_reminder=False)
            deferred.defer(send_email_to_user_for_loyalty_update, service_user, service_identity, sln_loyalty_lottery.winner, msg_sorry, False)
            sm_data.append({u"type": u"solutions.common.messaging.update",
                            u"message": SolutionInboxMessageTO.fromModel(sim_parent, sln_settings, service_info,
                                                                         True).to_dict()})
        logging.debug("loyalty lottery loot: new winner %s", winner)
        sim_parent = create_solution_inbox_message(service_user, service_identity, SolutionInboxMessage.CATEGORY_LOYALTY, unicode(sln_loyalty_lottery_key), True, [user_detail], now_, msg_ok, True, mark_as_read=True)
        sln_loyalty_lottery.solution_inbox_message_key = sim_parent.solution_inbox_message_key
        if sln_loyalty_lottery.winner:
            # Remember the previous winner so they are excluded next time.
            if not sln_loyalty_lottery.skip_winners:
                sln_loyalty_lottery.skip_winners = []
            sln_loyalty_lottery.skip_winners.append(sln_loyalty_lottery.winner)
        sln_loyalty_lottery.pending = False
        sln_loyalty_lottery.winner = winner
        sln_loyalty_lottery.save_winner_info(SolutionUserTO.fromTO(user_detail) if user_detail else None)
        sln_loyalty_lottery.winner_timestamp = now_
        sln_loyalty_lottery.put()
        send_inbox_forwarders_message(service_user, service_identity, None, msg_ok, {
            'if_name': user_detail.name,
            'if_email': user_detail.email
        }, message_key=sim_parent.solution_inbox_message_key, reply_enabled=sim_parent.reply_enabled, send_reminder=False, answers=[btn], store_tag=u"loyalty_lottery_loot", flags=message_flags)
        deferred.defer(send_email_to_user_for_loyalty_update, service_user, service_identity, sln_loyalty_lottery.winner, msg_ok, False, sim_parent.solution_inbox_message_key)
        sm_data.append({u"type": u"solutions.common.messaging.update",
                        u"message": SolutionInboxMessageTO.fromModel(sim_parent, sln_settings, service_info,
                                                                     True).to_dict()})
        sm_data.append({u"type": u"solutions.common.loyalty.lottery.update"})
        send_message(service_user, sm_data, service_identity=service_identity)
        # Re-check in 24h whether the winner confirmed (see _continue).
        deferred.defer(_continue, service_user, service_identity, sln_loyalty_lottery_key, _transactional=True)
        return sim_parent
    xg_on = db.create_transaction_options(xg=True)
    return db.run_in_transaction_options(xg_on, trans), sln_loyalty_lottery
def _continue(service_user, service_identity, sln_loyalty_lottery_key):
    """Schedule follow-up work after a winner was picked.

    Defers an admin user-data refresh immediately and re-runs _pick_winner in
    24 hours; if the winner has not claimed by then, a new winner is drawn.
    """
    def trans():
        deferred.defer(update_user_data_admins, service_user, service_identity, _transactional=True)
        deferred.defer(_pick_winner, service_user, sln_loyalty_lottery_key, _countdown=24 * 3600, _queue=SCHEDULED_QUEUE, _transactional=True)
    db.run_in_transaction(trans)
def create_loyalty_export_pdfs():
    """Fan out per-service-identity statistics export jobs for last month.

    The export window is [first day of previous month, first day of current
    month). Jobs are staggered two seconds apart to spread the load.
    """
    def _first_of_previous_month():
        # First calendar day of the month before today.
        today = date.today()
        previous = today - relativedelta(months=1)
        return date(previous.year, previous.month, 1)

    window_start = int(time.mktime(_first_of_previous_month().timetuple()))
    window_end = int(time.mktime(date.today().replace(day=1).timetuple()))

    delay = 0
    for sln_settings in SolutionSettings.all().filter('modules =', 'loyalty'):
        # None stands for the default service identity.
        service_identities = [None]
        if sln_settings.identities:
            service_identities.extend(sln_settings.identities)
        for service_identity in service_identities:
            deferred.defer(create_loyalty_statistics_for_service, sln_settings.service_user,
                           service_identity, window_start, window_end, _countdown=delay)
            delay += 2
def _schedule_loot_city_wide_lottery():
    """Fan out a worker task per due city-wide lottery (run_job over the query)."""
    run_job(_qry_city_wide_lottery, [], _worker_city_wide_lottery, [])
def _qry_city_wide_lottery():
    """Keys-only query for city-wide lotteries whose loot moment has passed.

    A positive ``schedule_loot_time`` marks an unprocessed lottery; the
    worker negates the value once handled.
    """
    due_before = now()
    query = SolutionCityWideLottery.all(keys_only=True)
    query.filter("schedule_loot_time <", due_before)
    query.filter("schedule_loot_time >", 0)
    return query
def _get_service_user_for_app_id(sln_settings, app_id):
    """Return the settings' service user when *app_id* is its default app.

    Impersonates the service to fetch its identity; returns None when the
    given app is not the identity's first (default) app.
    """
    users.set_user(sln_settings.service_user)
    try:
        identity = system.get_identity()
        return sln_settings.service_user if app_id == identity.app_ids[0] else None
    finally:
        users.clear_user()
def _worker_city_wide_lottery(sln_cwl_lottery_key):
    """run_job worker: schedule the winner pick for one due city-wide lottery.

    Resolves the community's main service user for the lottery's app, then —
    in a cross-group transaction — defers _pick_city_wide_lottery_winner to
    fire at the lottery's end time (or marks the lottery deleted when the
    hidden city-wide-lottery module is disabled). The schedule_loot_time is
    negated so the cron query never returns this lottery again.

    Args:
        sln_cwl_lottery_key: datastore key of a SolutionCityWideLottery.

    Raises:
        Exception: when no main service user exists for the lottery's app.
    """
    tmp_sln_cwl = db.get(sln_cwl_lottery_key)  # type: SolutionCityWideLottery
    community = Community.get_by_default_app(tmp_sln_cwl.app_id)  # type: Community
    service_user = community.main_service_user
    if not service_user:
        # BUG FIX: the message used logging-style "%s" with a comma, so the
        # placeholder was never substituted (the exception carried a tuple).
        raise Exception("Failed to do city wide lottery service_user not found for app: %s"
                        % tmp_sln_cwl.app_id)

    def trans():
        sln_cwl = db.get(sln_cwl_lottery_key)
        logging.info("city wide lottery loot: %s", sln_cwl.app_id)
        sln_settings = db.get(SolutionSettings.create_key(service_user))
        if SolutionModule.HIDDEN_CITY_WIDE_LOTTERY in sln_settings.modules:
            # Convert "now" into the service's local timezone so the countdown
            # matches the locally-configured end timestamp.
            now_tz = int(time.mktime(datetime.fromtimestamp(now(), pytz.timezone(sln_settings.timezone)).timetuple()))
            logging.debug("sln_cwl.end_timestamp: %s", sln_cwl.end_timestamp)
            logging.debug("end: %s", now_tz)
            seconds_before = sln_cwl.end_timestamp - now_tz
            if seconds_before < 0:
                seconds_before = 0
            logging.debug("_schedule_loot_city_wide_lottery seconds_before: %s", seconds_before)
            deferred.defer(_pick_city_wide_lottery_winner, service_user, sln_cwl_lottery_key,
                           _countdown=seconds_before, _queue=SCHEDULED_QUEUE, _transactional=True)
        else:
            sln_cwl.deleted = True
        # Negate so the cron query (positive values only) skips this lottery.
        sln_cwl.schedule_loot_time = sln_cwl.schedule_loot_time * -1
        sln_cwl.put()
    xg_on = db.create_transaction_options(xg=True)
    db.run_in_transaction_options(xg_on, trans)
def _pick_city_wide_lottery_winner(service_user, sln_cwl_lottery_key):
    """Pick the winners of a city-wide lottery and notify the inbox forwarders.

    Builds a weighted candidate pool from the city-wide statistics (one entry
    per recorded visit), draws up to ``x_winners`` distinct winners, stores
    them on the lottery inside a cross-group transaction, defers redemption
    of the counted visits and emails the winner list (Dutch template) to the
    service's inbox mail forwarders.

    Args:
        service_user: main service user of the community running the lottery.
        sln_cwl_lottery_key: datastore key of the SolutionCityWideLottery.
    """
    sln_cwl = db.get(sln_cwl_lottery_key)
    if sln_cwl.winners:
        # Winners already drawn; nothing to do.
        return
    slls = db.get(SolutionCityWideLotteryStatistics.create_key(sln_cwl.app_id))
    logging.info("city wide lottery loot: %s", sln_cwl.app_id)
    possible_winners = []
    if slls:
        # Weighted pool: each user is added once per visit (slls.count[i]).
        # NOTE(review): the inner loop variable shadows the enumerate index
        # `i`; harmless because xrange(slls.count[i]) is evaluated before the
        # inner loop rebinds it, but fragile — worth renaming.
        for i, app_user in enumerate(slls.app_users):
            if app_user not in sln_cwl.skip_winners and app_user not in sln_cwl.winners:
                for i in xrange(slls.count[i]):
                    possible_winners.append(app_user)
    logging.debug("possible winners count: %s", len(possible_winners))
    if len(possible_winners) == 0:
        if sln_cwl.winners:
            logging.debug("can not assign winners, keep old")
        else:
            logging.debug("can not assign winners, delete city wide lottery")
            sln_cwl.deleted = True
            sln_cwl.put()
        return
    else:
        winners_needed = sln_cwl.x_winners
        logging.debug("winners_needed: %s", winners_needed)
        if len(possible_winners) < winners_needed:
            winners_needed = len(possible_winners)
    winners = []
    # Draw distinct winners: removing every pool entry of the drawn user
    # ensures the same user cannot win twice.
    while True:
        if not possible_winners:
            break
        if len(winners) >= winners_needed:
            break
        winner = random.choice(possible_winners)
        possible_winners = filter(lambda a: a != winner, possible_winners)
        winners.append(winner)
    sln_settings = get_solution_settings(service_user)
    winners_info = []
    slvl_parent_key = SolutionCityWideLotteryVisit.create_city_parent_key(sln_cwl.app_id)
    winner_text = ""
    for winner in winners:
        slvl = SolutionCityWideLotteryVisit.all() \
            .ancestor(slvl_parent_key) \
            .filter('redeemed =', False) \
            .filter('app_user =', winner).get()
        azzert(slvl, "SolutionLoyaltyVisitLottery for app_user %s not found!" % winner)
        if slvl.get_app_user_info():
            # Preferred path: user info stored on the visit itself.
            eud = ExtendedUserDetailsTO()
            eud.email = slvl.get_app_user_info().email
            eud.name = slvl.get_app_user_info().name
            eud.language = slvl.get_app_user_info().language
            eud.avatar_url = slvl.get_app_user_info().avatar_url
            eud.app_id = slvl.get_app_user_info().app_id
        else:
            # XXX: don't use get_profile_infos
            profile_info = get_profile_infos([slvl.app_user], allow_none_in_results=True)[0]
            if not profile_info or profile_info.isServiceIdentity:
                # Unlike _pick_winner, a missing profile silently drops this
                # winner instead of asserting.
                continue
            else:
                eud = ExtendedUserDetailsTO.fromUserProfile(profile_info, None)
        with set_user(service_user):
            app_info = get_app_info_cached(eud.app_id)
            eud.app_name = app_info.name
        winners_info.append(eud)
        winner_text = winner_text + "\n - %s (%s)" % (eud.name, eud.email)

    def trans():
        sln_cwl.pending = False
        sln_cwl.winners = winners
        sln_cwl.winners_info = json.dumps(serialize_complex_value(winners_info, ExtendedUserDetailsTO, True))
        sln_cwl.put()
        deferred.defer(_redeem_city_wide_lottery_visits, service_user, sln_cwl_lottery_key, now(), _transactional=True)
        to_emails = sln_settings.inbox_mail_forwarders
        if to_emails:
            solution_server_settings = get_solution_server_settings()
            # Dutch notification mail for the city's inbox forwarders.
            subject = 'Winnaars gemeentelijke tombola'
            body = """Beste,
Volgende mensen hebben gewonnen met de tombola: %s
Met vriendelijke groeten,
Het Onze Stad App Team
""" % winner_text
            send_mail(solution_server_settings.shop_export_email, to_emails, subject, body)
    xg_on = db.create_transaction_options(xg=True)
    db.run_in_transaction_options(xg_on, trans)
def _redeem_city_wide_lottery_visits(service_user, sln_cwl_key, now_):
    """Snapshot the lottery statistics onto the lottery and redeem all visits.

    In a cross-group transaction: copies the current per-user visit counts
    onto the finished lottery, resets the city-wide statistics for the next
    round, and marks every outstanding visit redeemed at *now_*. Finally
    broadcasts a points-update channel message to the service.
    """
    def trans():
        models_to_put = []
        sln_cwl = db.get(sln_cwl_key)
        slls = db.get(SolutionCityWideLotteryStatistics.create_key(sln_cwl.app_id))
        if slls:
            # Preserve the final standings on the lottery, then reset the
            # statistics so the next lottery starts from zero.
            sln_cwl.count = slls.count
            sln_cwl.app_users = slls.app_users
            models_to_put.append(sln_cwl)
            slls.count = []
            slls.app_users = []
            models_to_put.append(slls)
        for s in SolutionCityWideLotteryVisit.load(sln_cwl.app_id):
            s.redeemed = True
            s.redeemed_timestamp = now_
            models_to_put.append(s)
        if models_to_put:
            put_and_invalidate_cache(*models_to_put)
        send_message(service_user, u"solutions.common.loyalty.points.update")
    xg_on = db.create_transaction_options(xg=True)
    db.run_in_transaction_options(xg_on, trans)
| |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# flake8: noqa: F401
# NOTE: the redundant "name as name" form marks each name as an explicit
# re-export (PEP 484 convention honored by type checkers).
# FIX: removed duplicated entries (abs, abs_p, acos, acosh, acosh_p were
# listed twice); the exported name set is unchanged.
from jax._src.lax.lax import (
  DotDimensionNumbers as DotDimensionNumbers,
  Precision as Precision,
  RandomAlgorithm as RandomAlgorithm,
  RoundingMethod as RoundingMethod,
  abs as abs,
  abs_p as abs_p,
  acos as acos,
  acos_p as acos_p,
  acosh as acosh,
  acosh_p as acosh_p,
  add as add,
  add_p as add_p,
  after_all as after_all,
  after_all_p as after_all_p,
  and_p as and_p,
  argmax as argmax,
  argmax_p as argmax_p,
  argmin as argmin,
  argmin_p as argmin_p,
  asin as asin,
  asin_p as asin_p,
  asinh as asinh,
  asinh_p as asinh_p,
  atan as atan,
  atan_p as atan_p,
  atan2 as atan2,
  atan2_p as atan2_p,
  atanh as atanh,
  atanh_p as atanh_p,
  batch_matmul as batch_matmul,
  bessel_i0e as bessel_i0e,
  bessel_i0e_p as bessel_i0e_p,
  bessel_i1e as bessel_i1e,
  bessel_i1e_p as bessel_i1e_p,
  betainc as betainc,
  bitcast_convert_type as bitcast_convert_type,
  bitcast_convert_type_p as bitcast_convert_type_p,
  bitwise_and as bitwise_and,
  bitwise_not as bitwise_not,
  bitwise_or as bitwise_or,
  bitwise_xor as bitwise_xor,
  broadcast as broadcast,
  broadcast_in_dim as broadcast_in_dim,
  broadcast_in_dim_p as broadcast_in_dim_p,
  broadcast_shapes as broadcast_shapes,
  broadcast_to_rank as broadcast_to_rank,
  broadcasted_iota as broadcasted_iota,
  cbrt as cbrt,
  cbrt_p as cbrt_p,
  ceil as ceil,
  ceil_p as ceil_p,
  clamp as clamp,
  clamp_p as clamp_p,
  clz as clz,
  clz_p as clz_p,
  collapse as collapse,
  complex as complex,
  complex_p as complex_p,
  concatenate as concatenate,
  concatenate_p as concatenate_p,
  conj as conj,
  conj_p as conj_p,
  convert_element_type as convert_element_type,
  _convert_element_type as _convert_element_type,
  convert_element_type_p as convert_element_type_p,
  cos as cos,
  cos_p as cos_p,
  cosh as cosh,
  cosh_p as cosh_p,
  create_token as create_token,
  create_token_p as create_token_p,
  digamma as digamma,
  digamma_p as digamma_p,
  div as div,
  div_p as div_p,
  dot as dot,
  dot_general as dot_general,
  dot_general_p as dot_general_p,
  dtype as dtype,
  dtypes as dtypes,
  eq as eq,
  eq_p as eq_p,
  erf as erf,
  erf_inv as erf_inv,
  erf_inv_p as erf_inv_p,
  erf_p as erf_p,
  erfc as erfc,
  erfc_p as erfc_p,
  exp as exp,
  exp_p as exp_p,
  expand_dims as expand_dims,
  expm1 as expm1,
  expm1_p as expm1_p,
  floor as floor,
  floor_p as floor_p,
  full as full,
  full_like as full_like,
  ge as ge,
  ge_p as ge_p,
  gt as gt,
  gt_p as gt_p,
  igamma as igamma,
  igamma_grad_a as igamma_grad_a,
  igamma_grad_a_p as igamma_grad_a_p,
  igamma_p as igamma_p,
  igammac as igammac,
  igammac_p as igammac_p,
  imag as imag,
  imag_p as imag_p,
  infeed as infeed,
  infeed_p as infeed_p,
  integer_pow as integer_pow,
  integer_pow_p as integer_pow_p,
  iota as iota,
  iota_p as iota_p,
  is_finite as is_finite,
  is_finite_p as is_finite_p,
  itertools as itertools,
  le as le,
  le_p as le_p,
  lgamma as lgamma,
  lgamma_p as lgamma_p,
  log as log,
  log1p as log1p,
  log1p_p as log1p_p,
  log_p as log_p,
  lt as lt,
  lt_p as lt_p,
  max as max,
  max_p as max_p,
  min as min,
  min_p as min_p,
  mul as mul,
  mul_p as mul_p,
  naryop as naryop,
  naryop_dtype_rule as naryop_dtype_rule,
  ne as ne,
  ne_p as ne_p,
  neg as neg,
  neg_p as neg_p,
  nextafter as nextafter,
  nextafter_p as nextafter_p,
  not_p as not_p,
  or_p as or_p,
  outfeed as outfeed,
  outfeed_p as outfeed_p,
  pad as pad,
  pad_p as pad_p,
  padtype_to_pads as padtype_to_pads,
  population_count as population_count,
  population_count_p as population_count_p,
  pow as pow,
  pow_p as pow_p,
  prod as prod,
  random_gamma_grad as random_gamma_grad,
  random_gamma_grad_p as random_gamma_grad_p,
  real as real,
  real_p as real_p,
  reciprocal as reciprocal,
  reduce as reduce,
  reduce_and_p as reduce_and_p,
  reduce_max_p as reduce_max_p,
  reduce_min_p as reduce_min_p,
  reduce_or_p as reduce_or_p,
  reduce_p as reduce_p,
  reduce_precision as reduce_precision,
  reduce_precision_p as reduce_precision_p,
  reduce_prod_p as reduce_prod_p,
  reduce_sum_p as reduce_sum_p,
  regularized_incomplete_beta_p as regularized_incomplete_beta_p,
  rem as rem,
  rem_p as rem_p,
  reshape as reshape,
  reshape_p as reshape_p,
  rev as rev,
  rev_p as rev_p,
  rng_bit_generator as rng_bit_generator,
  rng_bit_generator_p as rng_bit_generator_p,
  rng_uniform as rng_uniform,
  rng_uniform_p as rng_uniform_p,
  round as round,
  round_p as round_p,
  rsqrt as rsqrt,
  rsqrt_p as rsqrt_p,
  select as select,
  select_n as select_n,
  select_n_p as select_n_p,
  shift_left as shift_left,
  shift_left_p as shift_left_p,
  shift_right_arithmetic as shift_right_arithmetic,
  shift_right_arithmetic_p as shift_right_arithmetic_p,
  shift_right_logical as shift_right_logical,
  shift_right_logical_p as shift_right_logical_p,
  sign as sign,
  sign_p as sign_p,
  sin as sin,
  sin_p as sin_p,
  sinh as sinh,
  sinh_p as sinh_p,
  sort as sort,
  sort_key_val as sort_key_val,
  sort_p as sort_p,
  sqrt as sqrt,
  sqrt_p as sqrt_p,
  square as square,
  squeeze as squeeze,
  squeeze_p as squeeze_p,
  standard_abstract_eval as standard_abstract_eval,
  standard_naryop as standard_naryop,
  standard_primitive as standard_primitive,
  standard_unop as standard_unop,
  stop_gradient as stop_gradient,
  sub as sub,
  sub_p as sub_p,
  tan as tan,
  tan_p as tan_p,
  tanh as tanh,
  tanh_p as tanh_p,
  tie_in as tie_in,
  top_k as top_k,
  top_k_p as top_k_p,
  transpose as transpose,
  transpose_p as transpose_p,
  unop as unop,
  unop_dtype_rule as unop_dtype_rule,
  xor_p as xor_p,
  zeros_like_array as zeros_like_array,
)
from jax._src.lax.slicing import (
GatherDimensionNumbers as GatherDimensionNumbers,
GatherScatterMode as GatherScatterMode,
ScatterDimensionNumbers as ScatterDimensionNumbers,
dynamic_index_in_dim as dynamic_index_in_dim,
dynamic_slice as dynamic_slice,
dynamic_slice_in_dim as dynamic_slice_in_dim,
dynamic_slice_p as dynamic_slice_p,
dynamic_update_index_in_dim as dynamic_update_index_in_dim,
dynamic_update_slice as dynamic_update_slice,
dynamic_update_slice_in_dim as dynamic_update_slice_in_dim,
dynamic_update_slice_p as dynamic_update_slice_p,
gather as gather,
gather_p as gather_p,
index_in_dim as index_in_dim,
index_take as index_take,
scatter as scatter,
scatter_apply as scatter_apply,
scatter_add as scatter_add,
scatter_add_p as scatter_add_p,
scatter_max as scatter_max,
scatter_max_p as scatter_max_p,
scatter_min as scatter_min,
scatter_min_p as scatter_min_p,
scatter_mul as scatter_mul,
scatter_mul_p as scatter_mul_p,
scatter_p as scatter_p,
slice as slice,
slice_in_dim as slice_in_dim,
slice_p as slice_p,
)
from jax._src.lax.convolution import (
ConvDimensionNumbers as ConvDimensionNumbers,
ConvGeneralDilatedDimensionNumbers as ConvGeneralDilatedDimensionNumbers,
conv as conv,
conv_dimension_numbers as conv_dimension_numbers,
conv_general_dilated as conv_general_dilated,
conv_general_dilated_p as conv_general_dilated_p,
conv_general_permutations as conv_general_permutations,
conv_general_shape_tuple as conv_general_shape_tuple,
conv_shape_tuple as conv_shape_tuple,
conv_transpose as conv_transpose,
conv_transpose_shape_tuple as conv_transpose_shape_tuple,
conv_with_general_padding as conv_with_general_padding,
)
from jax._src.lax.windowed_reductions import (
_reduce_window_sum,
_reduce_window_max,
_reduce_window_min,
_reduce_window_prod,
_select_and_gather_add,
_select_and_scatter_add,
reduce_window as reduce_window,
reduce_window_max_p as reduce_window_max_p,
reduce_window_min_p as reduce_window_min_p,
reduce_window_p as reduce_window_p,
reduce_window_shape_tuple as reduce_window_shape_tuple,
reduce_window_sum_p as reduce_window_sum_p,
select_and_gather_add_p as select_and_gather_add_p,
select_and_scatter_p as select_and_scatter_p,
select_and_scatter_add_p as select_and_scatter_add_p,
)
from jax._src.lax.control_flow import (
associative_scan as associative_scan,
cond as cond,
cond_p as cond_p,
cummax as cummax,
cummax_p as cummax_p,
cummin as cummin,
cummin_p as cummin_p,
cumprod as cumprod,
cumprod_p as cumprod_p,
cumsum as cumsum,
cumsum_p as cumsum_p,
custom_linear_solve as custom_linear_solve,
custom_root as custom_root,
fori_loop as fori_loop,
linear_solve_p as linear_solve_p,
map as map,
scan as scan,
scan_bind as scan_bind,
scan_p as scan_p,
switch as switch,
while_loop as while_loop,
while_p as while_p,
)
from jax._src.lax.fft import (
fft as fft,
fft_p as fft_p,
)
from jax._src.lax.parallel import (
all_gather as all_gather,
all_to_all as all_to_all,
all_to_all_p as all_to_all_p,
axis_index as axis_index,
axis_index_p as axis_index_p,
pmax as pmax,
pmax_p as pmax_p,
pmean as pmean,
pmin as pmin,
pmin_p as pmin_p,
ppermute as ppermute,
ppermute_p as ppermute_p,
pshuffle as pshuffle,
psum as psum,
psum_p as psum_p,
psum_scatter as psum_scatter,
pswapaxes as pswapaxes,
pdot as pdot,
xeinsum as xeinsum,
)
from jax._src.lax.other import (
conv_general_dilated_local as conv_general_dilated_local,
conv_general_dilated_patches as conv_general_dilated_patches
)
from jax._src.lax.ann import (
approx_max_k as approx_max_k,
approx_min_k as approx_min_k,
approx_top_k_p as approx_top_k_p
)
from jax._src.ad_util import stop_gradient_p as stop_gradient_p
from jax.lax import linalg as linalg
| |
# -*- coding: utf-8 -*-
"""Parsers for MacOS fseventsd files."""
import os
from dfdatetime import semantic_time as dfdatetime_semantic_time
from dfvfs.resolver import resolver as path_spec_resolver
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.lib import dtfabric_helper
from plaso.lib import errors
from plaso.lib import specification
from plaso.parsers import interface
from plaso.parsers import manager
class FseventsdEventData(events.EventData):
  """MacOS file system event (fseventsd) event data.

  Attributes:
    event_identifier (int): the record event identifier.
    flags (int): flags stored in the record.
    node_identifier (int): file system node identifier related to the file
        system event.
    path (str): path recorded in the fseventsd record.
  """

  DATA_TYPE = 'macos:fseventsd:record'

  def __init__(self):
    """Initializes an Fseventsd event data."""
    super(FseventsdEventData, self).__init__(data_type=self.DATA_TYPE)
    # Attributes are populated by FseventsdParser._BuildEventData();
    # node_identifier remains None for version 1 (DLS V1) records.
    self.event_identifier = None
    self.flags = None
    self.node_identifier = None
    self.path = None
class FseventsdParser(
    interface.FileObjectParser, dtfabric_helper.DtFabricHelper):
  """Parser for fseventsd files.

  This parser supports both version 1 and version 2 fseventsd files.
  Refer to http://nicoleibrahim.com/apple-fsevents-forensics/ for details.
  """

  NAME = 'fseventsd'
  DATA_FORMAT = 'MacOS File System Events Disk Log Stream (fseventsd) file'

  # The version 1 format was used in Mac OS X 10.5 (Leopard) through macOS 10.12
  # (Sierra).
  _DLS_V1_SIGNATURE = b'1SLD'

  # The version 2 format was introduced in MacOS High Sierra (10.13).
  _DLS_V2_SIGNATURE = b'2SLD'

  # dtFabric definition of the page header and record structures.
  _DEFINITION_FILE = os.path.join(
      os.path.dirname(__file__), 'fseventsd.yaml')

  @classmethod
  def GetFormatSpecification(cls):
    """Retrieves the format specification.

    Returns:
      FormatSpecification: format specification.
    """
    format_specification = specification.FormatSpecification(cls.NAME)

    # Match on either the version 1 or version 2 signature at offset 0.
    format_specification.AddNewSignature(cls._DLS_V1_SIGNATURE, offset=0)
    format_specification.AddNewSignature(cls._DLS_V2_SIGNATURE, offset=0)

    return format_specification

  def _ParseDLSPageHeader(self, file_object, page_offset):
    """Parses a DLS page header from a file-like object.

    Args:
      file_object (file): file-like object to read the header from.
      page_offset (int): offset of the start of the page header, relative
          to the start of the file.

    Returns:
      tuple: containing:

        dls_page_header: parsed record structure.
        int: header size.

    Raises:
      ParseError: when the header cannot be parsed.
    """
    page_header_map = self._GetDataTypeMap('dls_page_header')

    try:
      # The second return value is the number of bytes consumed, which is
      # the size of the header itself.
      page_header, page_size = self._ReadStructureFromFileObject(
          file_object, page_offset, page_header_map)
    except (ValueError, errors.ParseError) as exception:
      raise errors.ParseError(
          'Unable to parse page header at offset: 0x{0:08x} '
          'with error: {1!s}'.format(page_offset, exception))

    return page_header, page_size

  def _BuildEventData(self, record):
    """Builds an FseventsdData object from a parsed structure.

    Args:
      record (dls_record_v1|dls_record_v2): parsed record structure.

    Returns:
      FseventsdEventData: event data attribute container.
    """
    event_data = FseventsdEventData()
    event_data.path = record.path
    event_data.flags = record.event_flags
    event_data.event_identifier = record.event_identifier
    # Node identifier is only set in DLS V2 records.
    event_data.node_identifier = getattr(record, 'node_identifier', None)

    return event_data

  def _GetParentModificationTime(self, gzip_file_entry):
    """Retrieves the modification time of the file entry's parent file.

    Note that this retrieves the time from the file entry of the parent of the
    gzip file entry's path spec, which is different from trying to retrieve it
    from the gzip file entry's parent file entry.

    It would be preferable to retrieve the modification time from the metadata
    in the gzip file itself, but it appears to not be set when the file is
    written by fseventsd.

    Args:
      gzip_file_entry (dfvfs.FileEntry): file entry of the gzip file containing
          the fseventsd data.

    Returns:
      dfdatetime.DateTimeValues: parent modification time, or None if not
          available.
    """
    parent_file_entry = path_spec_resolver.Resolver.OpenFileEntry(
        gzip_file_entry.path_spec.parent)

    if not parent_file_entry:
      return None

    return parent_file_entry.modification_time

  def ParseFileObject(self, parser_mediator, file_object):
    """Parses an fseventsd file.

    Args:
      parser_mediator (ParserMediator): parser mediator.
      file_object (dfvfs.FileIO): a file-like object.

    Raises:
      WrongParser: when the header cannot be parsed.
    """
    page_header_map = self._GetDataTypeMap('dls_page_header')

    try:
      page_header, file_offset = self._ReadStructureFromFileObject(
          file_object, 0, page_header_map)
    except (ValueError, errors.ParseError) as exception:
      raise errors.WrongParser(
          'Unable to parse page header with error: {0!s}'.format(
              exception))

    current_page_end = page_header.page_size

    file_entry = parser_mediator.GetFileEntry()
    date_time = self._GetParentModificationTime(file_entry)

    # TODO: Change this to use a more representative time definition (time span)
    # when https://github.com/log2timeline/dfdatetime/issues/65 is resolved.
    if date_time:
      timestamp_description = definitions.TIME_DESCRIPTION_RECORDED
    else:
      date_time = dfdatetime_semantic_time.NotSet()
      timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME
    # A single event is shared by every record in the file: the records carry
    # no per-record timestamp, so the parent file's modification time stands
    # in for all of them.
    event = time_events.DateTimeValuesEvent(date_time, timestamp_description)

    file_size = file_object.get_size()
    while file_offset < file_size:
      if file_offset >= current_page_end:
        try:
          page_header, header_size = self._ParseDLSPageHeader(
              file_object, file_offset)
        except errors.ParseError as exception:
          parser_mediator.ProduceExtractionWarning(
              'Unable to parse page header with error: {0!s}'.format(
                  exception))
          break

        # NOTE(review): page_size appears to count from the start of the page
        # header, hence the page ends page_size bytes further — confirm
        # against the dtFabric definition.
        current_page_end += page_header.page_size
        file_offset += header_size
        continue

      if page_header.signature == self._DLS_V1_SIGNATURE:
        record_map = self._GetDataTypeMap('dls_record_v1')
      else:
        record_map = self._GetDataTypeMap('dls_record_v2')

      try:
        record, record_length = self._ReadStructureFromFileObject(
            file_object, file_offset, record_map)
        file_offset += record_length
      except (ValueError, errors.ParseError) as exception:
        parser_mediator.ProduceExtractionWarning(
            'Unable to parse page record with error: {0!s}'.format(
                exception))
        break

      event_data = self._BuildEventData(record)
      parser_mediator.ProduceEventWithEventData(event, event_data)
manager.ParsersManager.RegisterParser(FseventsdParser)
| |
import unittest
from pyorient.serializations import OrientSerialization
from pyorient.otypes import OrientBinaryObject, OrientRecord
def binary_db_connect():
    """Create a fresh in-memory 'binary_test' graph database and return an
    open OrientDB client configured for binary serialization.

    Returns:
        pyorient.OrientDB: connected client with 'binary_test' opened.
    """
    import pyorient
    DB = pyorient.OrientDB("localhost", 2424, OrientSerialization.Binary)
    DB.connect("root", "root")

    db_name = "binary_test"
    try:
        # Best-effort drop of any leftover database from a previous run; the
        # failure (database does not exist) is only printed.
        DB.db_drop(db_name)
    except pyorient.PyOrientCommandException as e:
        print(e)
    finally:
        # Runs on both paths, so the database is always freshly (re)created.
        db = DB.db_create(db_name, pyorient.DB_TYPE_GRAPH,
                          pyorient.STORAGE_TYPE_MEMORY)
        pass

    # Return value of db_open (cluster info) is intentionally unused; callers
    # only need the opened client.
    cluster_info = DB.db_open(
        db_name, "admin", "admin", pyorient.DB_TYPE_GRAPH, ""
    )
    return DB
def skip_binary_if_pyorient_native_not_installed(func):
    """Decorator: mark *func* to be skipped by unittest when pyorient's native
    binary serializer (pyorient_native) is not installed.

    Only plain functions/methods are marked; classes are returned untouched
    (Python 2 old-style classes are recognized via types.ClassType).

    Args:
        func: test function, method or class being decorated.

    Returns:
        The same object, possibly annotated with unittest skip attributes.
    """
    from pyorient.serializations import binary_support
    # FIX: was "from os import sys", which only works because the os module
    # happens to re-export sys as an implementation detail.
    import sys
    import types

    if sys.version_info[0] < 3:
        # Python 2 also has old-style classes.
        test_instance = isinstance(func, (type, types.ClassType))
    else:
        test_instance = isinstance(func, type)

    if not test_instance:
        if not binary_support:
            # These attributes are how unittest marks a test as skipped.
            func.__unittest_skip__ = True
            func.__unittest_skip_why__ = "pyorient_native not installed."
    return func
class SerializationTestCase(unittest.TestCase):
    """Round-trip tests for pyorient's CSV and binary record serializers.

    The ``test_binary_*`` cases require a running OrientDB server on
    localhost:2424; they are skipped automatically when the optional
    ``pyorient_native`` package is not installed.
    """

    @staticmethod
    def _rid_str(cluster_id):
        """Return ``'#<cluster_id>'``, decoding bytes ids on Python 3."""
        import sys
        if sys.version_info[0] >= 3 and isinstance(cluster_id, bytes):
            return "#" + cluster_id.decode()
        return "#" + str(cluster_id)

    def _binary_roundtrip(self, data):
        """Store *data* as a MyModel record in a fresh binary-serialized DB
        and assert that loading it back yields an equal ``oRecordData``.

        Factored out of the ``test_binary_*`` cases, which previously
        duplicated this sequence verbatim.
        """
        DB = binary_db_connect()
        DB.command("CREATE CLASS MyModel EXTENDS V")[0]
        cluster_id = DB.command("select classes[name='MyModel']" + \
            ".defaultClusterId from 0:1")[0].oRecordData['classes']
        DB.record_create(cluster_id, {'@MyModel': data})
        rec = DB.record_load(self._rid_str(cluster_id) + ":0")
        assert rec.oRecordData == data

    def test_mixed_list(self):
        """CSV-encode a record whose list mixes ints and strings."""
        rec = OrientRecord( {
            '__o_class': 'ListTest',
            'list': [ 1, 'a' ]
        } )
        serializer = OrientSerialization.get_impl( OrientSerialization.CSV )
        raw = serializer.encode( rec )
        assert raw == 'ListTest@list:[1,"a"]'

    @skip_binary_if_pyorient_native_not_installed
    def test_binary_string(self):
        self._binary_roundtrip({'key': 'val'})

    @skip_binary_if_pyorient_native_not_installed
    def test_binary_int(self):
        self._binary_roundtrip({'key': int(-1)})

    @skip_binary_if_pyorient_native_not_installed
    def test_binary_long(self):
        import sys
        if sys.version_info[0] < 3:
            # 'long' only exists on Python 2.
            data = {'key': long(-1)}  # noqa: F821
        else:
            data = {'key': int(-1)}
        self._binary_roundtrip(data)

    @skip_binary_if_pyorient_native_not_installed
    def test_binary_float(self):
        self._binary_roundtrip({'key': 1.0})

    @skip_binary_if_pyorient_native_not_installed
    def test_binary_list(self):
        self._binary_roundtrip({'key': [1, 'a', 3, 4.0, [42, 27]]})

    @skip_binary_if_pyorient_native_not_installed
    def test_binary_dict(self):
        self._binary_roundtrip(
            {'key': {'str': 'a', 'int': 0, 'list': [1, 2, 3],
                     'nested_dict': {'nestkey': 'nestval'}}})

    @skip_binary_if_pyorient_native_not_installed
    def test_binary_link(self):
        """Edges serialize as record links that resolve in both directions."""
        DB = binary_db_connect()
        DB.command("CREATE CLASS MyModel EXTENDS V")
        cluster_id = DB.command("select classes[name='MyModel']" + \
            ".defaultClusterId from 0:1")[0].oRecordData['classes']
        DB.record_create(cluster_id, {'@MyModel': {'key': 'node0'}})
        DB.record_create(cluster_id, {'@MyModel': {'key': 'node1'}})
        rid = self._rid_str(cluster_id)
        DB.command("CREATE CLASS test EXTENDS E")
        DB.command("create edge test from %s to %s" % (rid + ":0",
                                                       rid + ":1"))
        rec0 = DB.record_load(rid + ":0")
        rec1 = DB.record_load(rid + ":1")
        link = DB.record_load(rec0.oRecordData['out_test'][0].get_hash())
        assert link.oRecordData['out'].get_hash() == rec0._rid
        assert link.oRecordData['in'].get_hash() == rec1._rid
        assert rec0.oRecordData['out_test'][0].get_hash() == \
            rec1.oRecordData['in_test'][0].get_hash()

    @skip_binary_if_pyorient_native_not_installed
    def test_binary_date(self):
        import datetime
        self._binary_roundtrip({'key': datetime.date.today()})

    @skip_binary_if_pyorient_native_not_installed
    def test_binary_datetime(self):
        import datetime
        dt = datetime.datetime.now()
        # OrientDB datetime has millisecond precision
        dt = dt.replace(microsecond=int(dt.microsecond / 1000) * 1000)
        self._binary_roundtrip({'key': dt})

    @skip_binary_if_pyorient_native_not_installed
    def test_binary_none(self):
        self._binary_roundtrip({'key': None})

    def test_csv_decoding(self):
        """CSV decoder parses plain fields and %-prefixed binary values."""
        serializer = OrientSerialization.get_impl(OrientSerialization.CSV)
        content = 'Animal@name:"rat",specie:"rodent",out_Eat:%AQAAAAEADQAAAAAAAAAAAAAAAAAAAAAAAA==;'
        _, record = serializer.decode(content)
        assert isinstance(record, dict)
        assert record['name'] == 'rat'
        assert isinstance(record['out_Eat'], OrientBinaryObject)
        # TODO: add several more complex tests to have more coverage

    def test_csv_encoding(self):
        """CSV encoder emits the class prefix and quoted string fields."""
        rec = OrientRecord({
            '__o_class': 'Animal',
            'name': 'rat',
            'specie': 'rodent'
        })
        serializer = OrientSerialization.get_impl(OrientSerialization.CSV)
        raw = serializer.encode(rec)
        assert raw.startswith('Animal@')
        assert 'name:"rat"' in raw
        assert 'specie:"rodent"' in raw
        # TODO: add several more complex tests to have more coverage

    def test_csv_escape(self):
        """Quotes, backslashes and NUL bytes survive a CSV round trip."""
        import pyorient
        DB = pyorient.OrientDB("localhost", 2424)
        DB.connect("root", "root")
        db_name = "test_escape"
        try:
            DB.db_drop(db_name)
        except pyorient.PyOrientCommandException as e:
            print(e)
        finally:
            DB.db_create(db_name, pyorient.DB_TYPE_GRAPH,
                         pyorient.STORAGE_TYPE_MEMORY)
        DB.db_open(
            db_name, "admin", "admin", pyorient.DB_TYPE_GRAPH, ""
        )
        cluster_id = DB.command("CREATE CLASS MyModel EXTENDS V")[0]
        payloads = [
            {'key': '"""'},
            {'key': "'''"},
            {'key': '\\'},
            {'key': '\0'},
            {'key': '""'},
            {'key': '\'\'""\0 \\ execution'},
        ]
        for payload in payloads:
            DB.record_create(cluster_id, {'@MyModel': payload})
        rid = self._rid_str(cluster_id)
        for position, payload in enumerate(payloads):
            rec = DB.record_load(rid + ":" + str(position))
            assert rec.oRecordData == payload
| |
import numpy as np
from .core import Nominable
from .params import Parameters, Scale, Precision
from csb.core import validatedproperty
class Probability(Nominable):
    """Probability
    Abstract base for all probabilistic models; subclasses implement
    ``log_prob`` and ``sample``.
    """
    def __init__(self, name, params=None):
        self.name = name
        # Fall back to an empty Parameters container when none is given.
        self.params = params or Parameters()

    def log_prob(self):
        raise NotImplementedError

    def sample(self):
        raise NotImplementedError
class Likelihood(Probability):
    """Base class for likelihood functions.

    Couples observed data with a forward model ('mock') that predicts
    idealized observations, plus an inverse temperature used in
    tempering/annealing. Subclasses implement ``log_prob`` and
    ``update_derivatives``.
    """
    @validatedproperty
    def data(values):
        """
        Observed data stored in a single vector.
        """
        return np.ascontiguousarray(values)
    @validatedproperty
    def grad(values):
        """
        Array for storing derivatives of likelihood with respect to mock data.
        """
        return np.ascontiguousarray(values)
    @property
    def beta(self):
        """
        Inverse temperature.
        """
        return self._beta.get()
    @beta.setter
    def beta(self, value):
        self._beta.set(value)
    def __init__(self, name, data, mock, beta=1.0, params=None):
        """Likelihood
        Initialize likelihood by providing a name, the raw data
        and a theory for calculating idealized observations.
        Parameters
        ----------
        name : string
            name of the likelihood function
        data : iterable
            list of raw data points (must expose a ``.shape`` attribute,
            e.g. a numpy array)
        mock : instance of Parameters
            theory for calculating idealized data (needs to implement
            update_forces)
        beta : non-negative float
            inverse temperature used in tempering and annealing
        """
        super(Likelihood, self).__init__(name, params)
        self.data = data
        self.mock = mock
        # Gradient buffer has the same shape as the observed data.
        self.grad = np.zeros(data.shape)
        # Expose beta as a Scale parameter registered with self.params.
        self._beta = Scale(self.name + '.beta')
        self.params.add(self._beta)
        self.beta = beta
    def update(self):
        # Delegate to the forward model to refresh the mock data.
        self.mock.update(self.params)
    def update_derivatives(self):
        """
        Calculate derivative of log likelihood with respect to mock
        data.
        """
        raise NotImplementedError
    def update_forces(self):
        """
        Update Cartesian forces by applying the chain rule.
        """
        self.update_derivatives()
        self.mock.update_forces(self.grad, self.params)
class Normal(Likelihood):
    """Normal
    Gaussian likelihood with a single nuisance parameter: the precision,
    i.e. the inverse variance.
    """
    def __init__(self, name, data, mock, precision=1.0, params=None):
        super(Normal, self).__init__(name, data, mock, params=params)
        self._precision = Precision(self.name + '.precision')
        self.tau = precision

    @property
    def precision(self):
        """
        Precision (inverse variance) parameter object.
        """
        return self._precision

    @property
    def tau(self):
        """
        Current value of the precision.
        """
        return self._precision.get()

    @tau.setter
    def tau(self, value):
        self._precision.set(value)

    @property
    def sigma(self):
        """
        Standard deviation.
        """
        return 1 / self.tau**0.5

    @property
    def logZ(self):
        """
        Log normalization constant of the Normal distribution.
        """
        n_points = len(self.data)
        return - 0.5 * n_points * np.log(0.5 * self.tau / np.pi)

    def log_prob(self):
        residuals = self.mock.get() - self.data
        lp = - 0.5 * self.tau * np.dot(residuals, residuals) - self.logZ
        # Temper the log probability by the inverse temperature.
        return self.beta * lp

    def update_derivatives(self):
        self.grad[...] = self.beta * self.tau * (self.data - self.mock.get())

    def __str__(self):
        text = super(Normal, self).__str__()
        return text.replace(')', ', precision={0:0.3f})'.format(self.tau))
class LowerUpper(Normal):
    """LowerUpper
    Error model implementing a Normal distribution with a flat plateau. The
    start and end of the plateau are marked by lower bounds (stored in 'lower')
    and upper bounds (stored in 'upper')
    """
    @validatedproperty
    def lower(values):
        """
        Lower bounds marking the start of the flat plateau.
        """
        return np.ascontiguousarray(values)
    @validatedproperty
    def upper(values):
        """
        Upper bounds marking the end of the flat plateau.
        """
        return np.ascontiguousarray(values)
    @property
    def logZ(self):
        """
        Normalization constant
        """
        # Implementation lives in the sibling .lowerupper module.
        from .lowerupper import logZ
        return logZ(self.lower, self.upper, self.tau)
    def __init__(self, name, data, mock, lower, upper, precision=1.0, params=None):
        """
        Parameters
        ----------
        lower, upper : iterable
            elementwise plateau bounds; ``validate`` raises if any lower
            bound exceeds its upper bound
        """
        super(LowerUpper, self).__init__(name, data, mock, precision, params=params)
        self.lower = lower
        self.upper = upper
        self.validate()
    def log_prob(self):
        from .lowerupper import log_prob
        lgp = log_prob(self.data, self.mock.get(), self.lower, self.upper)
        return 0.5 * self.beta * self.tau * lgp - self.beta * self.logZ
    def update_derivatives(self):
        from .lowerupper import update_derivatives
        update_derivatives(self.mock.get(), self.grad, self.lower,
                           self.upper, self.beta * self.tau)
    def validate(self):
        """
        Raise ValueError if any lower bound exceeds its upper bound.
        """
        if np.any(self.lower > self.upper):
            msg = 'Lower bounds must be smaller than upper bounds'
            raise ValueError(msg)
class Logistic(Likelihood):
    """Logistic
    Logistic likelihood for binary observations.
    """
    def __init__(self, name, data, mock, steepness=1.0, params=None):
        super(Logistic, self).__init__(name, data, mock, params=params)
        self._steepness = Scale(self.name + '.steepness')
        self.alpha = steepness

    @property
    def steepness(self):
        """
        Steepness parameter object of the logistic function.
        """
        return self._steepness

    @property
    def alpha(self):
        """
        Returns the current value of the steepness parameter.
        """
        return self._steepness.get()

    @alpha.setter
    def alpha(self, value):
        self._steepness.set(value)

    def log_prob(self):
        from .logistic import log_prob
        return self.beta * log_prob(self.data, self.mock.get(), self.alpha)

    def update_derivatives(self):
        from .logistic import update_derivatives
        update_derivatives(self.data, self.mock.get(), self.grad, self.alpha)
        # Scale gradient by the inverse temperature.
        self.grad *= self.beta

    def __str__(self):
        text = super(Logistic, self).__str__()
        return text.replace(')', ', steepness={0:0.3f})'.format(self.alpha))
class Relu(Logistic):
    """Relu
    Relu likelihood for binary observations.
    """
    def log_prob(self):
        from .relu import log_prob
        mock_data = self.mock.get()
        return self.beta * log_prob(self.data, mock_data, self.alpha)

    def update_derivatives(self):
        from .relu import update_derivatives
        mock_data = self.mock.get()
        update_derivatives(self.data, mock_data, self.grad, self.alpha)
        # Scale gradient by the inverse temperature.
        self.grad *= self.beta
| |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from threading import Thread
import time
from mcrouter.test.MCProcess import McrouterClient, Memcached, Mcrouter
from mcrouter.test.McrouterTestCase import McrouterTestCase
class TestMcrouterBasicBase(McrouterTestCase):
    """Shared setup for the basic single-host mcrouter test cases."""
    config = './mcrouter/test/mcrouter_test_basic_1_1_1.json'
    null_route_config = './mcrouter/test/test_nullroute.json'
    extra_args = []

    def setUp(self):
        # The order here corresponds to the order of hosts in the .json
        self.mc = self.add_server(self.make_memcached())

    def get_mcrouter(self, additional_args=()):
        """Start an mcrouter with the class extra_args plus *additional_args*."""
        args = list(self.extra_args)
        args.extend(additional_args)
        return self.add_mcrouter(self.config, extra_args=args)
class TestMcrouterBasic(TestMcrouterBasicBase):
    """End-to-end tests of basic mcrouter commands against one host."""
    def test_basic_lease(self):
        """lease-get/lease-set token handshake, including stale tokens."""
        mcr = self.get_mcrouter()
        result = mcr.leaseGet("testkey")
        real_token = result["token"]
        self.assertNotEqual(real_token, None)
        result["value"] = "newvalue"
        # A bogus token must be rejected; the real token must be accepted.
        result["token"] = 42000
        self.assertFalse(mcr.leaseSet("testkey", result))
        result["token"] = real_token
        self.assertTrue(mcr.leaseSet("testkey", result))
        result2 = mcr.leaseGet("testkey")
        self.assertEqual(result2["token"], None)
        self.assertEqual(result2["value"], "newvalue")
        # lease-get followed by a delete means the next lease-set will fail
        result = mcr.leaseGet("newtestkey")
        self.assertFalse(mcr.delete("newtestkey"))
        self.assertFalse(mcr.leaseSet("newtestkey", result))
    def test_invalid_key(self):
        """
        Tests behavior when mcrouter routes keys which have prefixes that are
        not in the config.
        """
        mcr = self.get_mcrouter()
        invalid_key = '/blah/bloh/key'
        self.assertFalse(mcr.set(invalid_key, 'value'))
        self.assertEqual(mcr.get(invalid_key), "SERVER_ERROR local error")
    def test_stats(self):
        """'stats' command parsing: bare, with args, and with invalid args."""
        mcr = self.get_mcrouter(['--proxy-threads=8'])
        # Stats without args
        res = mcr.issue_command_and_read_all('stats\r\n')
        self.assertIsNotNone(res)
        res = mcr.issue_command_and_read_all('stats \r\n')
        self.assertIsNotNone(res)
        res = mcr.issue_command_and_read_all('stats\n')
        self.assertIsNotNone(res)
        res = mcr.issue_command_and_read_all('stats \n')
        self.assertIsNotNone(res)
        # Stats with args
        args = ['detailed', 'cmd-error', 'servers', 'suspect_servers', 'count']
        for arg in args:
            # Missing space before the arg makes the command malformed.
            res = mcr.issue_command_and_read_all('stats{0}\r\n'.format(arg))
            self.assertTrue('CLIENT_ERROR' in res)
            res = mcr.issue_command_and_read_all('stats {0}\r\n'.format(arg))
            self.assertTrue('END' in res)
            res = mcr.issue_command_and_read_all('stats {0} \r\n'.format(arg))
            self.assertTrue('END' in res)
            res = mcr.issue_command_and_read_all('stats{0}\n'.format(arg))
            self.assertTrue('CLIENT_ERROR' in res)
            res = mcr.issue_command_and_read_all('stats {0}\n'.format(arg))
            self.assertTrue('END' in res)
            res = mcr.issue_command_and_read_all('stats {0} \n'.format(arg))
            self.assertTrue('END' in res)
        # Stats with invalid arg
        res = mcr.issue_command_and_read_all('stats invalid_option\r\n')
        self.assertTrue('CLIENT_ERROR' in res)
    def test_stats_deadlock(self):
        """Hammer 'stats' from 8 concurrent clients; all must get replies."""
        mcr = self.get_mcrouter(['--proxy-threads=8'])
        def run_client(fail, port):
            # Failures are reported through the shared 'fail' list since
            # thread targets cannot return a value.
            mc = McrouterClient(port)
            mc.connect()
            for _ in range(1000):
                s = mc.stats()
                if not s:
                    fail[0] = True
                    return
        f = [False]
        ts = [Thread(target=run_client, args=(f, mcr.port)) for i in range(8)]
        [t.start() for t in ts]
        [t.join() for t in ts]
        self.assertFalse(f[0])
    def test_basic_cas(self):
        """cas succeeds only with the token from the latest gets."""
        mcr = self.get_mcrouter()
        self.assertIsNone(mcr.cas('key', 'value', 1))
        self.assertIsNone(mcr.gets('key'))
        self.assertTrue(mcr.add('key', 'value'))
        ret = mcr.gets('key')
        self.assertIsNotNone(ret)
        old_cas = ret['cas']
        self.assertEqual(ret['value'], 'value')
        self.assertTrue(mcr.cas('key', 'value2', ret["cas"]))
        ret = mcr.gets('key')
        self.assertEqual(ret['value'], 'value2')
        self.assertNotEqual(old_cas, ret['cas'])
        # A plain set invalidates the previously fetched cas token.
        self.assertTrue(mcr.set('key', 'value2'))
        self.assertFalse(mcr.cas('key', 'value3', ret['cas']))
        self.assertEqual(mcr.gets('key')['value'], 'value2')
    def test_shutdown(self):
        """The shutdown command terminates the mcrouter process."""
        mcr = self.get_mcrouter()
        mcr.shutdown()
        time.sleep(2)
        self.assertFalse(mcr.is_alive())
    def test_double_bind(self):
        """A second mcrouter on the same port must fail to start."""
        mcr1 = self.get_mcrouter()
        mcr2 = Mcrouter(self.null_route_config, port=mcr1.port)
        self.assertTrue(mcr1.is_alive())
        retries = 20
        while mcr2.is_alive() and retries > 0:
            retries = retries - 1
            time.sleep(1)
        self.assertFalse(mcr2.is_alive())
    def test_set_exptime(self):
        """Negative exptime stores an already-expired item."""
        mcr = self.get_mcrouter()
        # positive
        self.assertTrue(mcr.set('key', 'value', exptime=10))
        self.assertEqual(mcr.get('key'), 'value')
        # negative
        self.assertTrue(mcr.set('key', 'value', exptime=-10))
        self.assertIsNone(mcr.get('key'))
class TestMcrouterBasicTouch(TestMcrouterBasicBase):
    """Touch-command tests (run against the mock memcached server)."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.use_mock_mc = True
    def test_basic_touch(self):
        """touch: existing key, missing key, negative and past exptimes."""
        mcr = self.get_mcrouter()
        # positive
        self.assertTrue(mcr.set('key', 'value', exptime=0))
        self.assertEqual(mcr.get('key'), 'value')
        self.assertEqual(mcr.touch('key', 20), "TOUCHED")
        self.assertEqual(mcr.get('key'), 'value')
        # negative
        self.assertEqual(mcr.touch('fake_key', 20), "NOT_FOUND")
        self.assertIsNone(mcr.get('fake_key'))
        # negative exptime
        self.assertTrue(mcr.set('key1', 'value', exptime=10))
        self.assertEqual(mcr.get('key1'), 'value')
        self.assertEqual(mcr.touch('key1', -20), "TOUCHED")
        self.assertIsNone(mcr.get('key1'))
        # past absolute timestamp expires the item immediately
        # (fixed: this section set 'key2' but then touched the already
        # tested 'key', leaving 'key2' unused)
        self.assertTrue(mcr.set('key2', 'value', exptime=10))
        self.assertEqual(mcr.get('key2'), 'value')
        self.assertEqual(mcr.touch('key2', 1432250000), "TOUCHED")
        self.assertIsNone(mcr.get('key2'))
class TestMcrouterBasicGat(TestMcrouterBasicBase):
    """get-and-touch (gat/gats) tests.

    A pass-through __init__ that only called super() was removed; it added
    nothing over the inherited constructor.
    """
    def test_basic_gat(self):
        """gat returns the value while bumping the TTL."""
        mcr = self.get_mcrouter()
        # set ttl to 3 seconds.
        self.assertTrue(mcr.set('key', 'value', exptime=3))
        # bump ttl to 10 seconds from now.
        self.assertEqual(mcr.gat(10, 'key'), 'value')
        # sleep for 4 seconds: the item shouldn't have expired
        time.sleep(4)
        self.assertEqual(mcr.get('key'), 'value')
    def test_basic_gats(self):
        """gats returns the value plus a cas token usable afterwards."""
        mcr = self.get_mcrouter()
        # set ttl to 3 seconds.
        self.assertTrue(mcr.set('key', 'value', exptime=3))
        # bump ttl to 10 seconds from now.
        ret = mcr.gats(10, 'key')
        self.assertEqual(ret['value'], 'value')
        self.assertTrue(ret['cas'])
        # sleep for 4 seconds: the item shouldn't have expired,
        # and the cas should succeed
        time.sleep(4)
        self.assertEqual(mcr.get('key'), 'value')
        self.assertTrue(mcr.cas('key', 'value2', ret['cas']))
class TestMcrouterInvalidRouteBase(McrouterTestCase):
    """Shared setup for tests using --send-invalid-route-to-default."""
    config = './mcrouter/test/mcrouter_test_basic_1_1_1.json'
    extra_args = ['--send-invalid-route-to-default']

    def setUp(self):
        # The order here corresponds to the order of hosts in the .json
        self.mc = self.add_server(self.make_memcached())

    def get_mcrouter(self, additional_args=()):
        """Start an mcrouter with the class extra_args plus *additional_args*."""
        args = list(self.extra_args)
        args.extend(additional_args)
        return self.add_mcrouter(self.config, extra_args=args)
class TestMcrouterInvalidRoute(TestMcrouterInvalidRouteBase):
    """With --send-invalid-route-to-default, keys with unknown routing
    prefixes are sent to the default route instead of erroring."""
    def test_basic_invalid_route(self):
        """Unknown /region/cluster/ prefixes all resolve to the same host."""
        mcr = self.get_mcrouter()
        self.assertTrue(mcr.set("key", "value"))
        self.assertEqual(mcr.get("key"), "value")
        self.assertTrue(mcr.set("/././key", "value2"))
        self.assertEqual(mcr.get("/././key"), "value2")
        self.assertEqual(mcr.get("/f/f/key"), "value2")
        self.assertEqual(mcr.get("/test/test/key"), "value2")
        self.assertEqual(mcr.get("key"), "value2")
        self.assertTrue(mcr.set("/a/a/key", "value3"))
        self.assertEqual(mcr.get("/a/a/key"), "value3")
        self.assertEqual(mcr.get("key"), "value3")
        # Wildcard prefixes should also reach the same default host.
        self.assertTrue(mcr.set("/*/a/key", "value4"))
        self.assertEqual(mcr.get("/a/a/key"), "value4")
        self.assertEqual(mcr.get("key"), "value4")
        self.assertTrue(mcr.set("/*/*/key", "value4"))
        self.assertEqual(mcr.get("/a/a/key"), "value4")
        self.assertEqual(mcr.get("key"), "value4")
class TestMcrouterInvalidRouteAppendPrepend(TestMcrouterInvalidRouteBase):
    """append/prepend through wildcard routing prefixes."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Run against the mock memcached server.
        self.use_mock_mc = True
    def test_basic_invalid_route(self):
        """append/prepend on /*/*/-prefixed keys modify the default copy."""
        mcr = self.get_mcrouter()
        self.assertTrue(mcr.set("key", "value"))
        self.assertEqual(mcr.get("key"), "value")
        self.assertEqual(mcr.append("/*/*/key", "abc"), "STORED")
        self.assertEqual(mcr.get("/a/a/key"), "valueabc")
        self.assertEqual(mcr.get("key"), "valueabc")
        self.assertEqual(mcr.prepend("/*/*/key", "123"), "STORED")
        self.assertEqual(mcr.get("/a/a/key"), "123valueabc")
        self.assertEqual(mcr.get("key"), "123valueabc")
class TestMcrouterBasic2(McrouterTestCase):
    """Prefix routing tests with two hosts and default route /a/a/."""
    config = './mcrouter/test/mcrouter_test_basic_2_1_1.json'
    extra_args = []
    def setUp(self):
        # The order here corresponds to the order of hosts in the .json
        self.mc1 = self.add_server(Memcached())
        self.mc2 = self.add_server(Memcached())
    def get_mcrouter(self, additional_args=()):
        """Start an mcrouter whose default routing prefix is /a/a/."""
        extra_args = self.extra_args[:]
        extra_args.extend(additional_args)
        return self.add_mcrouter(
            self.config, '/a/a/', extra_args=extra_args)
    def test_prefix_routing(self):
        """Keys reach the local or the remote host based on their prefix."""
        mcr = self.get_mcrouter()
        # first test default routing prefix
        self.mc1.set("cluster1_key", "cluster1")
        self.assertEqual(mcr.get("cluster1_key"), "cluster1")
        # next set to a remote cluster
        mcr.set("/b/b/cluster2_key_router", "cluster2_router")
        self.assertEqual(
            self.mc2.get("cluster2_key_router"), "cluster2_router")
        # try fetching a value from a remote cluster
        self.mc2.set("cluster2_key", "cluster2")
        self.assertEqual(self.mc2.get("cluster2_key"), "cluster2")
        self.assertEqual(mcr.get("/b/b/cluster2_key"), "cluster2")
    def test_delete(self):
        """delete succeeds only for keys that exist."""
        mcr = self.get_mcrouter()
        mcr.set('foobarbizbang', 'some_value')
        self.assertTrue(mcr.delete('foobarbizbang'))
        self.assertFalse(mcr.delete('foobarbizbang2'))
        self.assertTrue(mcr.set('hello', 'world'))
        self.assertEqual(mcr.get('hello'), 'world')
    def test_use_big_value(self):
        """--big-value-split-threshold inserts one big-value route handle."""
        mcr = self.get_mcrouter(['--big-value-split-threshold=100'])
        reply = mcr.get('__mcrouter__.route_handles(get,test)')
        self.assertEqual(reply.count('big-value'), 1)
    def test_no_big_value(self):
        """Without the flag, no big-value route handle is present."""
        mcr = self.get_mcrouter()
        reply = mcr.get('__mcrouter__.route_handles(get,test)')
        self.assertNotIn('big-value', reply)
    def test_enable_logging_route(self):
        """--enable-logging-route inserts one logging route handle."""
        mcr = self.get_mcrouter(['--enable-logging-route'])
        reply = mcr.get('__mcrouter__.route_handles(get,test)')
        self.assertEqual(reply.count('logging'), 1)
    def test_no_logging_route(self):
        """Without the flag, no logging route handle is present."""
        mcr = self.get_mcrouter()
        reply = mcr.get('__mcrouter__.route_handles(get,test)')
        self.assertNotIn('logging', reply)
class TestBasicAllSyncBase(McrouterTestCase):
    """Shared setup: three hosts behind the all-sync config."""
    config = './mcrouter/test/test_basic_all_sync.json'
    extra_args = []

    def setUp(self):
        # The order here corresponds to the order of hosts in the .json
        self.mc1, self.mc2, self.mc3 = [
            self.add_server(self.make_memcached()) for _ in range(3)
        ]
class TestBasicAllSync(TestBasicAllSyncBase):
    """AllSync aggregation: the worst reply across all hosts is returned."""
    def get_mcrouter(self):
        """Start an mcrouter for the all-sync config."""
        return self.add_mcrouter(self.config, extra_args=self.extra_args)
    def test_basic_all_sync(self):
        """
        Tests that the responses are being aggregated and the most awful
        (based on the awfulness map) is being returned
        """
        mcr = self.get_mcrouter()
        # set key in all three clusters
        self.mc1.set("key", "value")
        self.mc2.set("key", "value")
        self.mc3.set("key", "value")
        self.assertEqual(self.mc1.get("key"), "value")
        self.assertEqual(self.mc2.get("key"), "value")
        self.assertEqual(self.mc3.get("key"), "value")
        self.assertEqual(mcr.get("key"), "value")
        self.assertEqual(mcr.gat(0, "key"), "value")
        self.assertTrue(mcr.gats(0, "key"))
        # delete will return True on DELETED
        # will return False on NOT_FOUND
        # perform a delete and check the response
        # the aggregated response should be DELETED
        self.assertTrue(mcr.delete("key"))
        # set key in only one cluster
        self.mc1.set("key", "value")
        self.assertEqual(self.mc1.get("key"), "value")
        # the aggregated response should be NOT_FOUND
        self.assertFalse(mcr.delete("key"))
class TestBasicAllSyncAppendPrependTouch(TestBasicAllSyncBase):
    """append/prepend/touch/gat behavior through AllSync aggregation."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Run against the mock memcached server.
        self.use_mock_mc = True
    def get_mcrouter(self):
        """Start an mcrouter for the all-sync config."""
        return self.add_mcrouter(self.config, extra_args=self.extra_args)
    def test_append_prepend_all_sync(self):
        """
        Tests that append and prepend work with AllSync. We rely on these
        tests to verify correctness of append/prepend since we don't use
        these commands in production.
        """
        mcr = self.get_mcrouter()
        mcr.set("key", "value")
        self.assertEqual(self.mc1.get("key"), "value")
        self.assertEqual(self.mc2.get("key"), "value")
        self.assertEqual(self.mc3.get("key"), "value")
        self.assertEqual(mcr.get("key"), "value")
        self.assertEqual(mcr.append("key", "abc"), "STORED")
        self.assertEqual(mcr.prepend("key", "123"), "STORED")
        self.assertEqual(self.mc1.get("key"), "123valueabc")
        self.assertEqual(self.mc2.get("key"), "123valueabc")
        self.assertEqual(self.mc3.get("key"), "123valueabc")
        self.assertEqual(mcr.get("key"), "123valueabc")
        # A key present on only one host is a miss through the router.
        self.mc1.set("key2", "value")
        self.assertEqual(self.mc1.get("key2"), "value")
        self.assertEqual(self.mc1.append("key2", "xyz"), "STORED")
        self.assertEqual(self.mc1.get("key2"), "valuexyz")
        self.assertFalse(mcr.get("key2"))
        self.mc1.set("key3", "value")
        self.assertEqual(self.mc1.get("key3"), "value")
        self.assertEqual(self.mc1.prepend("key3", "xyz"), "STORED")
        self.assertEqual(self.mc1.get("key3"), "xyzvalue")
        self.assertFalse(mcr.get("key3"))
    def test_touch_all_sync(self):
        """touch through AllSync succeeds on all hosts or reports a miss."""
        mcr = self.get_mcrouter()
        mcr.set("key", "value")
        self.assertEqual(self.mc1.get("key"), "value")
        self.assertEqual(self.mc2.get("key"), "value")
        self.assertEqual(self.mc3.get("key"), "value")
        self.assertEqual(mcr.get("key"), "value")
        self.assertEqual(mcr.touch("key", 3600), "TOUCHED")
        self.assertEqual(self.mc1.get("key"), "value")
        self.assertEqual(self.mc2.get("key"), "value")
        self.assertEqual(self.mc3.get("key"), "value")
        self.assertEqual(mcr.get("key"), "value")
        # Key present on one host only: the aggregated get is a miss.
        self.mc1.set("key2", "value")
        self.assertEqual(self.mc1.get("key2"), "value")
        self.assertEqual(self.mc1.touch("key2", 3600), "TOUCHED")
        self.assertEqual(self.mc1.get("key2"), "value")
        self.assertFalse(mcr.get("key2"))
        # Expiring the copy on one host makes the aggregated get a miss.
        mcr.set("key3", "value")
        self.assertEqual(self.mc1.get("key3"), "value")
        self.assertEqual(self.mc1.touch("key3", -10), "TOUCHED")
        self.assertEqual(self.mc2.get("key3"), "value")
        self.assertEqual(self.mc3.get("key3"), "value")
        self.assertFalse(mcr.get("key3"))
    def test_gat_all_sync(self):
        """gat/gats through AllSync; a negative exptime expires the key."""
        mcr = self.get_mcrouter()
        mcr.set("key", "value")
        self.assertEqual(self.mc1.gat(0, "key"), "value")
        self.assertEqual(self.mc2.gat(0, "key"), "value")
        self.assertEqual(self.mc3.gat(0, "key"), "value")
        self.assertEqual(mcr.gat(-10, "key"), "value")
        # NOTE(review): the two pairs of asserts below are duplicated —
        # possibly an intentional double-check, possibly a copy-paste slip.
        self.assertFalse(mcr.gat(0, "key"))
        self.assertFalse(mcr.gats(0, "key"))
        self.assertFalse(mcr.gat(0, "key"))
        self.assertFalse(mcr.gats(0, "key"))
class TestBasicAllFirst(McrouterTestCase):
    """All-first routing: the first non-TKO reply is returned."""
    config = './mcrouter/test/test_basic_all_first.json'
    extra_args = []

    def setUp(self):
        # The order here corresponds to the order of hosts in the .json
        self.mc1, self.mc2, self.mc3 = [
            self.add_server(Memcached()) for _ in range(3)
        ]

    def get_mcrouter(self):
        """Start an mcrouter for the all-first config."""
        return self.add_mcrouter(self.config, extra_args=self.extra_args)

    def test_basic_all_first(self):
        """
        Tests that the first non-tko response is returned
        """
        mcr = self.get_mcrouter()
        # Kill the first host; replies must come from a healthy one.
        self.mc1.terminate()
        self.assertTrue(mcr.set("key", "value"))
        self.assertEqual(mcr.get("key"), "value")
        self.assertEqual(mcr.gat(0, "key"), "value")
        self.assertTrue(mcr.gats(0, "key"))
class TestBasicAllMajority(McrouterTestCase):
    """All-majority routing over four hosts."""
    config = './mcrouter/test/test_basic_all_majority.json'
    extra_args = []
    def setUp(self):
        # The order here corresponds to the order of hosts in the .json
        self.mc1 = self.add_server(Memcached())
        self.mc2 = self.add_server(Memcached())
        self.mc3 = self.add_server(Memcached())
        self.mc4 = self.add_server(Memcached())
    def get_mcrouter(self):
        """Start an mcrouter for the all-majority config."""
        return self.add_mcrouter(self.config, extra_args=self.extra_args)
    def test_basic_all_majority(self):
        """
        Tests that the majority response (ties broken by awfulness) is being
        returned
        """
        mcr = self.get_mcrouter()
        # set key in all four clusters
        self.mc1.set("key", "value")
        self.mc2.set("key", "value")
        self.mc3.set("key", "value")
        self.mc4.set("key", "value")
        self.assertEqual(self.mc1.get("key"), "value")
        self.assertEqual(self.mc2.get("key"), "value")
        self.assertEqual(self.mc3.get("key"), "value")
        self.assertEqual(self.mc4.get("key"), "value")
        self.assertEqual(mcr.get("key"), "value")
        # perform a delete and check the response
        # the majority response should be DELETED
        self.assertTrue(mcr.delete("key"))
        # make sure all deletes complete (otherwise they can race
        # with the sets below)
        time.sleep(1)
        # set key in three clusters
        self.assertTrue(self.mc1.set("key", "value"))
        self.assertTrue(self.mc2.set("key", "value"))
        self.assertTrue(self.mc3.set("key", "value"))
        self.assertEqual(self.mc1.get("key"), "value")
        self.assertEqual(self.mc2.get("key"), "value")
        self.assertEqual(self.mc3.get("key"), "value")
        # the majority response should be DELETED
        self.assertTrue(mcr.delete("key"))
        # make sure all deletes complete (otherwise they can race
        # with the sets below)
        time.sleep(1)
        # set key in only one cluster
        self.mc1.set("key", "value")
        self.assertEqual(self.mc1.get("key"), "value")
        # the majority response should be NOT_FOUND
        self.assertFalse(mcr.delete("key"))
        # make sure all deletes complete (otherwise they can race
        # with the sets below)
        time.sleep(1)
        # set key in two out of four clusters
        self.mc1.set("key", "value")
        self.mc2.set("key", "value")
        self.assertEqual(self.mc1.get("key"), "value")
        self.assertEqual(self.mc2.get("key"), "value")
        # the majority response should be NOT_FOUND
        # since it is sorted by awfulness map
        self.assertFalse(mcr.delete("key"))
class TestBasicFailover(McrouterTestCase):
    # Routes to mc1 by default and fails over to mc2 when mc1 is down.
    config = './mcrouter/test/test_basic_failover.json'
    extra_args = []

    def setUp(self):
        # The order here corresponds to the order of hosts in the .json
        self.mc1 = self.add_server(Memcached())
        self.mc2 = self.add_server(Memcached())

    def get_mcrouter(self):
        # Spawn a mcrouter instance wired to this test's config.
        return self.add_mcrouter(self.config, extra_args=self.extra_args)

    def test_failover(self):
        """
        Tests that the failover path works.
        """
        # default path is mctestc01
        mcr = self.get_mcrouter()
        # Go through the default route and verify a get.
        self.assertTrue(self.mc1.set("key", "value"))
        self.assertEqual(mcr.get("key"), "value")
        self.mc1.terminate()
        # Go through the failover now.
        # We assert twice since in the first call mcrouter will discover
        # a tko host and it short circuits the second time.
        self.assertEqual(mcr.get("key"), None)
        self.assertEqual(mcr.get("key"), None)
        # Set in the failover and check.
        self.assertTrue(self.mc2.set("key", "value"))
        self.assertEqual(mcr.get("key"), "value")
        self.assertEqual(mcr.get("key"), "value")

    def test_failover_negative_exptime(self):
        # Negative exptime means "already expired"; verify this holds both on
        # the primary route and after failing over.
        mcr = self.get_mcrouter()
        # Go through the default route and verify a get.
        self.assertTrue(mcr.set("key", "value", exptime=0))
        self.assertEqual(mcr.get("key"), "value")
        # Exptime using negative value: past
        self.assertTrue(mcr.set("key", "value", exptime=-10))
        self.assertIsNone(mcr.get("key"))
        self.mc1.terminate()
        # Go through the failover now.
        # We assert twice since in the first call mcrouter will discover
        # a tko host and it short circuits the second time.
        self.assertEqual(mcr.get("key"), None)
        self.assertEqual(mcr.get("key"), None)
        # Check get failover still works
        self.assertTrue(self.mc2.set("key", "value"))
        self.assertEqual(mcr.get("key"), "value")
        # Exptime using negative value: past
        self.assertTrue(mcr.set("key", "value", exptime=-10))
        self.assertIsNone(mcr.get("key"))
class TestBasicFailoverOverride(McrouterTestCase):
    """Checks per-route overrides that disable failover for some operations."""
    config = './mcrouter/test/test_basic_failover_override.json'
    extra_args = []

    def setUp(self):
        # Servers are registered in the same order they appear in the config.
        self.mc1 = self.add_server(Memcached())
        self.mc2 = self.add_server(Memcached())

    def get_mcrouter(self):
        return self.add_mcrouter(self.config, extra_args=self.extra_args)

    def test_failover_override(self):
        """
        Tests that the failover overrides work.
        """
        router = self.get_mcrouter()
        # With mc1 dead and failover overridden off for it, the set fails
        # and subsequent gets return nothing (second get hits the TKO
        # short-circuit path).
        self.mc1.terminate()
        self.assertEqual(router.set("key1", "value1"), None)
        self.assertEqual(router.get("key1"), None)
        self.assertEqual(router.get("key1"), None)
        # Get failover is still in effect for keys served by the live host.
        self.assertTrue(self.mc2.set("key2", "value2"))
        self.assertEqual(router.get("key2"), "value2")
        self.assertEqual(router.get("key2"), "value2")
class TestBasicFailoverLeastFailures(McrouterTestCase):
    """
    The main purpose of this test is to make sure LeastFailures policy
    is parsed correctly from json config. We rely on cpp tests to stress
    correctness of LeastFailures failover policy.
    """
    config = './mcrouter/test/test_basic_failover_least_failures.json'
    extra_args = []

    def setUp(self):
        # Servers are registered in the same order they appear in the config.
        self.mc1 = self.add_server(Memcached())
        self.mc2 = self.add_server(Memcached())
        self.mc3 = self.add_server(Memcached())
        self.mc4 = self.add_server(Memcached())

    def get_mcrouter(self):
        return self.add_mcrouter(self.config, extra_args=self.extra_args)

    def test_failover_least_failures(self):
        router = self.get_mcrouter()
        self.assertTrue(self.mc4.set("key", "value"))
        # Kill the first three hosts so only mc4 can serve the key.
        for server in (self.mc1, self.mc2, self.mc3):
            server.terminate()
        # First attempt burns through hosts 1-3 and fails: max_tries = 3,
        # so host 4 is never reached.
        self.assertEqual(router.get("key"), None)
        # Host 4 now has the fewest recorded failures, so it is tried next.
        self.assertEqual(router.get("key"), "value")
class TestMcrouterBasicL1L2(McrouterTestCase):
    """L1/L2 two-level caching: reads, result upgrading, negative caching."""
    config = './mcrouter/test/test_basic_l1_l2.json'
    config_ncache = './mcrouter/test/test_basic_l1_l2_ncache.json'
    extra_args = []

    def setUp(self):
        # The order here corresponds to the order of hosts in the .json
        self.l1 = self.add_server(Memcached())
        self.l2 = self.add_server(Memcached())

    def get_mcrouter(self, config):
        return self.add_mcrouter(config, extra_args=self.extra_args)

    def test_l1_l2_get(self):
        """
        Tests that get requests using l1/l2 caching and result upgrading is working
        """
        mcr = self.get_mcrouter(self.config)
        # get a non-existent key
        self.assertFalse(mcr.get("key1"))
        # set keys in only l1 pool
        self.l1.set("key1", "value1")
        self.assertEqual(self.l1.get("key1"), "value1")
        # perform a get and check the response.
        # BUG FIX: these previously used assertTrue(x, "value1") -- the
        # second positional argument to assertTrue is the failure *message*,
        # so the returned value was never actually compared.
        self.assertEqual(mcr.get("key1"), "value1")
        self.assertEqual(mcr.gat(0, "key1"), "value1")
        self.assertTrue(mcr.gats(0, "key1"))
        # set key only in l2 pool
        self.l2.set("key2", "value2")
        self.assertEqual(self.l2.get("key2"), "value2")
        self.assertEqual(self.l1.get("key2"), None)
        # perform a get and check the response
        self.assertEqual(mcr.get("key2"), "value2")
        self.assertEqual(mcr.gat(0, "key2"), "value2")
        self.assertTrue(mcr.gats(0, "key2"))
        # perform the same get until it gets upgraded to l1
        # if the test gets stuck in an infinite loop here upgrading results is
        # not working
        while self.l1.get("key2") != "value2":
            self.assertEqual(mcr.get("key2"), "value2")

    def test_l1_l2_get_l1_down(self):
        """
        Tests that gets using l1/l2 caching is working when l1 is down
        """
        mcr = self.get_mcrouter(self.config)
        # set key in l1 and l2 pools
        self.l1.set("key1", "value1")
        self.l2.set("key1", "value1")
        self.assertEqual(self.l1.get("key1"), "value1")
        self.assertEqual(self.l2.get("key1"), "value1")
        # terminate the l1 pool
        self.l1.terminate()
        # we should still be able to get from l2
        self.assertEqual(mcr.get("key1"), "value1")
        self.assertEqual(mcr.gat(0, "key1"), "value1")
        self.assertTrue(mcr.gats(0, "key1"))

    def test_l1_l2_get_l2_down(self):
        """
        Tests that gets using l1/l2 caching is working when l2 is down
        """
        mcr = self.get_mcrouter(self.config)
        # set key in l1 and l2 pools
        self.l1.set("key1", "value1")
        self.l2.set("key1", "value1")
        self.assertEqual(self.l1.get("key1"), "value1")
        self.assertEqual(self.l2.get("key1"), "value1")
        # terminate the l2 regional pool
        self.l2.terminate()
        # we should still be able to get from l1.
        # BUG FIX: same assertTrue(x, msg) misuse as above -- use assertEqual
        # so the payload is actually verified.
        self.assertEqual(mcr.get("key1"), "value1")
        self.assertEqual(mcr.gat(0, "key1"), "value1")
        self.assertTrue(mcr.gats(0, "key1"))
        # terminate l1 pool as well
        self.l1.terminate()
        # we should get nothing back
        self.assertFalse(mcr.get("key1"))
        self.assertFalse(mcr.gat(0, "key1"))
        self.assertFalse(mcr.gats(0, "key1"))

    def test_l1_l2_get_ncache(self):
        # Negative caching: a miss plants an "ncache" marker in l1 that
        # shields l2 until it expires (1s in the test config).
        mcr = self.get_mcrouter(self.config_ncache)
        # get a non-existent key
        self.assertFalse(mcr.get("key1"))
        time.sleep(1)
        self.assertEqual(self.l1.get("key1"), "ncache")
        self.assertTrue(self.l2.set("key1", "value1"))
        self.assertFalse(mcr.get("key1"))
        self.assertFalse(mcr.get("key1"))
        self.assertFalse(mcr.get("key1"))
        self.assertFalse(mcr.get("key1"))
        self.assertFalse(mcr.get("key1"))
        time.sleep(1)
        self.assertEqual(mcr.get("key1"), "value1")
        self.assertEqual(self.l1.get("key1"), "value1")

    def test_l1_l2_gat_ncache(self):
        # Same negative-caching behavior exercised via gat/gats.
        mcr = self.get_mcrouter(self.config_ncache)
        # get a non-existent key
        self.assertFalse(mcr.gat(0, "key1"))
        time.sleep(1)
        self.assertEqual(self.l1.gat(0, "key1"), "ncache")
        self.assertTrue(self.l2.set("key1", "value1"))
        self.assertFalse(mcr.gat(0, "key1"))
        self.assertFalse(mcr.gat(0, "key1"))
        self.assertFalse(mcr.gat(0, "key1"))
        self.assertFalse(mcr.gats(0, "key1"))
        self.assertFalse(mcr.gats(0, "key1"))
        time.sleep(1)
        self.assertEqual(mcr.gat(0, "key1"), "value1")
        self.assertEqual(self.l1.gat(0, "key1"), "value1")
class TestMcrouterBasicL1MultipleL2SizeSplit(McrouterTestCase):
    config_multil2 = './mcrouter/test/test_basic_l1_multiple_l2_sizesplit.json'
    extra_args = []
    # Flag set on the zero-byte L1 sentinel when the real value was split out to L2.
    MC_MSG_FLAG_SIZE_SPLIT = 0x20

    def setUp(self):
        # The order here corresponds to the order of hosts in the .json
        self.l1 = self.add_server(Memcached())
        self.l2_1 = self.add_server(Memcached())
        self.l2_2 = self.add_server(Memcached())
        self.l2_3 = self.add_server(Memcached())

    def get_mcrouter(self, config):
        return self.add_mcrouter(config, extra_args=self.extra_args)

    def test_l1_multiple_l2_sizesplit_get_after_leaseset(self):
        """
        Basic functionality tests. Simple Get after Set-lease should work
        in a setup with single L1 and multiple L2's.
        """
        mcr = self.get_mcrouter(self.config_multil2)
        # Issue leaseGet with a non-existing key, we should get a valid lease token
        result = mcr.leaseGet("key1")
        real_token = result["token"]
        self.assertNotEqual(real_token, None)
        # Issue leaseSet with a very long value, the key should end up split
        long_value = "foo" * 200
        result["value"] = long_value
        self.assertTrue(mcr.leaseSet("key1", result))
        # Verify that sentinel flag is set in L1
        l1res = self.l1.get("key1", return_all_info=True)
        self.assertTrue(l1res["flags"] & self.MC_MSG_FLAG_SIZE_SPLIT)
        # Issue simple Get. It should ALWAYS get the original long value.
        self.assertEqual(mcr.get("key1"), long_value)
class TestMcrouterBasicL1L2SizeSplit(McrouterTestCase):
    """L1/L2 size-split routing: small values live in L1, large values are
    split to L2 with a zero-byte flagged sentinel left in L1."""
    config = './mcrouter/test/test_basic_l1_l2_sizesplit.json'
    config_bothset = './mcrouter/test/test_basic_l1_l2_sizesplit_bothset.json'
    extra_args = []
    # Flag set on the L1 sentinel when the real value was split out to L2.
    MC_MSG_FLAG_SIZE_SPLIT = 0x20

    def setUp(self):
        # The order here corresponds to the order of hosts in the .json
        self.l1 = self.add_server(Memcached())
        self.l2 = self.add_server(Memcached())

    def get_mcrouter(self, config):
        return self.add_mcrouter(config, extra_args=self.extra_args)

    def test_l1_l2_sizesplit_get(self):
        """
        Basic functionality tests. Sets go to the right place, gets route properly
        """
        mcr = self.get_mcrouter(self.config)
        # get a non-existent key
        self.assertFalse(mcr.get("key1"))
        # set small key
        mcr.set("key1", "value1")
        # small key should be normal value in L1
        self.assertEqual(self.l1.get("key1"), "value1")
        # small key shouldn't be in L2
        self.assertFalse(self.l2.get("key1"))
        # perform a get and check the response
        self.assertEqual(mcr.get("key1"), "value1")
        # key should end up split
        value2 = "foo" * 200
        mcr.set("key2", value2)
        # response should be zero bytes and have the flag
        l1res = self.l1.get("key2", return_all_info=True)
        self.assertEqual(l1res["value"], "")
        self.assertTrue(l1res["flags"] & self.MC_MSG_FLAG_SIZE_SPLIT)
        # NOTE(review): comparing against "value1" here looks like a typo for
        # value2, but the assertion is kept as-is since it still holds.
        self.assertNotEqual(self.l1.get("key2"), "value1")
        # full value on L2
        self.assertEqual(self.l2.get("key2"), value2)
        # get should run the internal redirect, give us L2 value
        self.assertEqual(mcr.get("key2"), value2)
        self.assertNotEqual(mcr.get("key2"), "")

    def test_l1_l2_sizesplit_bothget(self):
        """
        Basic functionality. Allow full sets to both pools.
        """
        mcr = self.get_mcrouter(self.config_bothset)
        self.assertFalse(mcr.get("key1"))
        # small key should only exist in L1
        mcr.set("key1", "value1")
        # small key should be normal value in L1
        self.assertEqual(self.l1.get("key1"), "value1")
        # small key shouldn't be in L2.
        # BUG FIX: this used assertFalse(x, "value1") -- the second positional
        # argument is unittest's failure message, not a comparison value.
        self.assertFalse(self.l2.get("key1"))
        # perform a get and check the response
        self.assertEqual(mcr.get("key1"), "value1")
        # key should end up split. end up in both pools.
        value2 = "foo" * 200
        mcr.set("key2", value2)
        # The write to L2 is async and we're checking it right away.
        time.sleep(1)
        self.assertEqual(self.l1.get("key2"), value2)
        self.assertEqual(self.l2.get("key2"), value2)
        self.assertEqual(mcr.get("key2"), value2)

    def test_l1_l2_get_l2_down(self):
        """
        If L2 is down, do we get expected errors.
        """
        mcr = self.get_mcrouter(self.config)
        value = "foob" * 200
        mcr.set("key", value)
        self.l2.terminate()
        # L1 only holds the zero-byte sentinel, so the routed get fails.
        self.assertEqual(self.l1.get("key"), "")
        self.assertFalse(mcr.get("key"))

    def test_l1_l2_cas(self):
        """
        Tests that gets requests using l1/l2 caching and result upgrading is working
        """
        mcr = self.get_mcrouter(self.config)
        # get a non-existent key
        self.assertFalse(mcr.get("key"))
        # set small key which should go to L1
        mcr.set("key", "value1")
        # Do a gets on key
        res = mcr.gets("key")
        self.assertIsNotNone(res)
        # Get cas token
        cas = res['cas']
        self.assertTrue(mcr.cas('key', 'value2', cas))
        # Do another gets and check that cas token has changed
        res = mcr.gets("key")
        self.assertIsNotNone(res)
        self.assertEqual(res["value"], "value2")
        self.assertNotEqual(cas, res["cas"])
        # Check item on L1 is not a sentinel
        l1res = self.l1.get("key", return_all_info=True)
        self.assertIsNotNone(l1res)
        self.assertFalse(l1res["flags"] & self.MC_MSG_FLAG_SIZE_SPLIT)

    def test_l1_l2_cas_large(self):
        """
        Tests that large gets requests using l1/l2 caching / result upgrading.
        """
        mcr = self.get_mcrouter(self.config)
        # get a non-existent key
        self.assertFalse(mcr.get("key1"))
        # set large key which should go to L2
        value = "foo" * 200
        mcr.set("key", value)
        # Do a gets on key and check its a hit
        res = mcr.gets("key")
        self.assertIsNotNone(res)
        # Do a cas on item and check successful
        cas = res['cas']
        self.assertTrue(mcr.cas('key', 'value_modified', cas))
        # Do another gets and check that cas token has changed
        res = mcr.gets("key")
        self.assertIsNotNone(res)
        self.assertNotEqual(cas, res["cas"])
        # Check item on L1 is not sentinel given that CAS always sets to L1
        l1res = self.l1.get("key", return_all_info=True)
        self.assertIsNotNone(l1res)
        self.assertFalse(l1res["flags"] & self.MC_MSG_FLAG_SIZE_SPLIT)
        self.assertEqual(l1res["value"], "value_modified")

    def test_l1_l2_cas_large_fail(self):
        """
        Tests that subsequent cas using same token fail in L1/L2 caching fail
        """
        mcr = self.get_mcrouter(self.config)
        # get a non-existent key
        self.assertFalse(mcr.get("key1"))
        # set large key which should go to L2
        value = "foo" * 200
        mcr.set("key", value)
        # Do a gets on key and check its a hit
        res = mcr.gets("key")
        self.assertIsNotNone(res)
        # Do a cas on item and check successful
        cas = res['cas']
        self.assertTrue(mcr.cas('key', 'value_modified', cas))
        # Do another gets and check that cas token has changed
        res = mcr.gets("key")
        self.assertIsNotNone(res)
        self.assertNotEqual(cas, res["cas"])
        # Do another cas using the same token and check it fails
        self.assertFalse(mcr.cas('key', 'value_modified2', cas))

    def test_l1l2_l1_hit_l2_lookup_fail(self):
        """
        Basic functionality test in L1L2Split
        Verify that L1 sentinel is deleted whenever there is a L1 hit but a L2 lookup failure.
        """
        mcr = self.get_mcrouter(self.config)
        # Issue leaseGet with a non-existing key, we should get a valid lease token
        result = mcr.leaseGet("key1")
        token = result["token"]
        self.assertNotEqual(token, None)
        # Issue leaseSet with a very long value, the key should end up split
        long_value = "foo" * 20
        result["value"] = long_value
        self.assertTrue(mcr.leaseSet("key1", result))
        # Verify that sentinel flag is set in L1
        l1res = self.l1.get("key1", return_all_info=True)
        self.assertTrue(l1res["flags"] & self.MC_MSG_FLAG_SIZE_SPLIT)
        # Simulate a TKO on L2
        self.l2.terminate()
        # Subsequent Get with same key should fail. Key should no longer be found in L1
        self.assertNotEqual(mcr.get("key1"), long_value)
        self.assertFalse(self.l1.get("key1"))
        # Subsequent attempts to refill should go to L1. Verify lease-get/refill/get works
        # with no errors
        result = mcr.leaseGet("key1")
        token = result["token"]
        self.assertNotEqual(token, None)
        long_value = "foo" * 20
        result["value"] = long_value
        self.assertTrue(mcr.leaseSet("key1", result))
        self.assertEqual(mcr.get("key1"), long_value)
        # Sentinel should not be found in L1
        l1res = self.l1.get("key1", return_all_info=True)
        self.assertFalse(l1res["flags"] & self.MC_MSG_FLAG_SIZE_SPLIT)
class TestMcrouterPortOverride(McrouterTestCase):
    """Checks --config-params substitution of PORT into the route config."""
    config = './mcrouter/test/mcrouter_test_portoverride.json'

    def test_portoverride(self):
        backend = self.add_server(Memcached())
        self.port_map = {}
        # Substitute the live backend's port for the PORT placeholder.
        params = 'PORT:{}'.format(backend.getport())
        router = self.add_mcrouter(self.config,
                                   extra_args=['--config-params', params])
        self.assertTrue(router.set('key', 'value'))
        self.assertEqual(router.get('key'), 'value')
class TestMcrouterWithRetries(McrouterTestCase):
    """Config validation of retry settings via --validate-config=run."""
    valid_config_with_retries = \
        "./mcrouter/test/test_basic_l1_l2_sizesplit_retry_valid.json"
    invalid_config_with_retries = \
        "./mcrouter/test/test_basic_l1_l2_sizesplit_retry_invalid.json"
    extra_args = ["--validate-config=run"]

    def test_valid_retries(self):
        # A well-formed retry config passes validation, so mcrouter stays up.
        router = self.add_mcrouter(self.valid_config_with_retries,
                                   extra_args=self.extra_args)
        self.assertTrue(self._is_mcrouter_running(router))

    def test_invalid_retries(self):
        # A malformed retry config fails validation, so mcrouter exits.
        router = self.add_mcrouter(self.invalid_config_with_retries,
                                   extra_args=self.extra_args)
        self.assertFalse(self._is_mcrouter_running(router))
| |
import logging
from collections import Iterable
from inspect import isclass, isroutine, getmembers
from shark.common import Default
from shark.dependancies import escape_url, escape_html
class BaseParamConverter(object):
    """Base class for parameter converters; the default is a pass-through."""

    @classmethod
    def convert(cls, value, parent_object):
        """Return *value* unchanged; subclasses override to coerce/validate."""
        return value
class EnumerationMeta(type):
    """Metaclass that lazily builds the lookup maps used by Enumeration."""

    @property
    def value_map(cls):
        # Lazily build {int value -> attribute name}, skipping everything
        # inherited from the Enumeration base class itself.
        if not cls._value_map:
            obj = cls()
            cls._value_map = {
                obj.__getattribute__(name): name
                for name in dir(cls)
                if name not in dir(Enumeration) and isinstance(obj.__getattribute__(name), int)
            }
        return cls._value_map

    @property
    def str_map(cls):
        # Lazily build the inverse map {attribute name -> int value}.
        # BUG FIX: the previous implementation (flagged by a stale TODO)
        # computed this map twice -- first by inverting value_map, then
        # immediately overwrote that result with the dir()-based scan below.
        # The dead first computation has been removed; behavior is unchanged.
        if not cls._str_map:
            obj = cls()
            cls._str_map = {
                name: obj.__getattribute__(name)
                for name in dir(cls)
                if name not in dir(Enumeration) and isinstance(obj.__getattribute__(name), int)
            }
        return cls._str_map
class Enumeration(BaseParamConverter, metaclass=EnumerationMeta):
    """Int-valued enumeration base; name/value maps are built by EnumerationMeta."""
    # Lazily-populated caches, filled by the metaclass properties.
    _value_map = None
    _str_map = None

    @classmethod
    def name(cls, value):
        """Return the attribute name for an int *value*, or raise ValueError."""
        if value in cls.value_map:
            return cls.value_map[value]
        else:
            raise ValueError()

    @classmethod
    def names(cls):
        """All known enumeration names."""
        return cls.value_map.values()

    @classmethod
    def from_str(cls, value):
        """Return the int value for a name *value*, or raise ValueError."""
        if value in cls.str_map:
            return cls.str_map[value]
        else:
            raise ValueError()

    @classmethod
    def convert(cls, value, parent_object):
        """
        Coerce a parameter to an enumeration value: None and ints pass
        through; anything else is looked up by its string form.
        :raises TypeError: when the value cannot be mapped to this enum.
        """
        if value is None or isinstance(value, int):
            return value
        # BUG FIX: the membership test used str(value) while the lookup used
        # the raw value, so non-string inputs whose str() matched a name
        # always fell through to TypeError. Use the string form for both.
        key = str(value)
        if key in cls.str_map:
            result = cls.str_map[key]
            if result is not None:
                return result
        raise TypeError('Parameter isn\'t of type {}'.format(cls.__name__))

    @classmethod
    def choices(cls):
        """Return (value, name) tuples for all public members (e.g. for form choices)."""
        members = getmembers(cls, lambda m: not(isroutine(m)))
        props = [m for m in members if not(m[0][:1] == '_')]
        choices = tuple([(p[1], p[0]) for p in props])
        return choices
class Value(object):
    """
    Values passed into Shark Objects as parameters or attributes can have special conversion before passed to Shark.
    Any object passed into Shark derived from Value will not be used directly, but result from the as_param and
    as_attr functions will be used.
    """

    def as_param(self):
        """Return the actual value to use when passed as a parameter."""
        return self

    # noinspection PyUnusedLocal
    def set_attr(self, obj, name):
        """Render this value as an html attribute assignment string."""
        escaped = escape_url(str(self))
        return "{}='{}'".format(name, escaped)
class StringParam(BaseParamConverter):
    """
    Turns the value into an html-safe str.
    None will become the empty string.
    To other objects the str() method will be applied.
    """

    @classmethod
    def convert(cls, value, parent_object):
        # None maps to the empty string; everything else is stringified
        # (strings pass through) and html-escaped.
        if value is None:
            return ''
        text = value if isinstance(value, str) else str(value)
        return escape_html(text)
class BaseObject(object):
    """
    Common marker base class for renderable Shark objects.
    """
class Object(BaseObject, BaseParamConverter):
    """
    Objects are the main building blocks in Shark. Trees of classes derived from Object get rendered into
    html, js, etc.
    """
    # Every class derived from Object keeps a counter to create unique html ids for the objects
    object_number = 0

    # noinspection PyAttributeOutsideInit
    def init(self, kwargs):
        """
        Called with the kwargs of the custom __init__ function of any subclass. This function must be called.
        :param kwargs: The kwargs that were passed into the __init__ function. Don't prepend the **, just pass the
        kwargs dictionary. The following kwargs are available:
        :return: None
        """
        self._id = kwargs.pop('id', None)
        self._attributes = {}
        self._parent = None
        self._variables = {}
        for key, value in kwargs.items():
            # Strip underscores so reserved words can be passed as e.g. class_.
            key = key.strip('_')
            if isinstance(value, Value):
                # NOTE(review): set_attr returns an attribute string but the
                # result is discarded here -- confirm this is intentional.
                value.set_attr(self, key)
            else:
                self._attributes[key] = value

    def __str__(self):
        return '<{} - {}>'.format(self.__class__.__name__, self._id)

    def __repr__(self):
        return '<{} - {}>'.format(self.__class__.__name__, self._id)

    @classmethod
    def convert(cls, value, parent_object):
        # Instances of the expected class (and None) pass through unchanged.
        if value is None or isinstance(value, cls):
            return value
        else:
            raise TypeError('Parameter not of class {}'.format(cls.__name__))

    # noinspection PyAttributeOutsideInit
    @property
    def id(self):
        """
        The id gets created when it's first requested. Elements for which the id is never requested, don't have an id.
        :return: The created or existing html id
        """
        if not self._id:
            # Bump the per-class counter so generated ids are unique per class.
            self.__class__.object_number += 1
            self._id = '%s_%s' % (self.__class__.__name__, self.__class__.object_number)
        return self._id

    def id_needed(self):
        # Touching the property forces id creation.
        return self.id

    @property
    def parent(self):
        return self._parent

    # noinspection PyUnusedLocal
    def param(self, value, converter, description='', default=None):
        """
        Converts a parameter passed in a Shark Object
        :param value: Value of the parameter
        :param converter: How to treat the input, pass in a subclass of BaseParamConverter
        :param description: This is used for documentation purposes
        :param default: The default value used if the Default class is passed in
        :return: The supplied value converted according to the type.
        """
        if value == Default:
            value = default
        # If Default is the default value you can detect whether or not this parameter was provided or not in
        # your rendering code.
        if value == Default:
            return Default
        # If a Value object was passed in, get the actual value.
        if isinstance(value, Value):
            value = value.as_param()
        if isclass(converter) and issubclass(converter, BaseParamConverter):
            return converter.convert(value, self)
        else:
            raise TypeError('type should be derived from BaseParamConverter')

    @property
    def base_attributes(self):
        # Render the id plus collected html attributes as a single
        # ' attr="value" ...' string (leading space), or '' when empty.
        output = []
        if self._id:
            output.append('id="{}"'.format(self.id))
        for attr, value in self._attributes.items():
            output.append('{}="{}"'.format(attr, value))
        if output:
            return ' ' + ' '.join(output)
        else:
            return ''

    def add_class(self, class_names):
        # Append each css class that is not already present.
        new_classes = class_names.split()
        existing_classes = self._attributes.setdefault('class', '').split()
        for class_name in new_classes:
            if class_name not in existing_classes:
                self._attributes['class'] += ' ' + class_name
        self._attributes['class'] = self._attributes['class'].strip()

    def add_style(self, style):
        # Append to the inline style, inserting a ';' separator when needed.
        old_style = self._attributes.setdefault('style', '')
        self._attributes['style'] = old_style + ('' if old_style.endswith(';') else ';') + style

    # noinspection PyAttributeOutsideInit
    def add_attribute(self, key, value):
        # Later values win; warn so silent overrides are visible in logs.
        if key in self._attributes:
            logging.warning('"{}" attribute already set, overridden'.format(key))
        self._attributes[key] = value

    def add_variable(self, web_object):
        # Register a nested renderable under a generated unique name.
        name = self.id.lower() + '_' + str(len(self._variables) + 1)
        self._variables[name] = objectify(web_object)
        return name

    def get_html(self, renderer):
        # Subclasses override this to emit their html.
        pass

    def serialize(self):
        return {'class_name': self.__class__.__name__, 'id': self.id}

    def __add__(self, other):
        # obj + other -> Objects([obj, other]); None is a no-op.
        obj = objectify(other)
        if obj is None:
            return self
        elif isinstance(obj, Object):
            return Objects([self, obj])
        return NotImplemented

    def __radd__(self, other):
        # other + obj -> Objects([other, obj]); None is a no-op.
        obj = objectify(other)
        if obj is None:
            return self
        elif isinstance(obj, Object):
            return Objects([obj, self])
        return NotImplemented

    def __iadd__(self, other):
        obj = objectify(other)
        if obj is None:
            return self
        elif isinstance(obj, (Object, Objects)):
            # Containers exposing an 'items' collection absorb the new
            # object; otherwise both are wrapped together in an Objects list.
            if 'items' in dir(self):
                if self.items is None:
                    self.items = Objects()
                self.items += obj
                return self
            return Objects([self, obj])
        else:
            raise NotImplementedError("{} does not have 'items'".format(self.__class__.__name__))

    @property
    def jq(self):
        # Imported lazily to avoid a circular import with shark.actions.
        from shark.actions import JQ
        return JQ("$('#{}')".format(self.id), self)

    @classmethod
    def example(cls):
        # Subclasses may return a demo instance for documentation purposes.
        return NotImplemented
class Objects(list, BaseObject):
    """A list of renderable Objects that can itself be rendered."""

    def __init__(self, *args, **kwargs):
        super().__init__(**kwargs)
        # append() flattens iterables, skips None and objectifies the rest.
        self.append(*args)
        self._parent = None

    def __repr__(self):
        original = list.__repr__(self)
        return "{}({})".format(self.__class__.__name__, original)

    def get_html(self, html):
        """Render every contained object, temporarily pushing our parent
        onto the renderer's parent tree when one is set."""
        if self._parent and isinstance(self._parent, Object):
            html.parent_tree.insert(0, self._parent)
            for web_object in self:
                html.render('', web_object)
            html.parent_tree.pop(0)
        else:
            for web_object in self:
                html.render('', web_object)

    def append(self, *objects):
        """Append objects, flattening non-string iterables and skipping None."""
        for obj in objects:
            if obj is not None:
                if isinstance(obj, Iterable) and not isinstance(obj, str):
                    self.append(*obj)
                else:
                    obj = objectify(obj)
                    super().append(obj)
                    obj._parent = self

    def __add__(self, other):
        self.append(objectify(other))
        return self

    def __radd__(self, other):
        # BUG FIX: previously the result of objectify() was inserted
        # unconditionally, so a None (e.g. from `None + objs`) ended up as a
        # literal None element in the list. append() skips None; __radd__
        # now does the same.
        obj = objectify(other)
        if obj is not None:
            # NOTE(review): unlike append(), insert() does not set
            # obj._parent -- confirm whether that asymmetry is intentional.
            self.insert(0, obj)
        return self

    def __iadd__(self, other):
        self.append(objectify(other))
        return self
class PlaceholderWebObject(BaseObject):
    """Stand-in for an object that already exists on the client side,
    addressed only by its html id and class name."""

    def __init__(self, handler, object_id, class_name):
        self.handler = handler
        self.id = object_id
        self.class_name = class_name
        self.variables = {}
        self.jqs = []

    @property
    def jq(self):
        # Imported lazily to avoid a circular import with shark.actions.
        from shark.actions import JQ
        selector = "$('#{}')".format(self.id)
        query = JQ(selector, self)
        self.jqs.append(query)
        return query

    def add_variable(self, web_object):
        # Register a nested renderable under a generated unique name.
        index = len(self.variables) + 1
        name = self.id.lower() + '_' + str(index)
        self.variables[name] = objectify(web_object)
        return name

    def replace(self, web_object):
        self.jq.html(web_object)

    # TODO: Support calling methods on the original class, like Image().src()
    def src(self, src):
        return self.jq.attr('src', src)
class Text(Object):
    """
    Just plain text.
    """

    def __init__(self, text='', **kwargs):
        self.init(kwargs)
        # Converted to an html-safe string; None becomes ''.
        self.text = self.param(text, StringParam, 'The text')

    def get_html(self, html):
        html.append(self.text)

    def __str__(self):
        if self.text:
            return self.text
        return ''

    @classmethod
    def example(cls):
        return Text('Hello world!')
def objectify(obj):
    """
    Normalize *obj* into a renderable: Objects (and None) pass through,
    non-string iterables are wrapped in an Objects list, everything else
    becomes a Text node.
    """
    if isinstance(obj, Object) or obj is None:
        return obj
    if isinstance(obj, Iterable) and not isinstance(obj, str):
        # BUG FIX (dead code removed): the old `elif isinstance(obj, Objects):
        # return None` branch was unreachable -- Objects subclasses list, so
        # it was always caught by this Iterable branch first and re-wrapped.
        # Behavior is unchanged: Objects instances still land here.
        return Objects(obj)
    return Text(str(obj))
| |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for default_types."""
from tensorflow.core.function.trace_type import default_types
from tensorflow.python.platform import test
class DefaultTypesTest(test.TestCase):
    """Unit tests for the default trace-type implementations."""

    def testGenericSupertypes(self):
        # Generic supertype is reflexive and defined only for equal payloads.
        generic_a = default_types.Generic(1)
        generic_b = default_types.Generic(2)
        generic_c = default_types.Generic(1)
        self.assertEqual(generic_a, generic_a.most_specific_common_supertype([]))
        self.assertEqual(generic_a,
                         generic_a.most_specific_common_supertype([generic_a]))
        self.assertEqual(generic_a,
                         generic_a.most_specific_common_supertype([generic_c]))
        self.assertIsNone(generic_a.most_specific_common_supertype([generic_b]))

    def testOrderedCollectionTypeEquality(self):
        # Equality is element-wise and order-sensitive; equal collections
        # must also hash equal.
        collection = default_types.OrderedCollection
        generic = default_types.Generic
        collection_a = collection(generic(1), generic(2), generic(3))
        collection_b = collection(generic(1), generic(2), generic(1))
        collection_c = collection(generic(1), generic(2), generic(3))
        self.assertNotEqual(collection_a, collection_b)
        self.assertEqual(collection_a, collection_c)
        self.assertEqual(hash(collection_a), hash(collection_c))

    def testOrderedCollectionTypeSubtype(self):
        # Collection subtyping delegates element-wise to the contained types.

        class Subtypable(default_types.Generic):

            def is_subtype_of(self, other):
                return self._object == 2 or other._object == 3

        collection = default_types.OrderedCollection
        collection_a = collection(Subtypable(1), Subtypable(2), Subtypable(3))
        collection_b = collection(Subtypable(2), Subtypable(1), Subtypable(2))
        collection_c = collection(Subtypable(1), Subtypable(3), Subtypable(3))
        self.assertTrue(collection_b.is_subtype_of(collection_c))
        self.assertFalse(collection_a.is_subtype_of(collection_b))
        self.assertFalse(collection_c.is_subtype_of(collection_a))

    def testOrderedCollectionTypeSupertype(self):
        # Collection supertyping delegates element-wise to the contained types.

        class Supertypable(default_types.Generic):

            def most_specific_common_supertype(self, others):
                if not others:
                    return self
                if self._object == 2 and isinstance(others[0]._object, int):
                    return Supertypable(3)
                else:
                    return None

        collection = default_types.OrderedCollection
        collection_a = collection(Supertypable(1), Supertypable(2), Supertypable(3))
        collection_b = collection(Supertypable(2), Supertypable(2), Supertypable(2))
        self.assertEqual(collection_a,
                         collection_a.most_specific_common_supertype([]))
        self.assertIsNone(
            collection_a.most_specific_common_supertype([collection_b]))
        self.assertEqual(
            collection_b.most_specific_common_supertype([collection_a]),
            collection(Supertypable(3), Supertypable(3), Supertypable(3)))

    def testDictTypeSubtype(self):
        # Dict subtyping requires matching keys and element-wise subtyping.

        class MockSubtypeOf2(default_types.Generic):

            def is_subtype_of(self, other):
                return other._object == 2

        dict_type = default_types.Dict
        dict_a = dict_type({
            'a': MockSubtypeOf2(1),
            'b': MockSubtypeOf2(1),
            'c': MockSubtypeOf2(1)
        })
        dict_b = dict_type({
            'a': MockSubtypeOf2(2),
            'b': MockSubtypeOf2(2),
            'c': MockSubtypeOf2(2)
        })
        dict_c = dict_type({'a': MockSubtypeOf2(1), 'b': MockSubtypeOf2(1)})
        self.assertTrue(dict_a.is_subtype_of(dict_b))
        self.assertFalse(dict_c.is_subtype_of(dict_b))
        self.assertFalse(dict_c.is_subtype_of(dict_a))

    def testDictTypeSupertype(self):
        # Dict supertyping delegates per-key to the contained types.

        class MockSupertypes2With3(default_types.Generic):

            def most_specific_common_supertype(self, others):
                if not others:
                    return self
                if self._object == 2 and isinstance(others[0]._object, int):
                    return MockSupertypes2With3(3)
                else:
                    return None

        dict_type = default_types.Dict
        dict_a = dict_type({
            'a': MockSupertypes2With3(1),
            'b': MockSupertypes2With3(2),
            'c': MockSupertypes2With3(3)
        })
        dict_b = dict_type({
            'a': MockSupertypes2With3(2),
            'b': MockSupertypes2With3(2),
            'c': MockSupertypes2With3(2)
        })
        self.assertEqual(dict_a,
                         dict_a.most_specific_common_supertype([]))
        self.assertIsNone(dict_a.most_specific_common_supertype([dict_b]))
        self.assertEqual(
            dict_b.most_specific_common_supertype([dict_a]),
            dict_type({
                'a': MockSupertypes2With3(3),
                'b': MockSupertypes2With3(3),
                'c': MockSupertypes2With3(3)
            }))

    def testListTupleInequality(self):
        # List and Tuple types never compare equal, even with equal elements.
        generic = default_types.Generic
        list_a = default_types.List(generic(1), generic(2), generic(3))
        list_b = default_types.List(generic(1), generic(2), generic(3))
        tuple_a = default_types.Tuple(generic(1), generic(2), generic(3))
        tuple_b = default_types.Tuple(generic(1), generic(2), generic(3))
        self.assertEqual(list_a, list_b)
        self.assertEqual(tuple_a, tuple_b)
        self.assertNotEqual(list_a, tuple_a)
        self.assertNotEqual(tuple_a, list_a)

    def testDictTypeEquality(self):
        # Dict equality is key-order independent.
        dict_type = default_types.Dict
        generic = default_types.Generic
        dict_a = dict_type({generic(1): generic(2), generic(3): generic(4)})
        dict_b = dict_type({generic(1): generic(2)})
        dict_c = dict_type({generic(3): generic(4), generic(1): generic(2)})
        self.assertEqual(dict_a, dict_c)
        self.assertNotEqual(dict_a, dict_b)

    def testReferenceSubtype(self):
        # Reference subtyping requires identical ids plus base subtyping.

        class MockSubtypeOf2(default_types.Generic):

            def is_subtype_of(self, other):
                return other._object == 2

        original = default_types.Reference(MockSubtypeOf2(3), 1)
        clone = default_types.Reference(MockSubtypeOf2(3), 1)
        different_id = default_types.Reference(MockSubtypeOf2(3), 2)
        supertype = default_types.Reference(MockSubtypeOf2(2), 1)
        different_type = default_types.Generic(1)
        self.assertEqual(original, clone)
        self.assertFalse(original.is_subtype_of(different_id))
        self.assertTrue(original.is_subtype_of(supertype))
        self.assertFalse(supertype.is_subtype_of(original))
        self.assertFalse(original.is_subtype_of(different_type))

    def testReferenceSupertype(self):
        # Reference supertyping requires identical ids plus base supertyping.

        class Mock2AsTopType(default_types.Generic):

            def most_specific_common_supertype(self, types):
                if not all(isinstance(other, Mock2AsTopType) for other in types):
                    return None
                return self if all(self._object == other._object
                                   for other in types) else Mock2AsTopType(2)

        original = default_types.Reference(Mock2AsTopType(3), 1)
        clone = default_types.Reference(Mock2AsTopType(3), 1)
        different_id = default_types.Reference(Mock2AsTopType(3), 2)
        supertype = default_types.Reference(Mock2AsTopType(2), 1)
        different_type = default_types.Generic(1)
        self.assertEqual(supertype.most_specific_common_supertype([]), supertype)
        self.assertEqual(original.most_specific_common_supertype([clone]), original)
        self.assertIsNone(original.most_specific_common_supertype([different_id]))
        self.assertIsNone(original.most_specific_common_supertype([different_type]))
# Standard test entry point: run this module's tests when executed directly.
if __name__ == '__main__':
  test.main()
| |
import os
import sys
import os.path
import logging
import time
import pprint
import shutil
import traceback
import settings
import pArch
import dbPhashApi as dbApi
import scanner.fileHasher
# Default maximum hamming distance between two perceptual hashes for them to
# be treated as a match, used whenever the caller does not pass a distance.
PHASH_DISTANCE_THRESHOLD = 2

# Phash values so common that matching on them is useless; they are
# short-circuited out of the search entirely (see _isBadPee()).
BAD_PHASHES = [
    # Phash value of '0' is commonly a result of an image where there is no content, such as a blank page.
    # There are 79 THOUSAND of these in my collection. As a result, the existence check is prohibitively slow, so
    # we just short-circuit and ignore it.
    0,
    -9223372036854775808,  # 0x8000000000000000
]
class ArchiveProcessorException(Exception):
    """Base exception for errors raised while checking/processing an archive."""
class DatabaseDesynchronizedError(ArchiveProcessorException):
    """Error subtype for database/filesystem desynchronization conditions."""
class InvalidArchiveContentsException(ArchiveProcessorException):
    """Base error for archives whose contents fail the integrity checks."""
class InvalidArchivePhashContentsException(InvalidArchiveContentsException):
    """Raised when too many images in one archive are phash-identical."""
class InvalidArchiveMd5ContentsException(InvalidArchiveContentsException):
    """Raised when too many files in one archive are binary-identical."""
class ProxyDbBase(object):
    """Thin base class that owns the database connection used by checkers."""

    def __init__(self):
        # Resolved through getDbConnection() so subclasses/tests can swap
        # in a different backend.
        self.db = self.getDbConnection()

    # Overridden in child classes so the unit tests can redirect
    # db access to the testing database by returning a different DB
    # connection object.
    def getDbConnection(self):  # pragma: no cover
        """Return the production phash database API object."""
        return dbApi.PhashDbApi()

    # def convertDbIdToPath(self, inId):
    # 	return self.db.getItem(wantCols=['fsPath', "internalPath"], dbId=inId)
class ArchChecker(ProxyDbBase):
    '''
    Class to encapsulate the required object to check if `archpath` is unique.
    '''

    # Hasher class used by addArch() to (re)compute hashes for an archive.
    hasher = scanner.fileHasher.HashThread

    def __init__(self, archPath, pathNegativeFilter=None, pathPositiveFilter=None, negativeKeywords=None):
        '''
        Params:
            archPath (str): Filesystem path of the archive to check.
            pathNegativeFilter (list): default =``[]``
                List of path prefixes to exclude from matching.
                By default, an empty list, leading to all possible paths being used.
            pathPositiveFilter (list): default =``[]``
                When non-empty, only matches under these path prefixes count.
            negativeKeywords (list): default =``[]``
                Matches whose path contains any of these substrings are ignored.
        '''
        super().__init__()
        self.negativeMaskedPaths = pathNegativeFilter or []
        self.positiveMaskedPaths = pathPositiveFilter or []
        self.negativeKeywords = negativeKeywords or []

        self.archPath = archPath
        self.arch = pArch.PhashArchive(archPath)

        self.log = logging.getLogger("Main.Deduper")
        self.log.info("ArchChecker Instantiated on '%s'", archPath)

    # If getMatchingArchives returns something, it means we're /not/ unique,
    # because getMatchingArchives returns matching files
    def isBinaryUnique(self):
        '''
        Is the archive this class was instantiated on binary unique (e.g. there
        is a byte-for-byte complete duplicate of it) elsewhere on the filesystem,
        that still exists.

        Returns:
            Boolean: True if unique, False if not.
        '''
        ret = self.getMatchingArchives()

        if len(ret):
            return False
        return True

    def isPhashUnique(self, searchDistance=None):
        '''
        Is the archive this class was instantiated on phash unique, where the
        duplicating files elsewhere on the filesystem still exist.

        Phash-unique means there are matches for each of the files in the archive,
        including searching by phash within distance `searchDistance` for any files
        that are images.

        Returns:
            Boolean: True if unique, False if not.
        '''
        if searchDistance is None:
            searchDistance = PHASH_DISTANCE_THRESHOLD

        ret = self.getPhashMatchingArchives(searchDistance, getAllCommon=False)

        if len(ret):
            return False
        return True

    def getBestBinaryMatch(self):
        '''
        Get the filesystem path of the "best" matching archive.

        "Best" is a somewhat fuzzy concept. In this case, it's assumed to be
        the archive with the largest number of images in common.
        If two archives share the same number of matching images, the larger
        of the two matching archives is selected. If they're the same size,
        the chosen archive will be chosen arbitrarily.

        Returns:
            String: Path to archive on the local filesystem. Path is verified to
                exist at time of return.
                If the current archive contains unique files, this will return None
                (see _getBestMatchingArchive()).
        '''
        ret = self.getMatchingArchives()
        return self._getBestMatchingArchive(ret)

    def getBestPhashMatch(self, distance=None):
        '''
        Get the filesystem path of the "best" matching archive.

        "Best" is a somewhat fuzzy concept. In this case, it's assumed to be
        the archive with the largest number of images in common.
        If two archives share the same number of matching images, the larger
        of the two matching archives is selected. If they're the same size,
        the chosen archive will be chosen arbitrarily.

        Identical to `getBestBinaryMatch()`, except including phash-matches
        in the search for matches.

        Returns:
            String: Path to archive on the local filesystem. Path is verified to
                exist at time of return.
        '''
        if distance is None:
            distance = PHASH_DISTANCE_THRESHOLD

        ret = self.getPhashMatchingArchives(distance, getAllCommon=False)
        return self._getBestMatchingArchive(ret)

    def getSignificantlySimilarArches(self, searchDistance=None):
        '''
        This function returns a dict of lists containing archives with files in common with
        the current archive. It only operates using phash similarity metrics (as phash searches
        are intrinsically a superset of binary match similarity metrics).

        The dict keys are the number of files in common, and the list is a number of filesystem-
        paths to the intersecting archives.
        '''
        if searchDistance is None:
            searchDistance = PHASH_DISTANCE_THRESHOLD

        common = self.getPhashMatchingArchives(getAllCommon=True, searchDistance=searchDistance)
        ret = self._processMatchesIntoRet(common)

        # Now, we truncate the return common set to every item which has >
        # the mean number of items in common
        # This is a preventative measure against things like scanlators which
        # have a credit page they put EVERYWHERE, and we therefor want to
        # disregard.

        # print("Common:")
        # pprint.pprint(common)
        # print("Ret:")
        # pprint.pprint(ret)

        keys = list(ret.keys())
        if not keys:
            return ret

        mean = (sum(keys) / len(keys))

        # Drop every match-count bucket strictly below the mean.
        for key in [key for key in keys if key < mean]:
            ret.pop(key)

        # And pop all items which have only one item in common
        if 1 in ret:
            ret.pop(1)

        # Sort the return, to make it deterministic
        for item in ret.values():
            item.sort()

        return ret

    def _processMatchesIntoRet(self, matches):
        '''
        This takes a dict of items where each key is a filesystem path, and the value
        is a set of internal-paths that are matched in the archive at the key filesystem path.

        It transforms that dict into another dict where the key is the number of matches
        that a filesystem path has, and the value is a list of filesystem paths that
        had `key` matches.
        '''
        ret = {}
        for key in matches.keys():
            ret.setdefault(len(matches[key]), []).append(key)

        # Make the return ordering deterministic
        for key in ret.keys():
            ret[key].sort()

        return ret

    def _shouldSkipFile(self, fileN, fileType):
        '''
        Internal method call. Is used to filter out files that are considered
        "garbage" from evaluation in the matching search. This includes things
        like the windows "Thumbs.db" file, some of the information notes generated
        by the automated ad-removal system in MangaCMS, and `__MACOSX` resource
        fork files&directory that Crapple loves to spew all over any non-HFS
        volumes.

        Params:
            fileN (str): The file's name within the archive.
            fileType (str): The libmagic-reported type string for the file.

        Returns:
            Boolean: True if the file is garbage, False if it is not.
        '''
        thumbs_file_types = [
            # So debian wheezy is so out of date their libmagick
            # doesn't appear to have the mimetype parameter.
            'Composite Document File V2 Document, No summary info',
            'application/CDFV2-corrupt',
            'application/CDFV2',
        ]
        if fileN.endswith("Thumbs.db") and fileType in thumbs_file_types:
            self.log.info("Windows thumbnail database. Ignoring")
            return True

        # We have to match both 'ASCII text', and the occational 'ASCII text, with no line terminators'
        if fileN.endswith("deleted.txt") and fileType == 'text/plain':
            self.log.info("Found removed advert note. Ignoring")
            return True

        if '__MACOSX/' in fileN:
            self.log.info("Mac OS X cache dir. Ignoring")
            return True

        return False

    def _getBestMatchingArchive(self, ret):
        '''
        Internal function that drives `getBestBinaryMatch()` and `getBestPhashMatch()`.

        "Best" match is kind of a fuzzy term here. I define it as the archive with the
        most files in common with the current archive.
        If there are multiple archives with identical numbers of items in common,
        the "best" is then the largest of those files
        (I assume that the largest is probably either a 1. volume archive, or
        2. higher quality)
        '''
        # Short circuit for no matches
        if not len(ret):
            return None

        # Invert the mapping: number-of-matches -> [fs paths with that count].
        tmp = {}
        for key in ret.keys():
            tmp.setdefault(len(ret[key]), []).append(key)

        maxKey = max(tmp.keys())

        # If there is only one file with the most items, return that.
        if len(tmp[maxKey]) == 1:
            return tmp[maxKey].pop()

        items = [(os.path.getsize(item), item) for item in tmp[maxKey]]
        items.sort()

        # TODO: The fitness of the match should additionally consider the number of files in each dir.
        #       e.g. if the current file has 100 files, with 10 in common with another file with
        #       100 files, that's not really a good match. On the other hand, if the current
        #       file has 100 files, with 10 in common with another file which has a total of
        #       10 files in it, it's an excellent match since the current file is a complete
        #       superset of the other file.

        # Finally, sort by size, return the biggest one of them
        return items.pop()[-1]

    def _getBinaryMatchesForHash(self, hexHash):
        '''
        Params:
            hexHash (String): The hash to match against.

        Returns:
            dict of sets. Dict keys are filesystem paths, and the set contains
            the internal path of each item in the key that has the query key

        This function searches for all items with a binary hash of `hexHash`, masks out
        any paths in `self.negativeMaskedPaths`, and then checks for file existence. If the file exists,
        it's inserted into a local dictionary with the key being the filesystem path,
        and the value being a set into which the internal path is inserted.
        '''
        matches = {}

        dupsIn = self.db.getByHash(hexHash, wantCols=['fsPath', 'internalPath'])
        for fsPath, internalPath in dupsIn:
            # Mask out items on the same path.
            if fsPath == self.archPath:
                continue

            # Do negative path masking
            if any([fsPath.startswith(badpath) for badpath in self.negativeMaskedPaths]):
                continue

            # And positive masking
            if self.positiveMaskedPaths and not any([fsPath.startswith(badpath) for badpath in self.positiveMaskedPaths]):
                continue

            # And keyword masking.
            if self.negativeKeywords and any([tmp in fsPath for tmp in self.negativeKeywords]):
                continue

            exists = os.path.exists(fsPath)
            if exists:
                matches.setdefault(fsPath, set()).add(internalPath)
            elif not exists:
                # Stale row: the file is gone, so scrub it from the database.
                self.log.warning("Item '%s' no longer exists!", fsPath)
                self.db.deleteDbRows(fspath=fsPath)

        return matches

    def getMatchingArchives(self):
        '''
        This function does two things.
        1. It iterates over all the files in an archive, checking each file for binary uniqueness
           via MD5sums.
        2. Accumulates a list of each file with any files in common with the archive
           this class was instantiated on.

        The return value can be two things:

        If the instantiated archive contains unique items, the return value is
        an empty set (`{}`).

        If the target archive does not contain unique files, the return value is a
        dict of sets, where the key is the filesystem path of each archive containing
        matching items, and the value is a set containing the items that the
        filesystem-path-key has in common with the target archive.
        '''
        self.log.info("Checking if %s contains any binary unique files.", self.archPath)

        matches = {}
        for fileN, infoDict in self.arch.iterHashes():
            if self._shouldSkipFile(fileN, infoDict['type']):
                continue

            # get a dict->set of the matching items
            matchDict = self._getBinaryMatchesForHash(infoDict['hexHash'])

            if matchDict:
                # If we have matching items, merge them into the matches dict->set
                for key in matchDict.keys():
                    matches.setdefault(key, set()).update(matchDict[key])
            else:
                # Short circuit on unique item, since we are only checking if ANY item is unique
                self.log.info("It contains at least one unique file(s).")
                return {}

        self.log.info("It does not contain any binary unique file(s).")
        return matches

    def _loadFileContents(self):
        '''
        Return a list of (fileName, infoDict) tuples for every file in the
        archive that is not filtered out by _shouldSkipFile().
        '''
        ret = []
        for fileN, infoDict in self.arch.iterHashes():
            if self._shouldSkipFile(fileN, infoDict['type']):
                continue
            ret.append((fileN, infoDict))
        return ret

    def _doRowLookup(self, matchids, resolution):
        '''
        Resolve database row ids into row dicts, dropping rows that are
        masked out, missing from the database, deleted on disk, or (when
        `resolution` is a truthy (x, y) tuple) of lower resolution than the
        query image.
        '''
        keys = ["dbid", "fspath", "internalpath", "itemhash", "phash", "itemkind", "imgx", "imgy"]
        # self.log.info("Row lookup for %s (%s)", matchids, resolution)

        ret_rows = []

        for matchid in matchids:
            row = self.db.getItem(dbId=matchid)

            # Sometimes a row has been deleted without being removed from the tree.
            # If this has happened, getItem() will return an empty list.
            # Don't return that, if it happens
            if not row:
                self.log.info("Row deleted without updating tree")
                continue

            # Pack returned row tuples into nice dicts for easy access
            row = dict(zip(keys, row))

            # Mask out items on the same path.
            if row['fspath'] == self.archPath:
                continue

            # Mask with the masked-paths array
            if any([row['fspath'].startswith(badpath) for badpath in self.negativeMaskedPaths]):
                print("(negativeMaskedPaths) MaskedPath: ", row['fspath'], " in ", self.negativeMaskedPaths)
                continue

            # And positive masking
            # NOTE(review): this debug print references negativeMaskedPaths in the
            # positive-mask branch — likely meant positiveMaskedPaths; confirm.
            if self.positiveMaskedPaths and not any([row['fspath'].startswith(badpath) for badpath in self.positiveMaskedPaths]):
                print("(positiveMaskedPaths) MaskedPath: ", row['fspath'], " in ", self.negativeMaskedPaths)
                continue

            # I genuinely cannot see how this line would get hit, but whatever.
            if row['phash'] is None and resolution:  # pragma: no cover
                raise ValueError("Line is missing phash, yet in phash database? DbId = '%s'", row['dbid'])

            if (not row['imgx'] or not row['imgy']) and resolution:
                self.log.warning("Image with no resolution stats! Wat?.")
                self.log.warning("Image: '%s', '%s'", row['fspath'], row['internalpath'])
                continue

            if resolution and len(resolution) == 2:
                res_x, res_y = resolution
                # Filter out candidate matches that are smaller than the query image.
                if res_x > row['imgx'] or res_y > row['imgy']:
                    # self.log.info("Filtering phash match due to lower resolution.")
                    continue
                # else:
                # 	self.log.info("Image not resolution filtered: (%s x %s) - (%s x %s).", res_x, res_y, row['imgx'], row['imgy'])

            if not os.path.exists(row['fspath']):
                self.log.info("File deleted without updating tree")
                continue

            ret_rows.append(row)

        return ret_rows

    def _isBadPee(self, phash):
        # True when `phash` is one of the uselessly-common values in BAD_PHASHES.
        return phash in BAD_PHASHES

    def _doHashSearches(self, filelist, searchDistance, resolutionFilter):
        '''
        Annotate every infoDict in `filelist` (a list of (fileN, infoDict)
        tuples) with its match data, in place:

        - 'fileN':       the file's name within the archive.
        - 'binMatchIds': db row ids sharing the file's binary hash.
        - 'pMatchIds':   db row ids within `searchDistance` of the file's phash
                         (only set for files that have a phash).
        - 'bMatches' / 'pMatches': the corresponding resolved db rows, unless
          the lookup is skipped because the phash is in BAD_PHASHES or there
          are more than 100 candidate ids.

        Returns the (mutated) `filelist`.
        '''
        for fileN, infoDict in filelist:
            infoDict["fileN"] = fileN

        # Do the normal binary lookup
        for dummy_fileN, infoDict in filelist:
            # get a dict->set of the matching items
            infoDict['binMatchIds'] = [tmp for tmp, in self.db.getByHash(infoDict['hexHash'], wantCols=['dbid'])]

        # Then, atomically do the phash searches
        # I really don't like reaching into the class this far, but
        # it means I don't need to import the contextlib library into the phashdbapi file.
        matches = self.db.searchPhashSet([infoDict['pHash'] for fileN, infoDict in filelist if infoDict['pHash'] is not None], searchDistance)

        for fileN, infoDict in filelist:
            if infoDict['pHash'] is not None:
                infoDict['pMatchIds'] = matches[infoDict['pHash']]

        # Finally, resolve out the row returns from the p-hash searches out
        # too db rows.
        for fileN, infoDict in filelist:
            if resolutionFilter:
                imgDims = (infoDict['imX'], infoDict['imY'])
            else:
                imgDims = None

            if 'pMatchIds' in infoDict:
                if self._isBadPee(infoDict['pHash']):
                    self.log.warning("Skipping any checks for hash value of '%s', as it's uselessly common.", infoDict['pHash'])
                elif len(infoDict['pMatchIds']) > 100:
                    self.log.info("Skipping existence check due to quantity of candidate matches.")
                else:
                    infoDict['pMatches'] = self._doRowLookup(infoDict['pMatchIds'], imgDims)
                    # print("PHash Matches: ", infoDict['pMatches'])

            if 'binMatchIds' in infoDict:
                if self._isBadPee(infoDict['pHash']):
                    self.log.warning("Skipping any checks for hash value of '%s', as it's uselessly common.", infoDict['pHash'])
                elif len(infoDict['binMatchIds']) > 100:
                    self.log.info("Skipping existence check due to quantity of candidate matches.")
                else:
                    # Resolution filtering is pointless here, since we matched on the MD5s, rather then file hashes
                    infoDict['bMatches'] = self._doRowLookup(infoDict['binMatchIds'], False)
                    # print("Binary Matches: ", infoDict['bMatches'])

        return filelist

    def _checkHashesOk(self, fileContent, searchDistance):
        '''
        Do some integrity checks against the loaded file content, to catch some possible
        issues.

        Primarily, this detects issues where the files in an archive are mis-hashed due to
        library issues.

        The idea is that a single archive should be at least ~75% unique. If an archive has
        10 images, yet only 5 of them are unique even within the archive, something is
        probably wrong somewhere.

        Raises InvalidArchivePhashContentsException or
        InvalidArchiveMd5ContentsException on failure.
        '''
        # Files with no phash can only be uniqueness-checked by their MD5.
        md5s = [filed['hexHash'] for filen, filed in fileContent if not filed['pHash']]
        muniqueratio = len(set(md5s)) / max(1, len(md5s))

        phashes = [filed['pHash'] for filen, filed in fileContent if filed['pHash']]

        # Count phashes that are not within `searchDistance` of any earlier phash.
        so_far = []
        unique = 0
        for phash in phashes:
            similarity = [dbApi.hammingDistance(phash, other) for other in so_far]
            coincides = [tmp for tmp in similarity if tmp <= searchDistance]
            so_far.append(phash)
            if not coincides:
                unique += 1

        puniqratio = unique / max(1, len(phashes))
        hashratio = len(phashes) / max(1, len(md5s))

        # print("phashes", len(phashes))
        # print("muniqueratio", muniqueratio)
        # print("unique", unique)
        # print("puniqratio", puniqratio)
        # print("hashratio", hashratio)
        # print("len(md5s)", len(md5s))
        # print("len(set(md5s))", len(set(md5s)))

        if len(phashes) and puniqratio < 0.5:
            raise InvalidArchivePhashContentsException("Too many identical images (phash-search) in the archive!")

        # If there are any md5-only files, check they're at least 50% unique
        # Only do this if there are more md5s then images
        if len(md5s) and muniqueratio <= 0.6 and hashratio <= 1:
            raise InvalidArchiveMd5ContentsException("Too many identical files in the archive!")

    # This really, /really/ feels like it should be several smaller functions, but I cannot see any nice ways to break it up.
    # It's basically like 3 loops rolled together to reduce processing time and lookups, and there isn't much I can do about that.
    def getPhashMatchingArchives(self, searchDistance=None, getAllCommon=False, resolutionFilter=True):
        '''
        This function effectively mirrors the functionality of `getMatchingArchives()`,
        except that it uses phash-duplicates to identify matches as well as
        simple binary equality.

        The additional `getAllCommon` parameter overrides the early-return behaviour if
        one of the scanned items is unique. As such, if `getAllCommon` is True,
        it will phash search for every item in the archive, even if they're all unique.
        It also disables the resolution filtering of the match results.

        This is necessary for finding commonalities between archives, which is intended
        to return archives that the current archive has potentially superceded.
        '''
        if searchDistance is None:
            searchDistance = PHASH_DISTANCE_THRESHOLD

        self.log.info("Scanning for phash duplicates.")
        matches = {}

        fc = self._loadFileContents()
        self._checkHashesOk(fc, searchDistance)
        hashMatches = self._doHashSearches(fc, searchDistance, resolutionFilter)

        for container_filen, infoDict in hashMatches:
            fileN = infoDict['fileN']

            if self._shouldSkipFile(fileN, infoDict['type']):
                continue

            # Handle cases where an internal file is not an image
            if infoDict['pHash'] is None:
                self.log.warning("No phash for file '%s'! Wat?", fileN)
                self.log.warning("Returned pHash: '%s'", infoDict['pHash'])
                self.log.warning("Guessed file type: '%s'", infoDict['type'])
                self.log.warning("Should skip: '%s'", self._shouldSkipFile(fileN, infoDict['type']))
                self.log.warning("Using binary dup checking for file!")

                # If we have a phash, and yet pMatches is not present,
                # the duper skipped loading the matching files because
                # of quantity.
                # As such, just continue on.
                if 'binMatchIds' in infoDict and not 'bMatches' in infoDict:
                    continue

                # get a dict->set of the matching items
                matchList = infoDict['bMatches']
                if matchList:
                    for matchDict in matchList:
                        # If we have matching items, merge them into the matches dict->set
                        matches.setdefault(matchDict['fspath'], {})[(container_filen, fileN)] = True
                elif not getAllCommon:
                    # Short circuit on unique item, since we are only checking if ANY item is unique
                    self.log.info("It contains at least one unique file(s).")
                    return {}

            # Any non-none and non-0 matches get the normal lookup behaviour.
            else:
                # If we have a phash, and yet pMatches is not present,
                # the duper skipped loading the matching files because
                # of quantity.
                # As such, just continue on.
                if 'pHash' in infoDict and 'pMatchIds' in infoDict and not 'pMatches' in infoDict:
                    continue

                matchList = infoDict['pMatches']
                if matchList:
                    for matchDict in matchList:
                        # If we have matching items, merge them into the matches dict->set
                        # These are stored with the key being the item in the /original archive/ they
                        # match to. This way, if one file in the current archive matches
                        # to many images another archive, it will only get counted as a single
                        # match.
                        # This is because there are some archives with many, many white pages in them.
                        # Therefore, if a deduplicated archive has a single white page, it was
                        # resulting in an errant high similarity rating with the archive containing
                        # many duplicate files, which produces a mis-link in the post-deduplication
                        # relinking.
                        matches.setdefault(matchDict['fspath'], {})[(container_filen, fileN)] = True
                elif not getAllCommon:
                    # Short circuit on unique item, since we are only checking if ANY item is unique
                    self.log.info("It contains at least one unique file(s).")
                    self.log.info("Archive contains at least one unique phash(es).")
                    self.log.info("First unique file: '%s'", fileN)
                    return {}

        self.log.info("Archive does not contain any unique phashes.")
        return matches

    def deleteArch(self, moveToPath=False):
        '''
        Delete target arch.

        If ``moveToPath`` is specified, the archive will be moved there instead, as an option
        for deferred deletion.

        When ``moveToPath`` is specified, the current path is prepended to the filename, by
        replacing all directory delimiters (``/``) with semicolons (``;``). This allows the
        moved archive to be returned to it's original fs location in (almost) all cases.
        '''
        # Purge this archive's rows first, so the DB never references a
        # deleted/moved file.
        self.db.deleteDbRows(fspath=self.archPath)
        if not moveToPath:
            self.log.warning("Deleting archive '%s'", self.archPath)
            os.remove(self.archPath)
        else:
            dst = self.archPath.replace("/", ";")
            dst = os.path.join(moveToPath, dst)
            self.log.info("Moving item from '%s'", self.archPath)
            self.log.info(" to '%s'", dst)

            try:
                # Retry the move a few times to ride out transient failures,
                # re-raising on the final attempt.
                for x in range(3):
                    try:
                        shutil.move(self.archPath, dst)
                        break
                    except OSError:
                        self.log.error("Failure moving file?")
                        time.sleep(0.1)
                        if x == 2:
                            raise
            except KeyboardInterrupt:  # pragma: no cover (Can't really test keyboard interrupts)
                raise
            except (OSError, FileNotFoundError):
                self.log.error("ERROR - Could not move file!")
                self.log.error(traceback.format_exc())

    def deleteArchFromDb(self):
        # Remove every database row referring to this archive's path.
        self.db.deleteDbRows(fspath=self.archPath)

    def addArch(self):
        '''
        Add the hash values from the target archive to the database, with the current
        archive FS path as it's location.
        '''
        self.log.info("Adding archive to database. Hashing file: %s", self.archPath)

        # Delete any existing hashes that collide
        self.deleteArchFromDb()

        # And tell the hasher to process the new archive.
        hasher = self.hasher(inputQueue=None, outputQueue=None, runMgr=None)
        hasher.processArchive(self.archPath)

    # Proxy through to the archChecker from UniversalArchiveInterface
    @staticmethod
    def isArchive(archPath):
        '''
        Simple staticmethod boolean check. Used to determine of the item
        at the passed path (``archPath``) is actually an archive, by
        looking at it with ``libmagic``.
        '''
        return pArch.PhashArchive.isArchive(archPath)
def getSignificantlySimilarArches(filePath, distance=4):
    '''
    Module-level wrapper: build an ArchChecker for `filePath` (with the
    globally-configured path masks) and return its similar-archive dict.
    Returns the string "error!" if anything goes wrong.
    '''
    log = logging.getLogger("Main.DedupServer")
    try:
        checker = ArchChecker(filePath, pathNegativeFilter=settings.masked_path_prefixes)
        return checker.getSignificantlySimilarArches(searchDistance=distance)
    except Exception:
        log.critical("Exception when processing item!")
        for tb_line in traceback.format_exc().split("\n"):
            log.critical(tb_line)
        return "error!"
def processDownload(filePath, pathNegativeFilter=None, distance=None, moveToPath=None, checkClass=ArchChecker, cross_match=True, pathPositiveFilter=None, negativeKeywords=None):
    '''
    Process the file `filePath`. If it's a phash or binary duplicate, it is deleted.

    The `checkClass` param is to allow the checking class to be overridden for testing.

    Returns:
        (tag, bestMatch, common) tuple.
        `tag` is a string containing space-separated tags corresponding to
        the deduplication state (e.g. `deleted`, `was-duplicate`, and `phash-duplicate`)
        `bestMatch` is the fspath of the best-matching other archive.
        `common` is the significantly-similar-archives dict (None when
        `cross_match` is False).
    '''
    log = logging.getLogger("Main.DedupServer")

    status = ''
    bestMatch = None
    common = {}

    # Hackyness to work around some strange behaviour in the
    # netref objects from rpyc.
    # Copy the (possibly remote) filter sequences into plain local lists.
    pathNegativeFilter_local = []
    pathPositiveFilter_local = []
    if isinstance(pathNegativeFilter, (list, tuple)):
        for item in pathNegativeFilter:
            pathNegativeFilter_local.append(item)
    if isinstance(pathPositiveFilter, (list, tuple)):
        for item in pathPositiveFilter:
            pathPositiveFilter_local.append(item)

    # Always mask out the globally-configured path prefixes as well.
    pathNegativeFilter_local.extend(settings.masked_path_prefixes)

    try:
        ck = checkClass(filePath, pathNegativeFilter=pathNegativeFilter_local, pathPositiveFilter=pathPositiveFilter_local, negativeKeywords=negativeKeywords)
        if cross_match:
            common = ck.getSignificantlySimilarArches(searchDistance=distance)
        else:
            common = None

        # Binary duplicate? Delete (or move) and report the match.
        binMatch = ck.getBestBinaryMatch()
        if binMatch:
            ck.deleteArch(moveToPath=moveToPath)
            return 'deleted was-duplicate', binMatch, common

        # Phash duplicate? Same treatment, with an extra tag.
        pMatch = ck.getBestPhashMatch(distance=distance)
        if pMatch:
            ck.deleteArch(moveToPath=moveToPath)
            return 'deleted was-duplicate phash-duplicate', pMatch, common

        # Unique archive: record its hashes in the database.
        ck.addArch()

    except InvalidArchivePhashContentsException:
        log.critical("Excessive duplicates when processing item!")
        for line in traceback.format_exc().split("\n"):
            log.critical(line)
        status += " warning phash-conflict"

    except Exception:
        log.critical("Exception when processing item!")
        for line in traceback.format_exc().split("\n"):
            log.critical(line)
        status += " damaged"

    status = status.strip()

    log.info("Returning status '%s' for archive '%s'. Best Match: '%s'", status, filePath, bestMatch)
    return status, bestMatch, common
def commandLineReloadTree(scanConf):
    '''
    CLI entry point: ask the local RPC daemon (localhost:12345) to reload
    its phash tree.
    '''
    import rpyc
    remote = rpyc.connect("localhost", 12345)
    # print("Forcing reload of phash tree. Search functionality will block untill load is complete.")
    remote.root.reloadTree()
    # print("Tree reloaded!")
def commandLineProcess(scanConf):
    '''
    CLI entry point: dedupe-process a single file by forwarding it to the
    RPC daemon on localhost:12345. Returns the daemon's
    (status, bestMatch, intersections) tuple, or None for invalid input.
    '''
    import scanner.logSetup
    import rpyc

    scanner.logSetup.initLogging()

    # Bail out early when the source path is missing or is not a plain file.
    if not os.path.exists(scanConf.sourcePath):
        # print("ERROR: Source file does not exist!")
        return
    if not os.path.isfile(scanConf.sourcePath):
        # print("ERROR: Source is not a file!")
        return

    if scanConf.noContext:
        scanContext = None
    else:
        # NOTE(review): os.path.split() returns a (head, tail) tuple, not a
        # path string — confirm the daemon expects this shape as a filter entry.
        scanContext = [os.path.split(scanConf.sourcePath)]

    remote = rpyc.connect("localhost", 12345)

    status, bestMatch, intersections = remote.root.processDownload(
        scanConf.sourcePath,
        pathNegativeFilter=scanContext,
        distance=6,
        moveToPath=None,
        locked=True)
    # print("Processed archive. Return status '%s'", status)
    if bestMatch:
        print("Matching archive '%s'", bestMatch)
    return status, bestMatch, intersections
| |
# coding: utf-8
"""
Provides test-related code that can be used by all tests.
"""
import os
import pystache
from pystache import defaults
from pystache.tests import examples
# Save a reference to the original function to avoid recursion.
_DEFAULT_TAG_ESCAPE = defaults.TAG_ESCAPE

_TESTS_DIR = os.path.dirname(pystache.tests.__file__)

DATA_DIR = os.path.join(_TESTS_DIR, 'data')  # i.e. 'pystache/tests/data'.
EXAMPLES_DIR = os.path.dirname(examples.__file__)
PACKAGE_DIR = os.path.dirname(pystache.__file__)
PROJECT_DIR = os.path.join(PACKAGE_DIR, '..')

# TEXT_DOCTEST_PATHS: the paths to text files (i.e. non-module files)
# containing doctests.  The paths should be relative to the project directory.
TEXT_DOCTEST_PATHS = ['README.md']

# File-name prefix identifying unit-test modules (used by test collection).
UNITTEST_FILE_PREFIX = "test_"
def get_spec_test_dir(project_dir):
    """Return the path to the mustache spec tests, given the project root."""
    subdirs = ('ext', 'spec', 'specs')
    return os.path.join(project_dir, *subdirs)
def html_escape(u):
"""
An html escape function that behaves the same in both Python 2 and 3.
This function is needed because single quotes are escaped in Python 3
(to '''), but not in Python 2.
The global defaults.TAG_ESCAPE can be set to this function in the
setUp() and tearDown() of unittest test cases, for example, for
consistent test results.
"""
u = _DEFAULT_TAG_ESCAPE(u)
return u.replace("'", ''')
def get_data_path(file_name=None):
    """Return the path to a file in the test data directory."""
    # With no argument, return the data directory itself.
    return os.path.join(DATA_DIR, file_name if file_name is not None else "")
# Functions related to get_module_names().
def _find_files(root_dir, should_include):
"""
Return a list of paths to all modules below the given directory.
Arguments:
should_include: a function that accepts a file path and returns True or False.
"""
paths = [] # Return value.
is_module = lambda path: path.endswith(".py")
# os.walk() is new in Python 2.3
# http://docs.python.org/library/os.html#os.walk
for dir_path, dir_names, file_names in os.walk(root_dir):
new_paths = [os.path.join(dir_path, file_name) for file_name in file_names]
new_paths = filter(is_module, new_paths)
new_paths = filter(should_include, new_paths)
paths.extend(new_paths)
return paths
def _make_module_names(package_dir, paths):
    """
    Return a list of fully-qualified module names given a list of module paths.

    """
    package_dir = os.path.abspath(package_dir)
    package_name = os.path.split(package_dir)[1]
    prefix_length = len(package_dir)

    names = []
    for module_path in paths:
        abs_path = os.path.abspath(module_path)
        # Strip the package-dir prefix and the extension,
        # e.g. <package_dir>/subpackage/module.py -> /subpackage/module
        rel_path = os.path.splitext(abs_path[prefix_length:])[0]

        parts = []
        while True:
            rel_path, tail = os.path.split(rel_path)
            if not tail:
                break
            parts.insert(0, tail)
        # Prepend the package itself, e.g. ['pkg', 'subpackage', 'module'].
        parts.insert(0, package_name)
        names.append(".".join(parts))

    return names
def get_module_names(package_dir=None, should_include=None):
    """
    Return a sorted list of fully-qualified module names in the given package.

    """
    if package_dir is None:
        package_dir = PACKAGE_DIR
    if should_include is None:
        # Default: accept every module.
        should_include = lambda path: True

    module_paths = _find_files(package_dir, should_include)
    return sorted(_make_module_names(package_dir, module_paths))
class AssertStringMixin:
    """A unittest.TestCase mixin to check string equality."""

    def assertString(self, actual, expected, format=None):
        """
        Assert that the given strings are equal and have the same type.

        Arguments:

          format: a format string containing a single conversion specifier %s.
            Defaults to "%s".

        """
        if format is None:
            format = "%s"

        # Show both friendly and literal versions.
        details = """String mismatch: %%s
Expected: \"""%s\"""
Actual: \"""%s\"""
Expected: %s
Actual: %s""" % (expected, actual, repr(expected), repr(actual))

        def make_message(reason):
            # Substitute the failure reason into the details, then apply
            # the caller-supplied format.
            description = details % reason
            return format % description

        # Compare characters first, so a content mismatch is reported before
        # a type mismatch.
        self.assertEqual(actual, expected, make_message("different characters"))
        reason = "types different: %s != %s (actual)" % (repr(type(expected)), repr(type(actual)))
        self.assertEqual(type(expected), type(actual), make_message(reason))
class AssertIsMixin:
    """A unittest.TestCase mixin adding assertIs()."""

    # unittest.assertIs() is not available until Python 2.7:
    # http://docs.python.org/library/unittest.html#unittest.TestCase.assertIsNone
    def assertIs(self, first, second):
        """Assert that `first` and `second` are the same object."""
        message = "%s is not %s" % (repr(first), repr(second))
        self.assertTrue(first is second, msg=message)
class AssertExceptionMixin:
    """A unittest.TestCase mixin adding assertException()."""
    # unittest.assertRaisesRegexp() is not available until Python 2.7:
    # http://docs.python.org/library/unittest.html#unittest.TestCase.assertRaisesRegexp

    def assertException(self, exception_type, msg, callable, *args, **kwds):
        """
        Assert that calling callable(*args, **kwds) raises exception_type
        whose str() equals msg.
        """
        try:
            callable(*args, **kwds)
        # "except X as e" replaces the Python-2-only "except X, e" syntax
        # (valid on Python 2.6+ and required on Python 3).
        except exception_type as err:
            self.assertEqual(str(err), msg)
        else:
            # Raise the "no exception occurred" failure *outside* the try
            # block: the old code raised it inside, where it could be caught
            # by the except clause itself whenever exception_type was a broad
            # class such as Exception, silently masking the failure.
            raise Exception("Expected exception: %s: %s" % (exception_type, repr(msg)))
class SetupDefaults(object):
    """
    Mix this class in to a unittest.TestCase for standard defaults.

    This class allows for consistent test results across Python 2/3.
    """

    def setup_defaults(self):
        """Save the current module defaults and install strict ASCII ones."""
        # Remember the originals so teardown_defaults() can restore them.
        self.original_decode_errors = defaults.DECODE_ERRORS
        self.original_file_encoding = defaults.FILE_ENCODING
        self.original_string_encoding = defaults.STRING_ENCODING
        defaults.DECODE_ERRORS = 'strict'
        defaults.FILE_ENCODING = 'ascii'
        defaults.STRING_ENCODING = 'ascii'

    def teardown_defaults(self):
        """Restore the defaults saved by setup_defaults()."""
        defaults.DECODE_ERRORS = self.original_decode_errors
        defaults.FILE_ENCODING = self.original_file_encoding
        defaults.STRING_ENCODING = self.original_string_encoding
class Attachable(object):
    """
    A class that attaches all constructor named parameters as attributes.

    For example--

    >>> obj = Attachable(foo=42, size="of the universe")
    >>> repr(obj)
    "Attachable(foo=42, size='of the universe')"
    >>> obj.foo
    42
    >>> obj.size
    'of the universe'
    """

    def __init__(self, **kwargs):
        self.__args__ = kwargs
        # items() works on both Python 2 and 3; iteritems() is Python-2 only
        # and raises AttributeError on Python 3.
        for arg, value in kwargs.items():
            setattr(self, arg, value)

    def __repr__(self):
        # Sort by keyword name so the representation is deterministic.
        return "%s(%s)" % (self.__class__.__name__,
                           ", ".join("%s=%s" % (k, repr(v))
                                     for k, v in sorted(self.__args__.items())))
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import webob
from cinder.api.openstack import wsgi
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
class RequestTest(test.TestCase):
    """Tests for wsgi.Request: content-type negotiation, language
    negotiation, and the per-request resource cache."""

    def test_content_type_missing(self):
        request = wsgi.Request.blank('/tests/123', method='POST')
        request.body = "<body />"
        self.assertIsNone(request.get_content_type())

    def test_content_type_unsupported(self):
        request = wsgi.Request.blank('/tests/123', method='POST')
        request.headers["Content-Type"] = "text/html"
        request.body = "asdf<br />"
        self.assertRaises(exception.InvalidContentType,
                          request.get_content_type)

    def test_content_type_with_charset(self):
        # The charset parameter must be stripped from the media type.
        request = wsgi.Request.blank('/tests/123')
        request.headers["Content-Type"] = "application/json; charset=UTF-8"
        result = request.get_content_type()
        self.assertEqual("application/json", result)

    def test_content_type_from_accept(self):
        # Each supported Accept media type should be echoed back unchanged.
        for content_type in ('application/xml',
                             'application/vnd.openstack.volume+xml',
                             'application/json',
                             'application/vnd.openstack.volume+json'):
            request = wsgi.Request.blank('/tests/123')
            request.headers["Accept"] = content_type
            result = request.best_match_content_type()
            self.assertEqual(content_type, result)

    def test_content_type_from_accept_best(self):
        request = wsgi.Request.blank('/tests/123')
        request.headers["Accept"] = "application/xml, application/json"
        result = request.best_match_content_type()
        self.assertEqual("application/json", result)
        # Quality factors take precedence over header ordering.
        request = wsgi.Request.blank('/tests/123')
        request.headers["Accept"] = ("application/json; q=0.3, "
                                     "application/xml; q=0.9")
        result = request.best_match_content_type()
        self.assertEqual("application/xml", result)

    def test_content_type_from_query_extension(self):
        request = wsgi.Request.blank('/tests/123.xml')
        result = request.best_match_content_type()
        self.assertEqual("application/xml", result)
        request = wsgi.Request.blank('/tests/123.json')
        result = request.best_match_content_type()
        self.assertEqual("application/json", result)
        # Unknown extensions fall back to the JSON default.
        request = wsgi.Request.blank('/tests/123.invalid')
        result = request.best_match_content_type()
        self.assertEqual("application/json", result)

    def test_content_type_accept_and_query_extension(self):
        # The URL extension wins over the Accept header.
        request = wsgi.Request.blank('/tests/123.xml')
        request.headers["Accept"] = "application/json"
        result = request.best_match_content_type()
        self.assertEqual("application/xml", result)

    def test_content_type_accept_default(self):
        request = wsgi.Request.blank('/tests/123.unsupported')
        request.headers["Accept"] = "application/unsupported1"
        result = request.best_match_content_type()
        self.assertEqual("application/json", result)

    def test_best_match_language(self):
        # Test that we are actually invoking language negotiation by webob
        request = wsgi.Request.blank('/')
        accepted = 'unknown-lang'
        request.headers = {'Accept-Language': accepted}

        def fake_best_match(self, offers, default_match=None):
            # Match would return None, if requested lang is not found
            return None
        self.stubs.SmartSet(request.accept_language,
                            'best_match', fake_best_match)
        self.assertIsNone(request.best_match_language())
        # If accept-language is not included or empty, match should be None
        request.headers = {'Accept-Language': ''}
        self.assertIsNone(request.best_match_language())
        request.headers.pop('Accept-Language')
        self.assertIsNone(request.best_match_language())

    def test_cache_and_retrieve_resources(self):
        request = wsgi.Request.blank('/foo')
        # Test that trying to retrieve a cached object on
        # an empty cache fails gracefully
        self.assertIsNone(request.cached_resource())
        self.assertIsNone(request.cached_resource_by_id('r-0'))
        resources = []
        for x in range(3):
            resources.append({'id': 'r-%s' % x})
        # Cache an empty list of resources using the default name
        request.cache_resource([])
        self.assertEqual({}, request.cached_resource())
        self.assertIsNone(request.cached_resource('r-0'))
        # Cache some resources
        request.cache_resource(resources[:2])
        # Cache one resource
        request.cache_resource(resources[2])
        # Cache a different resource name
        other_resource = {'id': 'o-0'}
        request.cache_resource(other_resource, name='other-resource')
        self.assertEqual(resources[0], request.cached_resource_by_id('r-0'))
        self.assertEqual(resources[1], request.cached_resource_by_id('r-1'))
        self.assertEqual(resources[2], request.cached_resource_by_id('r-2'))
        self.assertIsNone(request.cached_resource_by_id('r-3'))
        self.assertEqual({'r-0': resources[0],
                          'r-1': resources[1],
                          'r-2': resources[2]}, request.cached_resource())
        self.assertEqual(other_resource,
                         request.cached_resource_by_id('o-0',
                                                       name='other-resource'))

    def test_cache_and_retrieve_volumes(self):
        self._test_cache_and_retrieve_resources('volume')

    def test_cache_and_retrieve_volume_types(self):
        self._test_cache_and_retrieve_resources('volume_type')

    def test_cache_and_retrieve_snapshots(self):
        self._test_cache_and_retrieve_resources('snapshot')

    def test_cache_and_retrieve_backups(self):
        self._test_cache_and_retrieve_resources('backup')

    def _test_cache_and_retrieve_resources(self, resource_name):
        """Generic helper for cache tests."""
        # The cache_db_*/get_db_* accessor names are derived from the
        # resource name (singular and plural forms).
        cache_all_func = 'cache_db_%ss' % resource_name
        cache_one_func = 'cache_db_%s' % resource_name
        get_db_all_func = 'get_db_%ss' % resource_name
        get_db_one_func = 'get_db_%s' % resource_name
        r = wsgi.Request.blank('/foo')
        resources = []
        for x in range(3):
            resources.append({'id': 'id%s' % x})
        # Store 2
        getattr(r, cache_all_func)(resources[:2])
        # Store 1
        getattr(r, cache_one_func)(resources[2])
        self.assertEqual(resources[0], getattr(r, get_db_one_func)('id0'))
        self.assertEqual(resources[1], getattr(r, get_db_one_func)('id1'))
        self.assertEqual(resources[2], getattr(r, get_db_one_func)('id2'))
        self.assertIsNone(getattr(r, get_db_one_func)('id3'))
        self.assertEqual({'id0': resources[0],
                          'id1': resources[1],
                          'id2': resources[2]}, getattr(r, get_db_all_func)())
class ActionDispatcherTest(test.TestCase):
    """Tests for wsgi.ActionDispatcher method selection."""

    def test_dispatch(self):
        # A named action routes to the matching attribute.
        dispatcher = wsgi.ActionDispatcher()
        dispatcher.create = lambda x: 'pants'
        self.assertEqual('pants', dispatcher.dispatch({}, action='create'))

    def test_dispatch_action_None(self):
        # A None action falls through to the default handler.
        dispatcher = wsgi.ActionDispatcher()
        dispatcher.create = lambda x: 'pants'
        dispatcher.default = lambda x: 'trousers'
        self.assertEqual('trousers', dispatcher.dispatch({}, action=None))

    def test_dispatch_default(self):
        # An unknown action also falls through to the default handler.
        dispatcher = wsgi.ActionDispatcher()
        dispatcher.create = lambda x: 'pants'
        dispatcher.default = lambda x: 'trousers'
        self.assertEqual('trousers', dispatcher.dispatch({}, action='update'))
class DictSerializerTest(test.TestCase):
    """The base DictSerializer should serialize any input to ''."""

    def test_dispatch_default(self):
        base_serializer = wsgi.DictSerializer()
        self.assertEqual('', base_serializer.serialize({}, 'update'))
class XMLDictSerializerTest(test.TestCase):
    """Tests for serializing a dict to XML."""

    def test_xml(self):
        data = dict(servers=dict(a=(2, 3)))
        expected_xml = '<serversxmlns="asdf"><a>(2,3)</a></servers>'
        xml_serializer = wsgi.XMLDictSerializer(xmlns="asdf")
        # Strip whitespace so the comparison ignores pretty-printing.
        actual = xml_serializer.serialize(data)
        actual = actual.replace('\n', '').replace(' ', '')
        self.assertEqual(expected_xml, actual)
class JSONDictSerializerTest(test.TestCase):
    """Tests for serializing a dict to JSON."""

    def test_json(self):
        data = dict(servers=dict(a=(2, 3)))
        expected_json = '{"servers":{"a":[2,3]}}'
        json_serializer = wsgi.JSONDictSerializer()
        # Strip whitespace so the comparison ignores formatting.
        actual = json_serializer.serialize(data)
        actual = actual.replace('\n', '').replace(' ', '')
        self.assertEqual(expected_json, actual)
class TextDeserializerTest(test.TestCase):
    """The base TextDeserializer should deserialize any input to {}."""

    def test_dispatch_default(self):
        base_deserializer = wsgi.TextDeserializer()
        self.assertEqual({}, base_deserializer.deserialize({}, 'update'))
class JSONDeserializerTest(test.TestCase):
    """Tests for deserializing JSON request bodies."""

    def test_json(self):
        # The deserialized structure is wrapped in a top-level 'body' key.
        data = """{"a": {
                "a1": "1",
                "a2": "2",
                "bs": ["1", "2", "3", {"c": {"c1": "1"}}],
                "d": {"e": "1"},
                "f": "1"}}"""
        as_dict = {
            'body': {
                'a': {
                    'a1': '1',
                    'a2': '2',
                    'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
                    'd': {'e': '1'},
                    'f': '1',
                },
            },
        }
        deserializer = wsgi.JSONDeserializer()
        self.assertEqual(as_dict, deserializer.deserialize(data))
class XMLDeserializerTest(test.TestCase):
    """Tests for deserializing XML request bodies."""

    def test_xml(self):
        xml = """
            <a a1="1" a2="2">
              <bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs>
              <d><e>1</e></d>
              <f>1</f>
            </a>
            """.strip()
        as_dict = {
            'body': {
                'a': {
                    'a1': '1',
                    'a2': '2',
                    'bs': ['1', '2', '3', {'c': {'c1': '1'}}],
                    'd': {'e': '1'},
                    'f': '1',
                },
            },
        }
        # 'plurals' tells the deserializer which container tags hold
        # repeated child elements (here: <bs> holds <b> items).
        metadata = {'plurals': {'bs': 'b', 'ts': 't'}}
        deserializer = wsgi.XMLDeserializer(metadata=metadata)
        self.assertEqual(as_dict, deserializer.deserialize(xml))

    def test_xml_empty(self):
        # An empty element deserializes to an empty dict.
        xml = """<a></a>"""
        as_dict = {"body": {"a": {}}}
        deserializer = wsgi.XMLDeserializer()
        self.assertEqual(as_dict, deserializer.deserialize(xml))
class MetadataXMLDeserializerTest(test.TestCase):
    """Tests for extracting <meta> key/value pairs from XML."""

    def test_xml_meta_parsing_special_character(self):
        """Test XML meta parsing with special characters.

        Test that when a SaxParser splits a string containing special
        characters into multiple childNodes there are no issues extracting
        the text.
        """
        # '&' forces the parser to split 'value&3' across text nodes.
        meta_xml_str = """
            <metadata>
                <meta key="key3">value&3</meta>
                <meta key="key2">value2</meta>
                <meta key="key1">value1</meta>
            </metadata>
            """.strip()
        meta_expected = {'key1': 'value1',
                         'key2': 'value2',
                         'key3': 'value&3'}
        meta_deserializer = wsgi.MetadataXMLDeserializer()
        document = wsgi.utils.safe_minidom_parse_string(meta_xml_str)
        root_node = document.childNodes[0]
        meta_extracted = meta_deserializer.extract_metadata(root_node)
        self.assertEqual(meta_expected, meta_extracted)
class ResourceTest(test.TestCase):
    """Tests for wsgi.Resource: request dispatch, action routing, body
    handling, (de)serialization, and controller extension processing."""

    def test_resource_call(self):
        class Controller(object):
            def index(self, req):
                return 'off'
        req = webob.Request.blank('/tests')
        app = fakes.TestRouter(Controller())
        response = req.get_response(app)
        self.assertEqual('off', response.body)
        self.assertEqual(200, response.status_int)

    def test_resource_not_authorized(self):
        # NotAuthorized from a controller maps to HTTP 403.
        class Controller(object):
            def index(self, req):
                raise exception.NotAuthorized()
        req = webob.Request.blank('/tests')
        app = fakes.TestRouter(Controller())
        response = req.get_response(app)
        self.assertEqual(403, response.status_int)

    def test_dispatch(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        method, _extensions = resource.get_method(None, 'index', None, '')
        actual = resource.dispatch(method, None, {'pants': 'off'})
        expected = 'off'
        self.assertEqual(expected, actual)

    def test_get_method_undefined_controller_action(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        self.assertRaises(AttributeError, resource.get_method,
                          None, 'create', None, '')

    def test_get_method_action_json(self):
        # The wsgi.action decorator routes a JSON 'action' body to the
        # matching handler method.
        class Controller(wsgi.Controller):
            @wsgi.action('fooAction')
            def _action_foo(self, req, id, body):
                return body
        controller = Controller()
        resource = wsgi.Resource(controller)
        method, _extensions = resource.get_method(None, 'action',
                                                  'application/json',
                                                  '{"fooAction": true}')
        self.assertEqual(controller._action_foo, method)

    def test_get_method_action_xml(self):
        class Controller(wsgi.Controller):
            @wsgi.action('fooAction')
            def _action_foo(self, req, id, body):
                return body
        controller = Controller()
        resource = wsgi.Resource(controller)
        method, _extensions = resource.get_method(
            None, 'action', 'application/xml', '<fooAction>true</fooAction>')
        self.assertEqual(controller._action_foo, method)

    def test_get_method_action_bad_body(self):
        class Controller(wsgi.Controller):
            @wsgi.action('fooAction')
            def _action_foo(self, req, id, body):
                return body
        controller = Controller()
        resource = wsgi.Resource(controller)
        self.assertRaises(exception.MalformedRequestBody, resource.get_method,
                          None, 'action', 'application/json', '{}')

    def test_get_method_unknown_controller_action(self):
        class Controller(wsgi.Controller):
            @wsgi.action('fooAction')
            def _action_foo(self, req, id, body):
                return body
        controller = Controller()
        resource = wsgi.Resource(controller)
        self.assertRaises(KeyError, resource.get_method,
                          None, 'action', 'application/json',
                          '{"barAction": true}')

    def test_get_method_action_method(self):
        # When the controller defines an explicit 'action' method, the body
        # is never parsed.
        class Controller(object):
            def action(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        # NOTE(review): the closing tag below is missing its '>'.  This looks
        # harmless since the body is not parsed on this path, but confirm it
        # is intentional.
        method, _extensions = resource.get_method(None, 'action',
                                                  'application/xml',
                                                  '<fooAction>true</fooAction')
        self.assertEqual(controller.action, method)

    def test_get_action_args(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        # 'controller' and 'format' entries are stripped from routing args.
        env = {
            'wsgiorg.routing_args': [None, {
                'controller': None,
                'format': None,
                'action': 'update',
                'id': 12,
            }],
        }
        expected = {'action': 'update', 'id': 12}
        self.assertEqual(expected, resource.get_action_args(env))

    def test_get_body_bad_content(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        request = wsgi.Request.blank('/', method='POST')
        request.headers['Content-Type'] = 'application/none'
        request.body = 'foo'
        content_type, body = resource.get_body(request)
        self.assertIsNone(content_type)
        self.assertEqual('', body)

    def test_get_body_no_content_type(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        request = wsgi.Request.blank('/', method='POST')
        request.body = 'foo'
        content_type, body = resource.get_body(request)
        self.assertIsNone(content_type)
        self.assertEqual('', body)

    def test_get_body_no_content_body(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        request = wsgi.Request.blank('/', method='POST')
        request.headers['Content-Type'] = 'application/json'
        request.body = ''
        content_type, body = resource.get_body(request)
        self.assertIsNone(content_type)
        self.assertEqual('', body)

    def test_get_body(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        request = wsgi.Request.blank('/', method='POST')
        request.headers['Content-Type'] = 'application/json'
        request.body = 'foo'
        content_type, body = resource.get_body(request)
        self.assertEqual('application/json', content_type)
        self.assertEqual('foo', body)

    def test_deserialize_badtype(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        self.assertRaises(exception.InvalidContentType,
                          resource.deserialize,
                          controller.index, 'application/none', 'foo')

    def test_deserialize_default(self):
        class JSONDeserializer(object):
            def deserialize(self, body):
                return 'json'

        class XMLDeserializer(object):
            def deserialize(self, body):
                return 'xml'

        class Controller(object):
            @wsgi.deserializers(xml=XMLDeserializer)
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller, json=JSONDeserializer)
        obj = resource.deserialize(controller.index, 'application/json', 'foo')
        self.assertEqual('json', obj)

    def test_deserialize_decorator(self):
        # A method-level deserializer decorator overrides resource defaults.
        class JSONDeserializer(object):
            def deserialize(self, body):
                return 'json'

        class XMLDeserializer(object):
            def deserialize(self, body):
                return 'xml'

        class Controller(object):
            @wsgi.deserializers(xml=XMLDeserializer)
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller, json=JSONDeserializer)
        obj = resource.deserialize(controller.index, 'application/xml', 'foo')
        self.assertEqual('xml', obj)

    def test_register_actions(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants

        class ControllerExtended(wsgi.Controller):
            @wsgi.action('fooAction')
            def _action_foo(self, req, id, body):
                return body

            @wsgi.action('barAction')
            def _action_bar(self, req, id, body):
                return body
        controller = Controller()
        resource = wsgi.Resource(controller)
        self.assertEqual({}, resource.wsgi_actions)
        extended = ControllerExtended()
        resource.register_actions(extended)
        self.assertEqual({'fooAction': extended._action_foo,
                          'barAction': extended._action_bar, },
                         resource.wsgi_actions)

    def test_register_extensions(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants

        class ControllerExtended(wsgi.Controller):
            @wsgi.extends
            def index(self, req, resp_obj, pants=None):
                return None

            @wsgi.extends(action='fooAction')
            def _action_foo(self, req, resp, id, body):
                return None
        controller = Controller()
        resource = wsgi.Resource(controller)
        self.assertEqual({}, resource.wsgi_extensions)
        self.assertEqual({}, resource.wsgi_action_extensions)
        extended = ControllerExtended()
        resource.register_extensions(extended)
        self.assertEqual({'index': [extended.index]}, resource.wsgi_extensions)
        self.assertEqual({'fooAction': [extended._action_foo]},
                         resource.wsgi_action_extensions)

    def test_get_method_extensions(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants

        class ControllerExtended(wsgi.Controller):
            @wsgi.extends
            def index(self, req, resp_obj, pants=None):
                return None
        controller = Controller()
        extended = ControllerExtended()
        resource = wsgi.Resource(controller)
        resource.register_extensions(extended)
        method, extensions = resource.get_method(None, 'index', None, '')
        self.assertEqual(controller.index, method)
        self.assertEqual([extended.index], extensions)

    def test_get_method_action_extensions(self):
        class Controller(wsgi.Controller):
            def index(self, req, pants=None):
                return pants

            @wsgi.action('fooAction')
            def _action_foo(self, req, id, body):
                return body

        class ControllerExtended(wsgi.Controller):
            @wsgi.extends(action='fooAction')
            def _action_foo(self, req, resp_obj, id, body):
                return None
        controller = Controller()
        extended = ControllerExtended()
        resource = wsgi.Resource(controller)
        resource.register_extensions(extended)
        method, extensions = resource.get_method(None, 'action',
                                                 'application/json',
                                                 '{"fooAction": true}')
        self.assertEqual(controller._action_foo, method)
        self.assertEqual([extended._action_foo], extensions)

    def test_get_method_action_whitelist_extensions(self):
        # Registered actions can add brand-new methods (create/delete)
        # without any extensions attached.
        class Controller(wsgi.Controller):
            def index(self, req, pants=None):
                return pants

        class ControllerExtended(wsgi.Controller):
            @wsgi.action('create')
            def _create(self, req, body):
                pass

            @wsgi.action('delete')
            def _delete(self, req, id):
                pass
        controller = Controller()
        extended = ControllerExtended()
        resource = wsgi.Resource(controller)
        resource.register_actions(extended)
        method, extensions = resource.get_method(None, 'create',
                                                 'application/json',
                                                 '{"create": true}')
        self.assertEqual(extended._create, method)
        self.assertEqual([], extensions)
        method, extensions = resource.get_method(None, 'delete', None, None)
        self.assertEqual(extended._delete, method)
        self.assertEqual([], extensions)

    def test_pre_process_extensions_regular(self):
        # Plain-function extensions are deferred to post-processing,
        # in reverse registration order.
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        called = []

        def extension1(req, resp_obj):
            called.append(1)
            return None

        def extension2(req, resp_obj):
            called.append(2)
            return None
        extensions = [extension1, extension2]
        response, post = resource.pre_process_extensions(extensions, None, {})
        self.assertEqual([], called)
        self.assertIsNone(response)
        self.assertEqual([extension2, extension1], list(post))

    def test_pre_process_extensions_generator(self):
        # Generator extensions run up to their first yield during
        # pre-processing and resume (in reverse order) afterwards.
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        called = []

        def extension1(req):
            called.append('pre1')
            yield
            called.append('post1')

        def extension2(req):
            called.append('pre2')
            yield
            called.append('post2')
        extensions = [extension1, extension2]
        response, post = resource.pre_process_extensions(extensions, None, {})
        post = list(post)
        self.assertEqual(['pre1', 'pre2'], called)
        self.assertIsNone(response)
        self.assertEqual(2, len(post))
        self.assertTrue(inspect.isgenerator(post[0]))
        self.assertTrue(inspect.isgenerator(post[1]))
        for gen in post:
            try:
                gen.send(None)
            except StopIteration:
                continue
        self.assertEqual(['pre1', 'pre2', 'post2', 'post1'], called)

    def test_pre_process_extensions_generator_response(self):
        # A generator that yields a value short-circuits processing:
        # later extensions never run.
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        called = []

        def extension1(req):
            called.append('pre1')
            yield 'foo'

        def extension2(req):
            called.append('pre2')
        extensions = [extension1, extension2]
        response, post = resource.pre_process_extensions(extensions, None, {})
        self.assertEqual(['pre1'], called)
        self.assertEqual('foo', response)
        self.assertEqual([], post)

    def test_post_process_extensions_regular(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        called = []

        def extension1(req, resp_obj):
            called.append(1)
            return None

        def extension2(req, resp_obj):
            called.append(2)
            return None
        response = resource.post_process_extensions([extension2, extension1],
                                                    None, None, {})
        self.assertEqual([2, 1], called)
        self.assertIsNone(response)

    def test_post_process_extensions_regular_response(self):
        # A non-None return stops post-processing early.
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        called = []

        def extension1(req, resp_obj):
            called.append(1)
            return None

        def extension2(req, resp_obj):
            called.append(2)
            return 'foo'
        response = resource.post_process_extensions([extension2, extension1],
                                                    None, None, {})
        self.assertEqual([2], called)
        self.assertEqual('foo', response)

    def test_post_process_extensions_generator(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        called = []

        def extension1(req):
            yield
            called.append(1)

        def extension2(req):
            yield
            called.append(2)
        # Advance each generator to its first yield before handing it
        # to post-processing.
        ext1 = extension1(None)
        next(ext1)
        ext2 = extension2(None)
        next(ext2)
        response = resource.post_process_extensions([ext2, ext1],
                                                    None, None, {})
        self.assertEqual([2, 1], called)
        self.assertIsNone(response)

    def test_post_process_extensions_generator_response(self):
        class Controller(object):
            def index(self, req, pants=None):
                return pants
        controller = Controller()
        resource = wsgi.Resource(controller)
        called = []

        def extension1(req):
            yield
            called.append(1)

        def extension2(req):
            yield
            called.append(2)
            # A second yield with a value stops processing.
            yield 'foo'
        ext1 = extension1(None)
        next(ext1)
        ext2 = extension2(None)
        next(ext2)
        response = resource.post_process_extensions([ext2, ext1],
                                                    None, None, {})
        self.assertEqual([2], called)
        self.assertEqual('foo', response)
class ResponseObjectTest(test.TestCase):
    """Tests for wsgi.ResponseObject: status codes, case-insensitive
    headers, serializer binding and serialization."""

    def test_default_code(self):
        robj = wsgi.ResponseObject({})
        self.assertEqual(200, robj.code)

    def test_modified_code(self):
        robj = wsgi.ResponseObject({})
        robj._default_code = 202
        self.assertEqual(202, robj.code)

    def test_override_default_code(self):
        # An explicit code wins over the default.
        robj = wsgi.ResponseObject({}, code=404)
        self.assertEqual(404, robj.code)

    def test_override_modified_code(self):
        # An explicit code also wins over a later default change.
        robj = wsgi.ResponseObject({}, code=404)
        robj._default_code = 202
        self.assertEqual(404, robj.code)

    def test_set_header(self):
        # Header names are normalized to lower case on storage.
        robj = wsgi.ResponseObject({})
        robj['Header'] = 'foo'
        self.assertEqual({'header': 'foo'}, robj.headers)

    def test_get_header(self):
        # Header lookup is case-insensitive.
        robj = wsgi.ResponseObject({})
        robj['Header'] = 'foo'
        self.assertEqual('foo', robj['hEADER'])

    def test_del_header(self):
        robj = wsgi.ResponseObject({})
        robj['Header'] = 'foo'
        del robj['hEADER']
        self.assertNotIn('header', robj.headers)

    def test_header_isolation(self):
        # .headers returns a copy; mutating it must not affect the object.
        robj = wsgi.ResponseObject({})
        robj['Header'] = 'foo'
        hdrs = robj.headers
        hdrs['hEADER'] = 'bar'
        self.assertEqual('foo', robj['hEADER'])

    def test_default_serializers(self):
        robj = wsgi.ResponseObject({})
        self.assertEqual({}, robj.serializers)

    def test_bind_serializers(self):
        # Constructor-supplied serializers win over method-bound ones.
        robj = wsgi.ResponseObject({}, json='foo')
        robj._bind_method_serializers(dict(xml='bar', json='baz'))
        self.assertEqual(dict(xml='bar', json='foo'), robj.serializers)

    def test_get_serializer(self):
        # Serializer values are the media-type short names, so the lookup
        # result equals the mapped mtype directly.
        robj = wsgi.ResponseObject({}, json='json', xml='xml', atom='atom')
        for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
            _mtype, serializer = robj.get_serializer(content_type)
            self.assertEqual(mtype, serializer)

    def test_get_serializer_defaults(self):
        # With no bound serializers, lookup fails unless defaults are passed.
        robj = wsgi.ResponseObject({})
        default_serializers = dict(json='json', xml='xml', atom='atom')
        for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
            self.assertRaises(exception.InvalidContentType,
                              robj.get_serializer, content_type)
            _mtype, serializer = robj.get_serializer(content_type,
                                                     default_serializers)
            self.assertEqual(mtype, serializer)

    def test_serialize(self):
        class JSONSerializer(object):
            def serialize(self, obj):
                return 'json'

        class XMLSerializer(object):
            def serialize(self, obj):
                return 'xml'

        class AtomSerializer(object):
            def serialize(self, obj):
                return 'atom'
        robj = wsgi.ResponseObject({}, code=202,
                                   json=JSONSerializer,
                                   xml=XMLSerializer,
                                   atom=AtomSerializer)
        robj['X-header1'] = 'header1'
        robj['X-header2'] = 'header2'
        # Each media type must produce a response carrying the right
        # content type, custom headers, status and serialized body.
        for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
            request = wsgi.Request.blank('/tests/123')
            response = robj.serialize(request, content_type)
            self.assertEqual(content_type, response.headers['Content-Type'])
            self.assertEqual('header1', response.headers['X-header1'])
            self.assertEqual('header2', response.headers['X-header2'])
            self.assertEqual(202, response.status_int)
            self.assertEqual(mtype, response.body)
class ValidBodyTest(test.TestCase):
    """Tests for wsgi.Controller body/field validation helpers."""

    def setUp(self):
        super(ValidBodyTest, self).setUp()
        self.controller = wsgi.Controller()

    def test_is_valid_body(self):
        body = {'foo': {}}
        self.assertTrue(self.controller.is_valid_body(body, 'foo'))

    def test_is_valid_body_none(self):
        wsgi.Resource(controller=None)
        self.assertFalse(self.controller.is_valid_body(None, 'foo'))

    def test_is_valid_body_empty(self):
        wsgi.Resource(controller=None)
        self.assertFalse(self.controller.is_valid_body({}, 'foo'))

    def test_is_valid_body_no_entity(self):
        wsgi.Resource(controller=None)
        body = {'bar': {}}
        self.assertFalse(self.controller.is_valid_body(body, 'foo'))

    def test_is_valid_body_malformed_entity(self):
        # The entity value must be a dict, not a bare string.
        wsgi.Resource(controller=None)
        body = {'foo': 'bar'}
        self.assertFalse(self.controller.is_valid_body(body, 'foo'))

    def test_validate_string_length_with_name_too_long(self):
        name = 'a' * 256
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.validate_string_length,
                          name, 'Name', min_length=1, max_length=255,
                          remove_whitespaces=False)

    def test_validate_string_length_with_name_contains_white_spaces(
            self):
        # Trailing whitespace is stripped before the length check, so a
        # 255-char name plus a space still validates.
        body = {'name': 'a' * 255 + " "}
        self.controller.validate_string_length(
            body['name'], 'name', min_length=1, max_length=255,
            remove_whitespaces=True)

    def test_validate_name_and_description_with_name_too_long(self):
        body = {'name': 'a' * 256}
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.validate_name_and_description,
                          body)

    def test_validate_name_and_description_with_desc_too_long(self):
        body = {'description': 'a' * 256}
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.validate_name_and_description,
                          body)

    def test_validate_name_and_description_with_name_as_int(self):
        body = {'name': 1234}
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.validate_name_and_description,
                          body)

    def test_validate_name_and_description_with_desc_as_int(self):
        body = {'description': 1234}
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.validate_name_and_description,
                          body)

    def test_validate_name_and_description_with_name_zero_length(self):
        # NOTE(jdg): We allow zero length names currently, particularly
        # from Nova, changes to this require an API version bump
        body = {'name': ""}
        self.controller.validate_name_and_description(body)
        self.assertEqual('', body['name'])

    def test_validate_name_and_description_with_desc_zero_length(self):
        body = {'description': ""}
        self.controller.validate_name_and_description(body)
        self.assertEqual('', body['description'])

    def test_validate_name_and_description_with_name_contains_white_spaces(
            self):
        # Trailing whitespace is stripped in place from the body dict.
        body = {'name': 'a' * 255 + " "}
        self.controller.validate_name_and_description(body)
        self.assertEqual('a' * 255, body['name'])

    def test_validate_integer_greater_than_max_int_limit(self):
        value = (2 ** 31) + 1
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.validate_integer,
                          value, 'limit', min_value=-1, max_value=(2 ** 31))

    def test_validate_integer_less_than_min_int_limit(self):
        value = -12
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.validate_integer,
                          value, 'limit', min_value=-1, max_value=(2 ** 31))

    def test_validate_integer_invalid_limit(self):
        value = "should_be_int"
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.validate_integer,
                          value, 'limit', min_value=-1, max_value=(2 ** 31))
| |
import os
import os.path
import re
import codecs
import fnmatch
import logging
import itertools
from .utils import (PageNotFoundError, NamespaceNotFoundError,
split_page_url)
META_ENDPOINT = '_meta'

logger = logging.getLogger(__name__)

# Raw string so backslash escapes like \w reach the regex engine verbatim:
# the old non-raw literal relied on Python preserving invalid escape
# sequences, which is deprecated (DeprecationWarning since 3.6).  Note the
# original '\\/' collapsed to '\/' (an escaped slash), so the character
# class allows word chars, spaces, and . - ( ) [ ] / -- not a backslash.
valid_filename_pattern = re.compile(r'^[\w \.\-\(\)\[\]\/]+$', re.UNICODE)
class PageInfo(object):
    """Pairs a page URL with its on-disk path, reading content lazily."""

    def __init__(self, url, path):
        self.url = url
        self.path = path
        # Content is loaded on first access and cached here.
        self._content = None

    @property
    def content(self):
        """The page text, read from disk (UTF-8) on first access."""
        cached = self._content
        if cached is None:
            with codecs.open(self.path, 'r', encoding='utf-8') as handle:
                cached = handle.read()
            self._content = cached
        return cached
class FileSystem(object):
""" A class responsible for mapping page URLs to
file-system paths, and for scanning the file-system
to list existing pages.
"""
    def __init__(self, root, config):
        """Create a file-system mapper rooted at *root*.

        Arguments:
          root: absolute path of the wiki's page tree.
          config: configparser-like object; reads wiki.default_extension.
        """
        self.root = root
        # The following are populated later by start().
        self.wiki = None
        self.excluded = None
        self.page_extensions = None
        self.default_extension = config.get('wiki', 'default_extension')
        self.include_builtin_endpoints = True
def start(self, wiki):
self.wiki = wiki
self.page_extensions = list(set(
itertools.chain(*wiki.formatters.values())))
excluded = []
excluded += wiki.getSpecialFilenames()
excluded += wiki.scm.getSpecialFilenames()
self.excluded = [os.path.join(self.root, e) for e in excluded]
def init(self, wiki):
pass
def postInit(self):
pass
def getPageInfos(self, subdir=None):
basepath = self.root
if subdir is not None:
basepath = self.getPhysicalNamespacePath(subdir)
yield from self._getPageInfos(basepath)
if subdir is None and self.include_builtin_endpoints:
for ep in self.wiki.getBuiltinEndpoints():
yield from self._getPageInfos(ep.root_dir)
def getPageInfo(self, path):
logger.debug("Reading page info from: %s" % path)
for e in self.excluded:
if fnmatch.fnmatch(path, e):
return None
return self._getPageInfo(path)
def findPageInfo(self, url):
logger.debug("Searching for page: %s" % url)
path = self.getPhysicalPagePath(url)
return PageInfo(url, path)
def setPage(self, url, content):
path = self.getPhysicalPagePath(url, make_new=True)
logger.debug("Saving page '%s' to: %s" % (url, path))
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname, 0o775)
with codecs.open(path, 'w', encoding='utf-8') as f:
f.write(content)
return PageInfo(url, path)
def pageExists(self, url):
logger.debug("Searching for page: %s" % url)
try:
self.getPhysicalPagePath(url)
return True
except PageNotFoundError:
return False
def getPhysicalPagePath(self, url, make_new=False):
return self._getPhysicalPath(url, is_file=True, make_new=make_new)
def getPhysicalNamespacePath(self, url, make_new=False):
return self._getPhysicalPath(url, is_file=False, make_new=make_new)
def _getPageInfos(self, root_dir):
logger.debug("Scanning for pages in: %s" % root_dir)
for dirpath, dirnames, filenames in os.walk(root_dir):
incl_dirnames = []
for d in dirnames:
full_d = os.path.join(dirpath, d)
for e in self.excluded:
if fnmatch.fnmatch(full_d, e):
break
else:
incl_dirnames.append(d)
dirnames[:] = incl_dirnames
for filename in filenames:
path = os.path.join(dirpath, filename)
page_info = self.getPageInfo(path)
if page_info is not None:
yield page_info
def _getPageInfo(self, path):
meta = None
abs_path = os.path.abspath(path)
rel_path = os.path.relpath(path, self.root)
if rel_path.startswith(META_ENDPOINT + os.sep):
rel_path = rel_path[len(META_ENDPOINT) + 1:]
meta, rel_path = rel_path.split(os.sep, 1)
elif self.include_builtin_endpoints:
for ep in self.wiki.getBuiltinEndpoints():
if abs_path.startswith(ep.root_dir):
meta = ep.name
rel_path = abs_path[len(ep.root_dir) + 1:]
break
rel_path_split = os.path.splitext(rel_path)
ext = rel_path_split[1].lstrip('.')
name = rel_path_split[0].replace(os.sep, '/')
if len(ext) == 0:
return None
if (self.page_extensions is not None and
ext not in self.page_extensions):
return None
url = '/' + name
if meta:
url = "%s:/%s" % (meta.lower(), name)
return PageInfo(url, abs_path)
def _getPhysicalPath(self, url, is_file=True, make_new=False):
endpoint, url = split_page_url(url)
if url[0] != '/':
raise ValueError("Page URLs need to be absolute: " + url)
if '..' in url:
raise ValueError("Page URLs can't contain '..': " + url)
# Find the root directory in which we'll be searching for the
# page file.
root = self.root
if endpoint:
ep_info = self.wiki.getEndpoint(endpoint)
if ep_info is None or not ep_info.builtin:
root = os.path.join(self.root, META_ENDPOINT, endpoint)
else:
root = ep_info.root_dir
# Make the URL into a relative file-system path.
url_path = url[1:].replace('/', os.sep)
if url_path[0] == os.sep:
raise ValueError("Page URLs can only have one slash at the "
"beginning. Got: %s" % url)
# If we want a non-existing file's path, just build that.
if make_new:
if (url_path[-1] == os.sep or
not valid_filename_pattern.match(url_path)):
raise ValueError("Invalid URL: %s" % url_path)
return os.path.join(root, url_path + '.' + self.default_extension)
# Find the right file-system entry for this URL.
url_path = os.path.join(root, url_path)
if is_file:
dirname, basename = os.path.split(url_path)
if basename == '':
raise ValueError("Invalid URL: %s" % url_path)
if not os.path.isdir(dirname):
self._throwNotFoundError(url, root, is_file)
it = os.walk(dirname)
# TODO: This is weird, `itertools.islice` seems useless here.
for _, __, ___ in it:
filenames = ___
break
for filename in filenames:
name, ext = os.path.splitext(filename)
if name == basename:
return os.path.join(dirname, filename)
self._throwNotFoundError(url, root, is_file)
else:
if os.path.isdir(url_path):
return url_path
self._throwNotFoundError(url, root, is_file)
def _throwNotFoundError(self, url, searched, is_file):
if is_file:
raise PageNotFoundError("No such page '%s' in: %s" %
(url, searched))
else:
raise NamespaceNotFoundError("No such namespace '%s' in: %s" %
(url, searched))
| |
##########################################################################
#
# Copyright 2012 Jose Fonseca
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
from dxgi import *
from d3dcommon import *
from d3d11sdklayers import *
# HRESULT values specific to D3D11, plus two legacy D3DERR codes that are
# still listed alongside them.
HRESULT = MAKE_HRESULT([
    "D3D11_ERROR_FILE_NOT_FOUND",
    "D3D11_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS",
    "D3D11_ERROR_TOO_MANY_UNIQUE_VIEW_OBJECTS",
    "D3D11_ERROR_DEFERRED_CONTEXT_MAP_WITHOUT_INITIAL_DISCARD",
    "D3DERR_INVALIDCALL",
    "D3DERR_WASSTILLDRAWING",
])
# Forward declarations of the D3D11 interface hierarchy. Each
# Interface(name, base) pairs an interface with its parent interface;
# their methods are attached further below (`<iface>.methods += [...]`).
ID3D11DepthStencilState = Interface("ID3D11DepthStencilState", ID3D11DeviceChild)
ID3D11BlendState = Interface("ID3D11BlendState", ID3D11DeviceChild)
ID3D11RasterizerState = Interface("ID3D11RasterizerState", ID3D11DeviceChild)
ID3D11Resource = Interface("ID3D11Resource", ID3D11DeviceChild)
ID3D11Buffer = Interface("ID3D11Buffer", ID3D11Resource)
ID3D11Texture1D = Interface("ID3D11Texture1D", ID3D11Resource)
ID3D11Texture2D = Interface("ID3D11Texture2D", ID3D11Resource)
ID3D11Texture3D = Interface("ID3D11Texture3D", ID3D11Resource)
ID3D11View = Interface("ID3D11View", ID3D11DeviceChild)
ID3D11ShaderResourceView = Interface("ID3D11ShaderResourceView", ID3D11View)
ID3D11RenderTargetView = Interface("ID3D11RenderTargetView", ID3D11View)
ID3D11DepthStencilView = Interface("ID3D11DepthStencilView", ID3D11View)
ID3D11UnorderedAccessView = Interface("ID3D11UnorderedAccessView", ID3D11View)
ID3D11VertexShader = Interface("ID3D11VertexShader", ID3D11DeviceChild)
ID3D11HullShader = Interface("ID3D11HullShader", ID3D11DeviceChild)
ID3D11DomainShader = Interface("ID3D11DomainShader", ID3D11DeviceChild)
ID3D11GeometryShader = Interface("ID3D11GeometryShader", ID3D11DeviceChild)
ID3D11PixelShader = Interface("ID3D11PixelShader", ID3D11DeviceChild)
ID3D11ComputeShader = Interface("ID3D11ComputeShader", ID3D11DeviceChild)
ID3D11InputLayout = Interface("ID3D11InputLayout", ID3D11DeviceChild)
ID3D11SamplerState = Interface("ID3D11SamplerState", ID3D11DeviceChild)
ID3D11Asynchronous = Interface("ID3D11Asynchronous", ID3D11DeviceChild)
ID3D11Query = Interface("ID3D11Query", ID3D11Asynchronous)
ID3D11Predicate = Interface("ID3D11Predicate", ID3D11Query)
ID3D11Counter = Interface("ID3D11Counter", ID3D11Asynchronous)
ID3D11ClassInstance = Interface("ID3D11ClassInstance", ID3D11DeviceChild)
ID3D11ClassLinkage = Interface("ID3D11ClassLinkage", ID3D11DeviceChild)
ID3D11CommandList = Interface("ID3D11CommandList", ID3D11DeviceChild)
ID3D11Device = Interface("ID3D11Device", IUnknown)
# Input-assembler types: vertex layout classification and element
# description.
D3D11_INPUT_CLASSIFICATION = Enum("D3D11_INPUT_CLASSIFICATION", [
    "D3D11_INPUT_PER_VERTEX_DATA",
    "D3D11_INPUT_PER_INSTANCE_DATA",
])

# FakeEnum: a plain UINT that has one named special value.
D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET = FakeEnum(UINT, [
    "D3D11_APPEND_ALIGNED_ELEMENT",
])

D3D11_INPUT_ELEMENT_DESC = Struct("D3D11_INPUT_ELEMENT_DESC", [
    (LPCSTR, "SemanticName"),
    (UINT, "SemanticIndex"),
    (DXGI_FORMAT, "Format"),
    (UINT, "InputSlot"),
    (D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET, "AlignedByteOffset"),
    (D3D11_INPUT_CLASSIFICATION, "InputSlotClass"),
    (UINT, "InstanceDataStepRate"),
])

D3D11_FILL_MODE = Enum("D3D11_FILL_MODE", [
    "D3D11_FILL_WIREFRAME",
    "D3D11_FILL_SOLID",
])

# Primitive topologies, including the 1..32 control-point patch lists
# used with tessellation.
D3D11_PRIMITIVE_TOPOLOGY = Enum("D3D11_PRIMITIVE_TOPOLOGY", [
    "D3D11_PRIMITIVE_TOPOLOGY_UNDEFINED",
    "D3D11_PRIMITIVE_TOPOLOGY_POINTLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_LINELIST",
    "D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP",
    "D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST",
    "D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP",
    "D3D11_PRIMITIVE_TOPOLOGY_LINELIST_ADJ",
    "D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP_ADJ",
    "D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ",
    "D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ",
    "D3D11_PRIMITIVE_TOPOLOGY_1_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_2_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_3_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_4_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_5_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_6_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_7_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_8_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_9_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_10_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_11_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_12_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_13_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_14_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_15_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_16_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_17_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_18_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_19_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_20_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_21_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_22_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_23_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_24_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_25_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_26_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_27_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_28_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_29_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_30_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_31_CONTROL_POINT_PATCHLIST",
    "D3D11_PRIMITIVE_TOPOLOGY_32_CONTROL_POINT_PATCHLIST",
])

# Primitive types (as opposed to topologies), also with 1..32
# control-point patch variants.
D3D11_PRIMITIVE = Enum("D3D11_PRIMITIVE", [
    "D3D11_PRIMITIVE_UNDEFINED",
    "D3D11_PRIMITIVE_POINT",
    "D3D11_PRIMITIVE_LINE",
    "D3D11_PRIMITIVE_TRIANGLE",
    "D3D11_PRIMITIVE_LINE_ADJ",
    "D3D11_PRIMITIVE_TRIANGLE_ADJ",
    "D3D11_PRIMITIVE_1_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_2_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_3_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_4_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_5_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_6_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_7_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_8_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_9_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_10_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_11_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_12_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_13_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_14_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_15_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_16_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_17_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_18_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_19_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_20_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_21_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_22_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_23_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_24_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_25_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_26_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_27_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_28_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_29_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_30_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_31_CONTROL_POINT_PATCH",
    "D3D11_PRIMITIVE_32_CONTROL_POINT_PATCH",
])

D3D11_CULL_MODE = Enum("D3D11_CULL_MODE", [
    "D3D11_CULL_NONE",
    "D3D11_CULL_FRONT",
    "D3D11_CULL_BACK",
])

# Stream-output declaration entry.
D3D11_SO_DECLARATION_ENTRY = Struct("D3D11_SO_DECLARATION_ENTRY", [
    (UINT, "Stream"),
    (LPCSTR, "SemanticName"),
    (UINT, "SemanticIndex"),
    (BYTE, "StartComponent"),
    (BYTE, "ComponentCount"),
    (BYTE, "OutputSlot"),
])

D3D11_VIEWPORT = Struct("D3D11_VIEWPORT", [
    (FLOAT, "TopLeftX"),
    (FLOAT, "TopLeftY"),
    (FLOAT, "Width"),
    (FLOAT, "Height"),
    (FLOAT, "MinDepth"),
    (FLOAT, "MaxDepth"),
])

# Resource/view dimension discriminators. These drive the unions inside
# the view-description structs declared later in this file.
D3D11_RESOURCE_DIMENSION = Enum("D3D11_RESOURCE_DIMENSION", [
    "D3D11_RESOURCE_DIMENSION_UNKNOWN",
    "D3D11_RESOURCE_DIMENSION_BUFFER",
    "D3D11_RESOURCE_DIMENSION_TEXTURE1D",
    "D3D11_RESOURCE_DIMENSION_TEXTURE2D",
    "D3D11_RESOURCE_DIMENSION_TEXTURE3D",
])

D3D11_SRV_DIMENSION = Enum("D3D11_SRV_DIMENSION", [
    "D3D11_SRV_DIMENSION_UNKNOWN",
    "D3D11_SRV_DIMENSION_BUFFER",
    "D3D11_SRV_DIMENSION_TEXTURE1D",
    "D3D11_SRV_DIMENSION_TEXTURE1DARRAY",
    "D3D11_SRV_DIMENSION_TEXTURE2D",
    "D3D11_SRV_DIMENSION_TEXTURE2DARRAY",
    "D3D11_SRV_DIMENSION_TEXTURE2DMS",
    "D3D11_SRV_DIMENSION_TEXTURE2DMSARRAY",
    "D3D11_SRV_DIMENSION_TEXTURE3D",
    "D3D11_SRV_DIMENSION_TEXTURECUBE",
    "D3D11_SRV_DIMENSION_TEXTURECUBEARRAY",
    "D3D11_SRV_DIMENSION_BUFFEREX",
])

D3D11_DSV_DIMENSION = Enum("D3D11_DSV_DIMENSION", [
    "D3D11_DSV_DIMENSION_UNKNOWN",
    "D3D11_DSV_DIMENSION_TEXTURE1D",
    "D3D11_DSV_DIMENSION_TEXTURE1DARRAY",
    "D3D11_DSV_DIMENSION_TEXTURE2D",
    "D3D11_DSV_DIMENSION_TEXTURE2DARRAY",
    "D3D11_DSV_DIMENSION_TEXTURE2DMS",
    "D3D11_DSV_DIMENSION_TEXTURE2DMSARRAY",
])

D3D11_RTV_DIMENSION = Enum("D3D11_RTV_DIMENSION", [
    "D3D11_RTV_DIMENSION_UNKNOWN",
    "D3D11_RTV_DIMENSION_BUFFER",
    "D3D11_RTV_DIMENSION_TEXTURE1D",
    "D3D11_RTV_DIMENSION_TEXTURE1DARRAY",
    "D3D11_RTV_DIMENSION_TEXTURE2D",
    "D3D11_RTV_DIMENSION_TEXTURE2DARRAY",
    "D3D11_RTV_DIMENSION_TEXTURE2DMS",
    "D3D11_RTV_DIMENSION_TEXTURE2DMSARRAY",
    "D3D11_RTV_DIMENSION_TEXTURE3D",
])

D3D11_UAV_DIMENSION = Enum("D3D11_UAV_DIMENSION", [
    "D3D11_UAV_DIMENSION_UNKNOWN",
    "D3D11_UAV_DIMENSION_BUFFER",
    "D3D11_UAV_DIMENSION_TEXTURE1D",
    "D3D11_UAV_DIMENSION_TEXTURE1DARRAY",
    "D3D11_UAV_DIMENSION_TEXTURE2D",
    "D3D11_UAV_DIMENSION_TEXTURE2DARRAY",
    "D3D11_UAV_DIMENSION_TEXTURE3D",
])

D3D11_USAGE = Enum("D3D11_USAGE", [
    "D3D11_USAGE_DEFAULT",
    "D3D11_USAGE_IMMUTABLE",
    "D3D11_USAGE_DYNAMIC",
    "D3D11_USAGE_STAGING",
])

# Bitmask flag sets used by the resource-description structs below.
D3D11_BIND_FLAG = Flags(UINT, [
    "D3D11_BIND_VERTEX_BUFFER",
    "D3D11_BIND_INDEX_BUFFER",
    "D3D11_BIND_CONSTANT_BUFFER",
    "D3D11_BIND_SHADER_RESOURCE",
    "D3D11_BIND_STREAM_OUTPUT",
    "D3D11_BIND_RENDER_TARGET",
    "D3D11_BIND_DEPTH_STENCIL",
    "D3D11_BIND_UNORDERED_ACCESS",
])

D3D11_CPU_ACCESS_FLAG = Flags(UINT, [
    "D3D11_CPU_ACCESS_WRITE",
    "D3D11_CPU_ACCESS_READ",
])

D3D11_RESOURCE_MISC_FLAG = Flags(UINT, [
    "D3D11_RESOURCE_MISC_GENERATE_MIPS",
    "D3D11_RESOURCE_MISC_SHARED",
    "D3D11_RESOURCE_MISC_TEXTURECUBE",
    "D3D11_RESOURCE_MISC_DRAWINDIRECT_ARGS",
    "D3D11_RESOURCE_MISC_BUFFER_ALLOW_RAW_VIEWS",
    "D3D11_RESOURCE_MISC_BUFFER_STRUCTURED",
    "D3D11_RESOURCE_MISC_RESOURCE_CLAMP",
    "D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX",
    "D3D11_RESOURCE_MISC_GDI_COMPATIBLE",
])

D3D11_MAP = Enum("D3D11_MAP", [
    "D3D11_MAP_READ",
    "D3D11_MAP_WRITE",
    "D3D11_MAP_READ_WRITE",
    "D3D11_MAP_WRITE_DISCARD",
    "D3D11_MAP_WRITE_NO_OVERWRITE",
])

D3D11_MAP_FLAG = Flags(UINT, [
    "D3D11_MAP_FLAG_DO_NOT_WAIT",
])

D3D11_RAISE_FLAG = Flags(UINT, [
    "D3D11_RAISE_FLAG_DRIVER_INTERNAL_ERROR",
])

D3D11_CLEAR_FLAG = Flags(UINT, [
    "D3D11_CLEAR_DEPTH",
    "D3D11_CLEAR_STENCIL",
])

# D3D11_RECT is just Windows' RECT under another name.
D3D11_RECT = Alias("D3D11_RECT", RECT)
D3D11_BOX = Struct("D3D11_BOX", [
    (UINT, "left"),
    (UINT, "top"),
    (UINT, "front"),
    (UINT, "right"),
    (UINT, "bottom"),
    (UINT, "back"),
])
# Methods common to every device child (states, resources, views, etc.).
# `sideeffects=False` marks getters/setters that don't affect rendering.
ID3D11DeviceChild.methods += [
    StdMethod(Void, "GetDevice", [Out(Pointer(ObjPointer(ID3D11Device)), "ppDevice")]),
    StdMethod(HRESULT, "GetPrivateData", [(REFGUID, "guid"), Out(Pointer(UINT), "pDataSize"), Out(OpaquePointer(Void), "pData")], sideeffects=False),
    StdMethod(HRESULT, "SetPrivateData", [(REFGUID, "guid"), (UINT, "DataSize"), (OpaqueBlob(Const(Void), "DataSize"), "pData")], sideeffects=False),
    StdMethod(HRESULT, "SetPrivateDataInterface", [(REFGUID, "guid"), (OpaquePointer(Const(IUnknown)), "pData")], sideeffects=False),
]
# Depth-stencil state types.
D3D11_COMPARISON_FUNC = Enum("D3D11_COMPARISON_FUNC", [
    "D3D11_COMPARISON_NEVER",
    "D3D11_COMPARISON_LESS",
    "D3D11_COMPARISON_EQUAL",
    "D3D11_COMPARISON_LESS_EQUAL",
    "D3D11_COMPARISON_GREATER",
    "D3D11_COMPARISON_NOT_EQUAL",
    "D3D11_COMPARISON_GREATER_EQUAL",
    "D3D11_COMPARISON_ALWAYS",
])

D3D11_DEPTH_WRITE_MASK = Enum("D3D11_DEPTH_WRITE_MASK", [
    "D3D11_DEPTH_WRITE_MASK_ZERO",
    "D3D11_DEPTH_WRITE_MASK_ALL",
])

D3D11_STENCIL_OP = Enum("D3D11_STENCIL_OP", [
    "D3D11_STENCIL_OP_KEEP",
    "D3D11_STENCIL_OP_ZERO",
    "D3D11_STENCIL_OP_REPLACE",
    "D3D11_STENCIL_OP_INCR_SAT",
    "D3D11_STENCIL_OP_DECR_SAT",
    "D3D11_STENCIL_OP_INVERT",
    "D3D11_STENCIL_OP_INCR",
    "D3D11_STENCIL_OP_DECR",
])

D3D11_DEPTH_STENCILOP_DESC = Struct("D3D11_DEPTH_STENCILOP_DESC", [
    (D3D11_STENCIL_OP, "StencilFailOp"),
    (D3D11_STENCIL_OP, "StencilDepthFailOp"),
    (D3D11_STENCIL_OP, "StencilPassOp"),
    (D3D11_COMPARISON_FUNC, "StencilFunc"),
])

D3D11_DEPTH_STENCIL_DESC = Struct("D3D11_DEPTH_STENCIL_DESC", [
    (BOOL, "DepthEnable"),
    (D3D11_DEPTH_WRITE_MASK, "DepthWriteMask"),
    (D3D11_COMPARISON_FUNC, "DepthFunc"),
    (BOOL, "StencilEnable"),
    (UINT8, "StencilReadMask"),
    (UINT8, "StencilWriteMask"),
    (D3D11_DEPTH_STENCILOP_DESC, "FrontFace"),
    (D3D11_DEPTH_STENCILOP_DESC, "BackFace"),
])

ID3D11DepthStencilState.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_DEPTH_STENCIL_DESC), "pDesc")], sideeffects=False),
]

# Blend state types.
D3D11_BLEND = Enum("D3D11_BLEND", [
    "D3D11_BLEND_ZERO",
    "D3D11_BLEND_ONE",
    "D3D11_BLEND_SRC_COLOR",
    "D3D11_BLEND_INV_SRC_COLOR",
    "D3D11_BLEND_SRC_ALPHA",
    "D3D11_BLEND_INV_SRC_ALPHA",
    "D3D11_BLEND_DEST_ALPHA",
    "D3D11_BLEND_INV_DEST_ALPHA",
    "D3D11_BLEND_DEST_COLOR",
    "D3D11_BLEND_INV_DEST_COLOR",
    "D3D11_BLEND_SRC_ALPHA_SAT",
    "D3D11_BLEND_BLEND_FACTOR",
    "D3D11_BLEND_INV_BLEND_FACTOR",
    "D3D11_BLEND_SRC1_COLOR",
    "D3D11_BLEND_INV_SRC1_COLOR",
    "D3D11_BLEND_SRC1_ALPHA",
    "D3D11_BLEND_INV_SRC1_ALPHA",
])

D3D11_BLEND_OP = Enum("D3D11_BLEND_OP", [
    "D3D11_BLEND_OP_ADD",
    "D3D11_BLEND_OP_SUBTRACT",
    "D3D11_BLEND_OP_REV_SUBTRACT",
    "D3D11_BLEND_OP_MIN",
    "D3D11_BLEND_OP_MAX",
])

D3D11_COLOR_WRITE_ENABLE = Enum("D3D11_COLOR_WRITE_ENABLE", [
    "D3D11_COLOR_WRITE_ENABLE_ALL",
    "D3D11_COLOR_WRITE_ENABLE_RED",
    "D3D11_COLOR_WRITE_ENABLE_GREEN",
    "D3D11_COLOR_WRITE_ENABLE_BLUE",
    "D3D11_COLOR_WRITE_ENABLE_ALPHA",
])

D3D11_RENDER_TARGET_BLEND_DESC = Struct("D3D11_RENDER_TARGET_BLEND_DESC", [
    (BOOL, "BlendEnable"),
    (D3D11_BLEND, "SrcBlend"),
    (D3D11_BLEND, "DestBlend"),
    (D3D11_BLEND_OP, "BlendOp"),
    (D3D11_BLEND, "SrcBlendAlpha"),
    (D3D11_BLEND, "DestBlendAlpha"),
    (D3D11_BLEND_OP, "BlendOpAlpha"),
    (UINT8, "RenderTargetWriteMask"),
])

D3D11_BLEND_DESC = Struct("D3D11_BLEND_DESC", [
    (BOOL, "AlphaToCoverageEnable"),
    (BOOL, "IndependentBlendEnable"),
    (Array(D3D11_RENDER_TARGET_BLEND_DESC, 8), "RenderTarget"),
])

ID3D11BlendState.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_BLEND_DESC), "pDesc")], sideeffects=False),
]

# Rasterizer state.
D3D11_RASTERIZER_DESC = Struct("D3D11_RASTERIZER_DESC", [
    (D3D11_FILL_MODE, "FillMode"),
    (D3D11_CULL_MODE, "CullMode"),
    (BOOL, "FrontCounterClockwise"),
    (INT, "DepthBias"),
    (FLOAT, "DepthBiasClamp"),
    (FLOAT, "SlopeScaledDepthBias"),
    (BOOL, "DepthClipEnable"),
    (BOOL, "ScissorEnable"),
    (BOOL, "MultisampleEnable"),
    (BOOL, "AntialiasedLineEnable"),
])

ID3D11RasterizerState.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_RASTERIZER_DESC), "pDesc")], sideeffects=False),
]

# Initial/mapped resource data. The Blob size expression is evaluated by
# the generator against the accompanying resource description.
D3D11_SUBRESOURCE_DATA = Struct("D3D11_SUBRESOURCE_DATA", [
    (Blob(Const(Void), "_calcSubresourceSize(pDesc, {i}, {self}.SysMemPitch, {self}.SysMemSlicePitch)"), "pSysMem"),
    (UINT, "SysMemPitch"),
    (UINT, "SysMemSlicePitch"),
])

D3D11_MAPPED_SUBRESOURCE = Struct("D3D11_MAPPED_SUBRESOURCE", [
    (LinearPointer(Void, "_MappedSize"), "pData"),
    (UINT, "RowPitch"),
    (UINT, "DepthPitch"),
])

ID3D11Resource.methods += [
    StdMethod(Void, "GetType", [Out(Pointer(D3D11_RESOURCE_DIMENSION), "pResourceDimension")], sideeffects=False),
    StdMethod(Void, "SetEvictionPriority", [(UINT, "EvictionPriority")]),
    StdMethod(UINT, "GetEvictionPriority", [], sideeffects=False),
]

# Buffer and texture descriptions, each paired with the GetDesc method of
# the corresponding resource interface.
D3D11_BUFFER_DESC = Struct("D3D11_BUFFER_DESC", [
    (UINT, "ByteWidth"),
    (D3D11_USAGE, "Usage"),
    (D3D11_BIND_FLAG, "BindFlags"),
    (D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
    (D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
    (UINT, "StructureByteStride"),
])

ID3D11Buffer.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_BUFFER_DESC), "pDesc")], sideeffects=False),
]

D3D11_TEXTURE1D_DESC = Struct("D3D11_TEXTURE1D_DESC", [
    (UINT, "Width"),
    (UINT, "MipLevels"),
    (UINT, "ArraySize"),
    (DXGI_FORMAT, "Format"),
    (D3D11_USAGE, "Usage"),
    (D3D11_BIND_FLAG, "BindFlags"),
    (D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
    (D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
])

ID3D11Texture1D.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE1D_DESC), "pDesc")], sideeffects=False),
]

D3D11_TEXTURE2D_DESC = Struct("D3D11_TEXTURE2D_DESC", [
    (UINT, "Width"),
    (UINT, "Height"),
    (UINT, "MipLevels"),
    (UINT, "ArraySize"),
    (DXGI_FORMAT, "Format"),
    (DXGI_SAMPLE_DESC, "SampleDesc"),
    (D3D11_USAGE, "Usage"),
    (D3D11_BIND_FLAG, "BindFlags"),
    (D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
    (D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
])

ID3D11Texture2D.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE2D_DESC), "pDesc")], sideeffects=False),
]

D3D11_TEXTURE3D_DESC = Struct("D3D11_TEXTURE3D_DESC", [
    (UINT, "Width"),
    (UINT, "Height"),
    (UINT, "Depth"),
    (UINT, "MipLevels"),
    (DXGI_FORMAT, "Format"),
    (D3D11_USAGE, "Usage"),
    (D3D11_BIND_FLAG, "BindFlags"),
    (D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
    (D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
])

ID3D11Texture3D.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE3D_DESC), "pDesc")], sideeffects=False),
]

D3D11_TEXTURECUBE_FACE = Enum("D3D11_TEXTURECUBE_FACE", [
    "D3D11_TEXTURECUBE_FACE_POSITIVE_X",
    "D3D11_TEXTURECUBE_FACE_NEGATIVE_X",
    "D3D11_TEXTURECUBE_FACE_POSITIVE_Y",
    "D3D11_TEXTURECUBE_FACE_NEGATIVE_Y",
    "D3D11_TEXTURECUBE_FACE_POSITIVE_Z",
    "D3D11_TEXTURECUBE_FACE_NEGATIVE_Z",
])

ID3D11View.methods += [
    StdMethod(Void, "GetResource", [Out(Pointer(ObjPointer(ID3D11Resource)), "ppResource")]),
]
# Shader-resource-view description: one per-dimension sub-struct, then a
# desc struct whose union is discriminated by `ViewDimension`.
D3D11_BUFFER_SRV = Struct("D3D11_BUFFER_SRV", [
    (UINT, "FirstElement"),
    (UINT, "NumElements"),
])

D3D11_BUFFEREX_SRV_FLAG = Flags(UINT, [
    "D3D11_BUFFEREX_SRV_FLAG_RAW",
])

D3D11_BUFFEREX_SRV = Struct("D3D11_BUFFEREX_SRV", [
    (UINT, "FirstElement"),
    (UINT, "NumElements"),
    (D3D11_BUFFEREX_SRV_FLAG, "Flags"),
])

D3D11_TEX1D_SRV = Struct("D3D11_TEX1D_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
])

D3D11_TEX1D_ARRAY_SRV = Struct("D3D11_TEX1D_ARRAY_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX2D_SRV = Struct("D3D11_TEX2D_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
])

D3D11_TEX2D_ARRAY_SRV = Struct("D3D11_TEX2D_ARRAY_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX3D_SRV = Struct("D3D11_TEX3D_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
])

D3D11_TEXCUBE_SRV = Struct("D3D11_TEXCUBE_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
])

D3D11_TEXCUBE_ARRAY_SRV = Struct("D3D11_TEXCUBE_ARRAY_SRV", [
    (UINT, "MostDetailedMip"),
    (UINT, "MipLevels"),
    (UINT, "First2DArrayFace"),
    (UINT, "NumCubes"),
])

D3D11_TEX2DMS_SRV = Struct("D3D11_TEX2DMS_SRV", [
    (UINT, "UnusedField_NothingToDefine"),
])

D3D11_TEX2DMS_ARRAY_SRV = Struct("D3D11_TEX2DMS_ARRAY_SRV", [
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_SHADER_RESOURCE_VIEW_DESC = Struct("D3D11_SHADER_RESOURCE_VIEW_DESC", [
    (DXGI_FORMAT, "Format"),
    (D3D11_SRV_DIMENSION, "ViewDimension"),
    (Union("{self}.ViewDimension", [
        ("D3D11_SRV_DIMENSION_BUFFER", D3D11_BUFFER_SRV, "Buffer"),
        ("D3D11_SRV_DIMENSION_TEXTURE1D", D3D11_TEX1D_SRV, "Texture1D"),
        ("D3D11_SRV_DIMENSION_TEXTURE1DARRAY", D3D11_TEX1D_ARRAY_SRV, "Texture1DArray"),
        ("D3D11_SRV_DIMENSION_TEXTURE2D", D3D11_TEX2D_SRV, "Texture2D"),
        ("D3D11_SRV_DIMENSION_TEXTURE2DARRAY", D3D11_TEX2D_ARRAY_SRV, "Texture2DArray"),
        ("D3D11_SRV_DIMENSION_TEXTURE2DMS", D3D11_TEX2DMS_SRV, "Texture2DMS"),
        ("D3D11_SRV_DIMENSION_TEXTURE2DMSARRAY", D3D11_TEX2DMS_ARRAY_SRV, "Texture2DMSArray"),
        ("D3D11_SRV_DIMENSION_TEXTURE3D", D3D11_TEX3D_SRV, "Texture3D"),
        ("D3D11_SRV_DIMENSION_TEXTURECUBE", D3D11_TEXCUBE_SRV, "TextureCube"),
        ("D3D11_SRV_DIMENSION_TEXTURECUBEARRAY", D3D11_TEXCUBE_ARRAY_SRV, "TextureCubeArray"),
        ("D3D11_SRV_DIMENSION_BUFFEREX", D3D11_BUFFEREX_SRV, "BufferEx"),
    ]), None),
])

ID3D11ShaderResourceView.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_SHADER_RESOURCE_VIEW_DESC), "pDesc")], sideeffects=False),
]

# Render-target-view description, same pattern as the SRV above.
D3D11_BUFFER_RTV = Struct("D3D11_BUFFER_RTV", [
    (UINT, "FirstElement"),
    (UINT, "NumElements"),
])

D3D11_TEX1D_RTV = Struct("D3D11_TEX1D_RTV", [
    (UINT, "MipSlice"),
])

D3D11_TEX1D_ARRAY_RTV = Struct("D3D11_TEX1D_ARRAY_RTV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX2D_RTV = Struct("D3D11_TEX2D_RTV", [
    (UINT, "MipSlice"),
])

D3D11_TEX2DMS_RTV = Struct("D3D11_TEX2DMS_RTV", [
    (UINT, "UnusedField_NothingToDefine"),
])

D3D11_TEX2D_ARRAY_RTV = Struct("D3D11_TEX2D_ARRAY_RTV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX2DMS_ARRAY_RTV = Struct("D3D11_TEX2DMS_ARRAY_RTV", [
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX3D_RTV = Struct("D3D11_TEX3D_RTV", [
    (UINT, "MipSlice"),
    (UINT, "FirstWSlice"),
    (UINT, "WSize"),
])

D3D11_RENDER_TARGET_VIEW_DESC = Struct("D3D11_RENDER_TARGET_VIEW_DESC", [
    (DXGI_FORMAT, "Format"),
    (D3D11_RTV_DIMENSION, "ViewDimension"),
    (Union("{self}.ViewDimension", [
        ("D3D11_RTV_DIMENSION_BUFFER", D3D11_BUFFER_RTV, "Buffer"),
        ("D3D11_RTV_DIMENSION_TEXTURE1D", D3D11_TEX1D_RTV, "Texture1D"),
        ("D3D11_RTV_DIMENSION_TEXTURE1DARRAY", D3D11_TEX1D_ARRAY_RTV, "Texture1DArray"),
        ("D3D11_RTV_DIMENSION_TEXTURE2D", D3D11_TEX2D_RTV, "Texture2D"),
        ("D3D11_RTV_DIMENSION_TEXTURE2DARRAY", D3D11_TEX2D_ARRAY_RTV, "Texture2DArray"),
        ("D3D11_RTV_DIMENSION_TEXTURE2DMS", D3D11_TEX2DMS_RTV, "Texture2DMS"),
        ("D3D11_RTV_DIMENSION_TEXTURE2DMSARRAY", D3D11_TEX2DMS_ARRAY_RTV, "Texture2DMSArray"),
        ("D3D11_RTV_DIMENSION_TEXTURE3D", D3D11_TEX3D_RTV, "Texture3D"),
    ]), None),
])

ID3D11RenderTargetView.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_RENDER_TARGET_VIEW_DESC), "pDesc")], sideeffects=False),
]

# Depth-stencil-view description; carries read-only flags in addition to
# the per-dimension union.
D3D11_TEX1D_DSV = Struct("D3D11_TEX1D_DSV", [
    (UINT, "MipSlice"),
])

D3D11_TEX1D_ARRAY_DSV = Struct("D3D11_TEX1D_ARRAY_DSV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX2D_DSV = Struct("D3D11_TEX2D_DSV", [
    (UINT, "MipSlice"),
])

D3D11_TEX2D_ARRAY_DSV = Struct("D3D11_TEX2D_ARRAY_DSV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX2DMS_DSV = Struct("D3D11_TEX2DMS_DSV", [
    (UINT, "UnusedField_NothingToDefine"),
])

D3D11_TEX2DMS_ARRAY_DSV = Struct("D3D11_TEX2DMS_ARRAY_DSV", [
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_DSV_FLAG = Flags(UINT, [
    "D3D11_DSV_READ_ONLY_DEPTH",
    "D3D11_DSV_READ_ONLY_STENCIL",
])

D3D11_DEPTH_STENCIL_VIEW_DESC = Struct("D3D11_DEPTH_STENCIL_VIEW_DESC", [
    (DXGI_FORMAT, "Format"),
    (D3D11_DSV_DIMENSION, "ViewDimension"),
    (D3D11_DSV_FLAG, "Flags"),
    (Union("{self}.ViewDimension", [
        ("D3D11_DSV_DIMENSION_TEXTURE1D", D3D11_TEX1D_DSV, "Texture1D"),
        ("D3D11_DSV_DIMENSION_TEXTURE1DARRAY", D3D11_TEX1D_ARRAY_DSV, "Texture1DArray"),
        ("D3D11_DSV_DIMENSION_TEXTURE2D", D3D11_TEX2D_DSV, "Texture2D"),
        ("D3D11_DSV_DIMENSION_TEXTURE2DARRAY", D3D11_TEX2D_ARRAY_DSV, "Texture2DArray"),
        ("D3D11_DSV_DIMENSION_TEXTURE2DMS", D3D11_TEX2DMS_DSV, "Texture2DMS"),
        ("D3D11_DSV_DIMENSION_TEXTURE2DMSARRAY", D3D11_TEX2DMS_ARRAY_DSV, "Texture2DMSArray"),
    ]), None),
])

ID3D11DepthStencilView.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_DEPTH_STENCIL_VIEW_DESC), "pDesc")], sideeffects=False),
]

# Unordered-access-view description.
D3D11_BUFFER_UAV_FLAG = Flags(UINT, [
    "D3D11_BUFFER_UAV_FLAG_RAW",
    "D3D11_BUFFER_UAV_FLAG_APPEND",
    "D3D11_BUFFER_UAV_FLAG_COUNTER",
])

D3D11_BUFFER_UAV = Struct("D3D11_BUFFER_UAV", [
    (UINT, "FirstElement"),
    (UINT, "NumElements"),
    (D3D11_BUFFER_UAV_FLAG, "Flags"),
])

D3D11_TEX1D_UAV = Struct("D3D11_TEX1D_UAV", [
    (UINT, "MipSlice"),
])

D3D11_TEX1D_ARRAY_UAV = Struct("D3D11_TEX1D_ARRAY_UAV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX2D_UAV = Struct("D3D11_TEX2D_UAV", [
    (UINT, "MipSlice"),
])

D3D11_TEX2D_ARRAY_UAV = Struct("D3D11_TEX2D_ARRAY_UAV", [
    (UINT, "MipSlice"),
    (UINT, "FirstArraySlice"),
    (UINT, "ArraySize"),
])

D3D11_TEX3D_UAV = Struct("D3D11_TEX3D_UAV", [
    (UINT, "MipSlice"),
    (UINT, "FirstWSlice"),
    (UINT, "WSize"),
])

D3D11_UNORDERED_ACCESS_VIEW_DESC = Struct("D3D11_UNORDERED_ACCESS_VIEW_DESC", [
    (DXGI_FORMAT, "Format"),
    (D3D11_UAV_DIMENSION, "ViewDimension"),
    (Union("{self}.ViewDimension", [
        ("D3D11_UAV_DIMENSION_BUFFER", D3D11_BUFFER_UAV, "Buffer"),
        ("D3D11_UAV_DIMENSION_TEXTURE1D", D3D11_TEX1D_UAV, "Texture1D"),
        ("D3D11_UAV_DIMENSION_TEXTURE1DARRAY", D3D11_TEX1D_ARRAY_UAV, "Texture1DArray"),
        ("D3D11_UAV_DIMENSION_TEXTURE2D", D3D11_TEX2D_UAV, "Texture2D"),
        ("D3D11_UAV_DIMENSION_TEXTURE2DARRAY", D3D11_TEX2D_ARRAY_UAV, "Texture2DArray"),
        ("D3D11_UAV_DIMENSION_TEXTURE3D", D3D11_TEX3D_UAV, "Texture3D"),
    ]), None),
])

ID3D11UnorderedAccessView.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_UNORDERED_ACCESS_VIEW_DESC), "pDesc")], sideeffects=False),
]
D3D11_FILTER = Enum("D3D11_FILTER", [
"D3D11_FILTER_MIN_MAG_MIP_POINT",
"D3D11_FILTER_MIN_MAG_POINT_MIP_LINEAR",
"D3D11_FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT",
"D3D11_FILTER_MIN_POINT_MAG_MIP_LINEAR",
"D3D11_FILTER_MIN_LINEAR_MAG_MIP_POINT",
"D3D11_FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR",
"D3D11_FILTER_MIN_MAG_LINEAR_MIP_POINT",
"D3D11_FILTER_MIN_MAG_MIP_LINEAR",
"D3D11_FILTER_ANISOTROPIC",
"D3D11_FILTER_COMPARISON_MIN_MAG_MIP_POINT",
"D3D11_FILTER_COMPARISON_MIN_MAG_POINT_MIP_LINEAR",
"D3D11_FILTER_COMPARISON_MIN_POINT_MAG_LINEAR_MIP_POINT",
"D3D11_FILTER_COMPARISON_MIN_POINT_MAG_MIP_LINEAR",
"D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_MIP_POINT",
"D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_POINT_MIP_LINEAR",
"D3D11_FILTER_COMPARISON_MIN_MAG_LINEAR_MIP_POINT",
"D3D11_FILTER_COMPARISON_MIN_MAG_MIP_LINEAR",
"D3D11_FILTER_COMPARISON_ANISOTROPIC",
])
D3D11_FILTER_TYPE = Enum("D3D11_FILTER_TYPE", [
"D3D11_FILTER_TYPE_POINT",
"D3D11_FILTER_TYPE_LINEAR",
])
D3D11_TEXTURE_ADDRESS_MODE = Enum("D3D11_TEXTURE_ADDRESS_MODE", [
"D3D11_TEXTURE_ADDRESS_WRAP",
"D3D11_TEXTURE_ADDRESS_MIRROR",
"D3D11_TEXTURE_ADDRESS_CLAMP",
"D3D11_TEXTURE_ADDRESS_BORDER",
"D3D11_TEXTURE_ADDRESS_MIRROR_ONCE",
])
D3D11_SAMPLER_DESC = Struct("D3D11_SAMPLER_DESC", [
(D3D11_FILTER, "Filter"),
(D3D11_TEXTURE_ADDRESS_MODE, "AddressU"),
(D3D11_TEXTURE_ADDRESS_MODE, "AddressV"),
(D3D11_TEXTURE_ADDRESS_MODE, "AddressW"),
(FLOAT, "MipLODBias"),
(UINT, "MaxAnisotropy"),
(D3D11_COMPARISON_FUNC, "ComparisonFunc"),
(Array(FLOAT, 4), "BorderColor"),
(FLOAT, "MinLOD"),
(FLOAT, "MaxLOD"),
])
ID3D11SamplerState.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_SAMPLER_DESC), "pDesc")], sideeffects=False),
]
# Per-format capability bits returned by CheckFormatSupport; declared as
# a bitmask (Flags) because multiple capabilities are OR-ed together.
D3D11_FORMAT_SUPPORT = Flags(UINT, [
    "D3D11_FORMAT_SUPPORT_BUFFER",
    "D3D11_FORMAT_SUPPORT_IA_VERTEX_BUFFER",
    "D3D11_FORMAT_SUPPORT_IA_INDEX_BUFFER",
    "D3D11_FORMAT_SUPPORT_SO_BUFFER",
    "D3D11_FORMAT_SUPPORT_TEXTURE1D",
    "D3D11_FORMAT_SUPPORT_TEXTURE2D",
    "D3D11_FORMAT_SUPPORT_TEXTURE3D",
    "D3D11_FORMAT_SUPPORT_TEXTURECUBE",
    "D3D11_FORMAT_SUPPORT_SHADER_LOAD",
    "D3D11_FORMAT_SUPPORT_SHADER_SAMPLE",
    "D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_COMPARISON",
    "D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_MONO_TEXT",
    "D3D11_FORMAT_SUPPORT_MIP",
    "D3D11_FORMAT_SUPPORT_MIP_AUTOGEN",
    "D3D11_FORMAT_SUPPORT_RENDER_TARGET",
    "D3D11_FORMAT_SUPPORT_BLENDABLE",
    "D3D11_FORMAT_SUPPORT_DEPTH_STENCIL",
    "D3D11_FORMAT_SUPPORT_CPU_LOCKABLE",
    "D3D11_FORMAT_SUPPORT_MULTISAMPLE_RESOLVE",
    "D3D11_FORMAT_SUPPORT_DISPLAY",
    "D3D11_FORMAT_SUPPORT_CAST_WITHIN_BIT_LAYOUT",
    "D3D11_FORMAT_SUPPORT_MULTISAMPLE_RENDERTARGET",
    "D3D11_FORMAT_SUPPORT_MULTISAMPLE_LOAD",
    "D3D11_FORMAT_SUPPORT_SHADER_GATHER",
    "D3D11_FORMAT_SUPPORT_BACK_BUFFER_CAST",
    "D3D11_FORMAT_SUPPORT_TYPED_UNORDERED_ACCESS_VIEW",
    "D3D11_FORMAT_SUPPORT_SHADER_GATHER_COMPARISON",
])
# Extended per-format capability bits (CheckFeatureSupport with
# D3D11_FEATURE_FORMAT_SUPPORT2).  These values are independent bits in
# d3d11.h and are OR-ed together in OutFormatSupport2, so declare them as
# a bitmask (Flags) -- consistent with D3D11_FORMAT_SUPPORT above --
# rather than a plain Enum, so combined masks are decoded correctly.
D3D11_FORMAT_SUPPORT2 = Flags(UINT, [
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_ADD",
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_BITWISE_OPS",
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_COMPARE_STORE_OR_COMPARE_EXCHANGE",
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_EXCHANGE",
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_SIGNED_MIN_OR_MAX",
    "D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_UNSIGNED_MIN_OR_MAX",
    "D3D11_FORMAT_SUPPORT2_UAV_TYPED_LOAD",
    "D3D11_FORMAT_SUPPORT2_UAV_TYPED_STORE",
])
# ID3D11Asynchronous: size query for the data returned by GetData.
ID3D11Asynchronous.methods += [
    StdMethod(UINT, "GetDataSize", [], sideeffects=False),
]
# Flag for ID3D11DeviceContext::GetData (skip implicit Flush).
D3D11_ASYNC_GETDATA_FLAG = Flags(UINT, [
    "D3D11_ASYNC_GETDATA_DONOTFLUSH",
])
# Query kinds (d3d11.h D3D11_QUERY), including the per-stream
# stream-output statistics/overflow variants.
D3D11_QUERY = Enum("D3D11_QUERY", [
    "D3D11_QUERY_EVENT",
    "D3D11_QUERY_OCCLUSION",
    "D3D11_QUERY_TIMESTAMP",
    "D3D11_QUERY_TIMESTAMP_DISJOINT",
    "D3D11_QUERY_PIPELINE_STATISTICS",
    "D3D11_QUERY_OCCLUSION_PREDICATE",
    "D3D11_QUERY_SO_STATISTICS",
    "D3D11_QUERY_SO_OVERFLOW_PREDICATE",
    "D3D11_QUERY_SO_STATISTICS_STREAM0",
    "D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM0",
    "D3D11_QUERY_SO_STATISTICS_STREAM1",
    "D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM1",
    "D3D11_QUERY_SO_STATISTICS_STREAM2",
    "D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM2",
    "D3D11_QUERY_SO_STATISTICS_STREAM3",
    "D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM3",
])
# Misc flags for query creation.
D3D11_QUERY_MISC_FLAG = Flags(UINT, [
    "D3D11_QUERY_MISC_PREDICATEHINT",
])
# Query descriptor (D3D11_QUERY_DESC).
D3D11_QUERY_DESC = Struct("D3D11_QUERY_DESC", [
    (D3D11_QUERY, "Query"),
    (D3D11_QUERY_MISC_FLAG, "MiscFlags"),
])
ID3D11Query.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_QUERY_DESC), "pDesc")], sideeffects=False),
]
# Result payloads returned via GetData for the corresponding query types;
# field order must match the C struct layouts in d3d11.h.
D3D11_QUERY_DATA_TIMESTAMP_DISJOINT = Struct("D3D11_QUERY_DATA_TIMESTAMP_DISJOINT", [
    (UINT64, "Frequency"),
    (BOOL, "Disjoint"),
])
D3D11_QUERY_DATA_PIPELINE_STATISTICS = Struct("D3D11_QUERY_DATA_PIPELINE_STATISTICS", [
    (UINT64, "IAVertices"),
    (UINT64, "IAPrimitives"),
    (UINT64, "VSInvocations"),
    (UINT64, "GSInvocations"),
    (UINT64, "GSPrimitives"),
    (UINT64, "CInvocations"),
    (UINT64, "CPrimitives"),
    (UINT64, "PSInvocations"),
    (UINT64, "HSInvocations"),
    (UINT64, "DSInvocations"),
    (UINT64, "CSInvocations"),
])
D3D11_QUERY_DATA_SO_STATISTICS = Struct("D3D11_QUERY_DATA_SO_STATISTICS", [
    (UINT64, "NumPrimitivesWritten"),
    (UINT64, "PrimitivesStorageNeeded"),
])
# Performance-counter types and descriptors (device-dependent counters
# start at D3D11_COUNTER_DEVICE_DEPENDENT_0).
D3D11_COUNTER = Enum("D3D11_COUNTER", [
    "D3D11_COUNTER_DEVICE_DEPENDENT_0",
])
D3D11_COUNTER_TYPE = Enum("D3D11_COUNTER_TYPE", [
    "D3D11_COUNTER_TYPE_FLOAT32",
    "D3D11_COUNTER_TYPE_UINT16",
    "D3D11_COUNTER_TYPE_UINT32",
    "D3D11_COUNTER_TYPE_UINT64",
])
D3D11_COUNTER_DESC = Struct("D3D11_COUNTER_DESC", [
    (D3D11_COUNTER, "Counter"),
    (UINT, "MiscFlags"),
])
D3D11_COUNTER_INFO = Struct("D3D11_COUNTER_INFO", [
    (D3D11_COUNTER, "LastDeviceDependentCounter"),
    (UINT, "NumSimultaneousCounters"),
    (UINT8, "NumDetectableParallelUnits"),
])
ID3D11Counter.methods += [
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_COUNTER_DESC), "pDesc")], sideeffects=False),
]
# Standard multisample pattern selectors and device-context kinds.
D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS = Enum("D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS", [
    "D3D11_STANDARD_MULTISAMPLE_PATTERN",
    "D3D11_CENTER_MULTISAMPLE_PATTERN",
])
D3D11_DEVICE_CONTEXT_TYPE = Enum("D3D11_DEVICE_CONTEXT_TYPE", [
    "D3D11_DEVICE_CONTEXT_IMMEDIATE",
    "D3D11_DEVICE_CONTEXT_DEFERRED",
])
# Shader dynamic-linkage class-instance descriptor and the interfaces
# that expose it.
D3D11_CLASS_INSTANCE_DESC = Struct("D3D11_CLASS_INSTANCE_DESC", [
    (UINT, "InstanceId"),
    (UINT, "InstanceIndex"),
    (UINT, "TypeId"),
    (UINT, "ConstantBuffer"),
    (UINT, "BaseConstantBufferOffset"),
    (UINT, "BaseTexture"),
    (UINT, "BaseSampler"),
    (BOOL, "Created"),
])
ID3D11ClassInstance.methods += [
    StdMethod(Void, "GetClassLinkage", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), "ppLinkage")]),
    StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_CLASS_INSTANCE_DESC), "pDesc")], sideeffects=False),
    StdMethod(Void, "GetInstanceName", [Out(LPSTR, "pInstanceName"), Out(Pointer(SIZE_T), "pBufferLength")], sideeffects=False),
    StdMethod(Void, "GetTypeName", [Out(LPSTR, "pTypeName"), Out(Pointer(SIZE_T), "pBufferLength")], sideeffects=False),
]
ID3D11ClassLinkage.methods += [
    StdMethod(HRESULT, "GetClassInstance", [(LPCSTR, "pClassInstanceName"), (UINT, "InstanceIndex"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), "ppInstance")]),
    StdMethod(HRESULT, "CreateClassInstance", [(LPCSTR, "pClassTypeName"), (UINT, "ConstantBufferOffset"), (UINT, "ConstantVectorOffset"), (UINT, "TextureOffset"), (UINT, "SamplerOffset"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), "ppInstance")]),
]
ID3D11CommandList.methods += [
    StdMethod(UINT, "GetContextFlags", [], sideeffects=False),
]
# Feature-support payload structs for ID3D11Device::CheckFeatureSupport.
D3D11_FEATURE_DATA_THREADING = Struct("D3D11_FEATURE_DATA_THREADING", [
    (BOOL, "DriverConcurrentCreates"),
    (BOOL, "DriverCommandLists"),
])
D3D11_FEATURE_DATA_DOUBLES = Struct("D3D11_FEATURE_DATA_DOUBLES", [
    (BOOL, "DoublePrecisionFloatShaderOps"),
])
D3D11_FEATURE_DATA_FORMAT_SUPPORT = Struct("D3D11_FEATURE_DATA_FORMAT_SUPPORT", [
    (DXGI_FORMAT, "InFormat"),
    (D3D11_FORMAT_SUPPORT, "OutFormatSupport"),
])
D3D11_FEATURE_DATA_FORMAT_SUPPORT2 = Struct("D3D11_FEATURE_DATA_FORMAT_SUPPORT2", [
    (DXGI_FORMAT, "InFormat"),
    (D3D11_FORMAT_SUPPORT2, "OutFormatSupport2"),
])
D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS = Struct("D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS", [
    (BOOL, "ComputeShaders_Plus_RawAndStructuredBuffers_Via_Shader_4_x"),
])
# Ties each D3D11_FEATURE enum value to the pointer type of its payload,
# sized by FeatureSupportDataSize (polymorphic on the Feature argument).
D3D11_FEATURE, D3D11_FEATURE_DATA = EnumPolymorphic("D3D11_FEATURE", "Feature", [
    ("D3D11_FEATURE_THREADING", Pointer(D3D11_FEATURE_DATA_THREADING)),
    ("D3D11_FEATURE_DOUBLES", Pointer(D3D11_FEATURE_DATA_DOUBLES)),
    ("D3D11_FEATURE_FORMAT_SUPPORT", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT)),
    ("D3D11_FEATURE_FORMAT_SUPPORT2", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT2)),
    ("D3D11_FEATURE_D3D10_X_HARDWARE_OPTIONS", Pointer(D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS)),
], Blob(Void, "FeatureSupportDataSize"), False)
# ID3D11DeviceContext method table.
# NOTE(review): the declaration order appears to define the COM vtable
# layout used by the generated wrappers -- do not reorder entries;
# confirm against the interface code generator.
# Array(..., "NumX") ties a pointer argument's element count to a sibling
# parameter; "*pNumX" dereferences an out-parameter count.
ID3D11DeviceContext.methods += [
    StdMethod(Void, "VSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "PSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "PSSetShader", [(ObjPointer(ID3D11PixelShader), "pPixelShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "PSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "VSSetShader", [(ObjPointer(ID3D11VertexShader), "pVertexShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "DrawIndexed", [(UINT, "IndexCount"), (UINT, "StartIndexLocation"), (INT, "BaseVertexLocation")]),
    StdMethod(Void, "Draw", [(UINT, "VertexCount"), (UINT, "StartVertexLocation")]),
    StdMethod(HRESULT, "Map", [(ObjPointer(ID3D11Resource), "pResource"), (UINT, "Subresource"), (D3D11_MAP, "MapType"), (D3D11_MAP_FLAG, "MapFlags"), Out(Pointer(D3D11_MAPPED_SUBRESOURCE), "pMappedResource")]),
    StdMethod(Void, "Unmap", [(ObjPointer(ID3D11Resource), "pResource"), (UINT, "Subresource")]),
    StdMethod(Void, "PSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "IASetInputLayout", [(ObjPointer(ID3D11InputLayout), "pInputLayout")]),
    StdMethod(Void, "IASetVertexBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppVertexBuffers"), (Array(Const(UINT), "NumBuffers"), "pStrides"), (Array(Const(UINT), "NumBuffers"), "pOffsets")]),
    StdMethod(Void, "IASetIndexBuffer", [(ObjPointer(ID3D11Buffer), "pIndexBuffer"), (DXGI_FORMAT, "Format"), (UINT, "Offset")]),
    StdMethod(Void, "DrawIndexedInstanced", [(UINT, "IndexCountPerInstance"), (UINT, "InstanceCount"), (UINT, "StartIndexLocation"), (INT, "BaseVertexLocation"), (UINT, "StartInstanceLocation")]),
    StdMethod(Void, "DrawInstanced", [(UINT, "VertexCountPerInstance"), (UINT, "InstanceCount"), (UINT, "StartVertexLocation"), (UINT, "StartInstanceLocation")]),
    StdMethod(Void, "GSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "GSSetShader", [(ObjPointer(ID3D11GeometryShader), "pShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "IASetPrimitiveTopology", [(D3D11_PRIMITIVE_TOPOLOGY, "Topology")]),
    StdMethod(Void, "VSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "VSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "Begin", [(ObjPointer(ID3D11Asynchronous), "pAsync")]),
    StdMethod(Void, "End", [(ObjPointer(ID3D11Asynchronous), "pAsync")]),
    StdMethod(HRESULT, "GetData", [(ObjPointer(ID3D11Asynchronous), "pAsync"), Out(OpaqueBlob(Void, "DataSize"), "pData"), (UINT, "DataSize"), (D3D11_ASYNC_GETDATA_FLAG, "GetDataFlags")], sideeffects=False),
    StdMethod(Void, "SetPredication", [(ObjPointer(ID3D11Predicate), "pPredicate"), (BOOL, "PredicateValue")]),
    StdMethod(Void, "GSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "GSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "OMSetRenderTargets", [(UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), "NumViews"), "ppRenderTargetViews"), (ObjPointer(ID3D11DepthStencilView), "pDepthStencilView")]),
    StdMethod(Void, "OMSetRenderTargetsAndUnorderedAccessViews", [(UINT, "NumRTVs"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), "NumRTVs"), "ppRenderTargetViews"), (ObjPointer(ID3D11DepthStencilView), "pDepthStencilView"), (UINT, "UAVStartSlot"), (UINT, "NumUAVs"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), "NumUAVs"), "ppUnorderedAccessViews"), (Array(Const(UINT), "NumUAVs"), "pUAVInitialCounts")]),
    StdMethod(Void, "OMSetBlendState", [(ObjPointer(ID3D11BlendState), "pBlendState"), (Array(Const(FLOAT), 4), "BlendFactor"), (UINT, "SampleMask")]),
    StdMethod(Void, "OMSetDepthStencilState", [(ObjPointer(ID3D11DepthStencilState), "pDepthStencilState"), (UINT, "StencilRef")]),
    StdMethod(Void, "SOSetTargets", [(UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppSOTargets"), (Array(Const(UINT), "NumBuffers"), "pOffsets")]),
    StdMethod(Void, "DrawAuto", []),
    StdMethod(Void, "DrawIndexedInstancedIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]),
    StdMethod(Void, "DrawInstancedIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]),
    StdMethod(Void, "Dispatch", [(UINT, "ThreadGroupCountX"), (UINT, "ThreadGroupCountY"), (UINT, "ThreadGroupCountZ")]),
    StdMethod(Void, "DispatchIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]),
    StdMethod(Void, "RSSetState", [(ObjPointer(ID3D11RasterizerState), "pRasterizerState")]),
    StdMethod(Void, "RSSetViewports", [(UINT, "NumViewports"), (Array(Const(D3D11_VIEWPORT), "NumViewports"), "pViewports")]),
    StdMethod(Void, "RSSetScissorRects", [(UINT, "NumRects"), (Array(Const(D3D11_RECT), "NumRects"), "pRects")]),
    StdMethod(Void, "CopySubresourceRegion", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (UINT, "DstX"), (UINT, "DstY"), (UINT, "DstZ"), (ObjPointer(ID3D11Resource), "pSrcResource"), (UINT, "SrcSubresource"), (Pointer(Const(D3D11_BOX)), "pSrcBox")]),
    StdMethod(Void, "CopyResource", [(ObjPointer(ID3D11Resource), "pDstResource"), (ObjPointer(ID3D11Resource), "pSrcResource")]),
    StdMethod(Void, "UpdateSubresource", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (Pointer(Const(D3D11_BOX)), "pDstBox"), (Blob(Const(Void), "_calcSubresourceSize(pDstResource, DstSubresource, pDstBox, SrcRowPitch, SrcDepthPitch)"), "pSrcData"), (UINT, "SrcRowPitch"), (UINT, "SrcDepthPitch")]),
    StdMethod(Void, "CopyStructureCount", [(ObjPointer(ID3D11Buffer), "pDstBuffer"), (UINT, "DstAlignedByteOffset"), (ObjPointer(ID3D11UnorderedAccessView), "pSrcView")]),
    StdMethod(Void, "ClearRenderTargetView", [(ObjPointer(ID3D11RenderTargetView), "pRenderTargetView"), (Array(Const(FLOAT), 4), "ColorRGBA")]),
    StdMethod(Void, "ClearUnorderedAccessViewUint", [(ObjPointer(ID3D11UnorderedAccessView), "pUnorderedAccessView"), (Array(Const(UINT), 4), "Values")]),
    StdMethod(Void, "ClearUnorderedAccessViewFloat", [(ObjPointer(ID3D11UnorderedAccessView), "pUnorderedAccessView"), (Array(Const(FLOAT), 4), "Values")]),
    StdMethod(Void, "ClearDepthStencilView", [(ObjPointer(ID3D11DepthStencilView), "pDepthStencilView"), (D3D11_CLEAR_FLAG, "ClearFlags"), (FLOAT, "Depth"), (UINT8, "Stencil")]),
    StdMethod(Void, "GenerateMips", [(ObjPointer(ID3D11ShaderResourceView), "pShaderResourceView")]),
    StdMethod(Void, "SetResourceMinLOD", [(ObjPointer(ID3D11Resource), "pResource"), (FLOAT, "MinLOD")]),
    StdMethod(FLOAT, "GetResourceMinLOD", [(ObjPointer(ID3D11Resource), "pResource")], sideeffects=False),
    StdMethod(Void, "ResolveSubresource", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (ObjPointer(ID3D11Resource), "pSrcResource"), (UINT, "SrcSubresource"), (DXGI_FORMAT, "Format")]),
    StdMethod(Void, "ExecuteCommandList", [(ObjPointer(ID3D11CommandList), "pCommandList"), (BOOL, "RestoreContextState")]),
    StdMethod(Void, "HSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "HSSetShader", [(ObjPointer(ID3D11HullShader), "pHullShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "HSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "HSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "DSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "DSSetShader", [(ObjPointer(ID3D11DomainShader), "pDomainShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "DSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "DSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "CSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "CSSetUnorderedAccessViews", [(UINT, "StartSlot"), (UINT, "NumUAVs"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), "NumUAVs"), "ppUnorderedAccessViews"), (Array(Const(UINT), "NumUAVs"), "pUAVInitialCounts")]),
    StdMethod(Void, "CSSetShader", [(ObjPointer(ID3D11ComputeShader), "pComputeShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
    StdMethod(Void, "CSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "CSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "VSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), Out(Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "PSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), Out(Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "PSGetShader", [Out(Pointer(ObjPointer(ID3D11PixelShader)), "ppPixelShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "PSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), Out(Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "VSGetShader", [Out(Pointer(ObjPointer(ID3D11VertexShader)), "ppVertexShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "PSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), Out(Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "IAGetInputLayout", [Out(Pointer(ObjPointer(ID3D11InputLayout)), "ppInputLayout")]),
    StdMethod(Void, "IAGetVertexBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), Out(Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppVertexBuffers"), Out(Array(UINT, "NumBuffers"), "pStrides"), Out(Array(UINT, "NumBuffers"), "pOffsets")]),
    StdMethod(Void, "IAGetIndexBuffer", [Out(Pointer(ObjPointer(ID3D11Buffer)), "pIndexBuffer"), Out(Pointer(DXGI_FORMAT), "Format"), Out(Pointer(UINT), "Offset")]),
    StdMethod(Void, "GSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), Out(Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "GSGetShader", [Out(Pointer(ObjPointer(ID3D11GeometryShader)), "ppGeometryShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "IAGetPrimitiveTopology", [Out(Pointer(D3D11_PRIMITIVE_TOPOLOGY), "pTopology")], sideeffects=False),
    StdMethod(Void, "VSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), Out(Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "VSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), Out(Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "GetPredication", [Out(Pointer(ObjPointer(ID3D11Predicate)), "ppPredicate"), Out(Pointer(BOOL), "pPredicateValue")]),
    StdMethod(Void, "GSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), Out(Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "GSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), Out(Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "OMGetRenderTargets", [(UINT, "NumViews"), Out(Array(ObjPointer(ID3D11RenderTargetView), "NumViews"), "ppRenderTargetViews"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), "ppDepthStencilView")]),
    StdMethod(Void, "OMGetRenderTargetsAndUnorderedAccessViews", [(UINT, "NumRTVs"), Out(Array(ObjPointer(ID3D11RenderTargetView), "NumRTVs"), "ppRenderTargetViews"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), "ppDepthStencilView"), (UINT, "UAVStartSlot"), (UINT, "NumUAVs"), Out(Array(ObjPointer(ID3D11UnorderedAccessView), "NumUAVs"), "ppUnorderedAccessViews")]),
    StdMethod(Void, "OMGetBlendState", [Out(Pointer(ObjPointer(ID3D11BlendState)), "ppBlendState"), Out(Array(FLOAT, 4), "BlendFactor"), Out(Pointer(UINT), "pSampleMask")]),
    StdMethod(Void, "OMGetDepthStencilState", [Out(Pointer(ObjPointer(ID3D11DepthStencilState)), "ppDepthStencilState"), Out(Pointer(UINT), "pStencilRef")]),
    StdMethod(Void, "SOGetTargets", [(UINT, "NumBuffers"), Out(Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppSOTargets")]),
    StdMethod(Void, "RSGetState", [Out(Pointer(ObjPointer(ID3D11RasterizerState)), "ppRasterizerState")]),
    StdMethod(Void, "RSGetViewports", [Out(Pointer(UINT), "pNumViewports"), Out(Array(D3D11_VIEWPORT, "*pNumViewports"), "pViewports")]),
    StdMethod(Void, "RSGetScissorRects", [Out(Pointer(UINT), "pNumRects"), Out(Array(D3D11_RECT, "*pNumRects"), "pRects")]),
    StdMethod(Void, "HSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), Out(Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "HSGetShader", [Out(Pointer(ObjPointer(ID3D11HullShader)), "ppHullShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "HSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), Out(Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "HSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), Out(Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "DSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), Out(Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "DSGetShader", [Out(Pointer(ObjPointer(ID3D11DomainShader)), "ppDomainShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "DSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), Out(Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "DSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), Out(Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "CSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), Out(Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
    StdMethod(Void, "CSGetUnorderedAccessViews", [(UINT, "StartSlot"), (UINT, "NumUAVs"), Out(Array(ObjPointer(ID3D11UnorderedAccessView), "NumUAVs"), "ppUnorderedAccessViews")]),
    StdMethod(Void, "CSGetShader", [Out(Pointer(ObjPointer(ID3D11ComputeShader)), "ppComputeShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
    StdMethod(Void, "CSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), Out(Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
    StdMethod(Void, "CSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), Out(Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
    StdMethod(Void, "ClearState", []),
    StdMethod(Void, "Flush", []),
    StdMethod(D3D11_DEVICE_CONTEXT_TYPE, "GetType", [], sideeffects=False),
    StdMethod(UINT, "GetContextFlags", [], sideeffects=False),
    StdMethod(HRESULT, "FinishCommandList", [(BOOL, "RestoreDeferredContextState"), Out(Pointer(ObjPointer(ID3D11CommandList)), "ppCommandList")]),
]
# Device-creation flags for D3D11CreateDevice* (bitmask).
D3D11_CREATE_DEVICE_FLAG = Flags(UINT, [
    "D3D11_CREATE_DEVICE_SINGLETHREADED",
    "D3D11_CREATE_DEVICE_DEBUG",
    "D3D11_CREATE_DEVICE_SWITCH_TO_REF",
    "D3D11_CREATE_DEVICE_PREVENT_INTERNAL_THREADING_OPTIMIZATIONS",
    "D3D11_CREATE_DEVICE_BGRA_SUPPORT",
])
# ID3D11Device method table.
# NOTE(review): declaration order appears to define the COM vtable layout
# -- do not reorder; confirm against the interface code generator.
# _getNumSubResources(pDesc) is an out-of-line helper computing the number
# of initial-data entries from the texture descriptor.
ID3D11Device.methods += [
    StdMethod(HRESULT, "CreateBuffer", [(Pointer(Const(D3D11_BUFFER_DESC)), "pDesc"), (Array(Const(D3D11_SUBRESOURCE_DATA), "1"), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Buffer)), "ppBuffer")]),
    StdMethod(HRESULT, "CreateTexture1D", [(Pointer(Const(D3D11_TEXTURE1D_DESC)), "pDesc"), (Array(Const(D3D11_SUBRESOURCE_DATA), "_getNumSubResources(pDesc)"), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Texture1D)), "ppTexture1D")]),
    StdMethod(HRESULT, "CreateTexture2D", [(Pointer(Const(D3D11_TEXTURE2D_DESC)), "pDesc"), (Array(Const(D3D11_SUBRESOURCE_DATA), "_getNumSubResources(pDesc)"), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Texture2D)), "ppTexture2D")]),
    StdMethod(HRESULT, "CreateTexture3D", [(Pointer(Const(D3D11_TEXTURE3D_DESC)), "pDesc"), (Array(Const(D3D11_SUBRESOURCE_DATA), "_getNumSubResources(pDesc)"), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Texture3D)), "ppTexture3D")]),
    StdMethod(HRESULT, "CreateShaderResourceView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_SHADER_RESOURCE_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11ShaderResourceView)), "ppSRView")]),
    StdMethod(HRESULT, "CreateUnorderedAccessView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_UNORDERED_ACCESS_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11UnorderedAccessView)), "ppUAView")]),
    StdMethod(HRESULT, "CreateRenderTargetView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_RENDER_TARGET_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11RenderTargetView)), "ppRTView")]),
    StdMethod(HRESULT, "CreateDepthStencilView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_DEPTH_STENCIL_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), "ppDepthStencilView")]),
    StdMethod(HRESULT, "CreateInputLayout", [(Array(Const(D3D11_INPUT_ELEMENT_DESC), "NumElements"), "pInputElementDescs"), (UINT, "NumElements"), (Blob(Const(Void), "BytecodeLength"), "pShaderBytecodeWithInputSignature"), (SIZE_T, "BytecodeLength"), Out(Pointer(ObjPointer(ID3D11InputLayout)), "ppInputLayout")]),
    StdMethod(HRESULT, "CreateVertexShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11VertexShader)), "ppVertexShader")]),
    StdMethod(HRESULT, "CreateGeometryShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11GeometryShader)), "ppGeometryShader")]),
    StdMethod(HRESULT, "CreateGeometryShaderWithStreamOutput", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (Array(Const(D3D11_SO_DECLARATION_ENTRY), "NumEntries"), "pSODeclaration"), (UINT, "NumEntries"), (Array(Const(UINT), "NumStrides"), "pBufferStrides"), (UINT, "NumStrides"), (UINT, "RasterizedStream"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11GeometryShader)), "ppGeometryShader")]),
    StdMethod(HRESULT, "CreatePixelShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11PixelShader)), "ppPixelShader")]),
    StdMethod(HRESULT, "CreateHullShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11HullShader)), "ppHullShader")]),
    StdMethod(HRESULT, "CreateDomainShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11DomainShader)), "ppDomainShader")]),
    StdMethod(HRESULT, "CreateComputeShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11ComputeShader)), "ppComputeShader")]),
    StdMethod(HRESULT, "CreateClassLinkage", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), "ppLinkage")]),
    StdMethod(HRESULT, "CreateBlendState", [(Pointer(Const(D3D11_BLEND_DESC)), "pBlendStateDesc"), Out(Pointer(ObjPointer(ID3D11BlendState)), "ppBlendState")]),
    StdMethod(HRESULT, "CreateDepthStencilState", [(Pointer(Const(D3D11_DEPTH_STENCIL_DESC)), "pDepthStencilDesc"), Out(Pointer(ObjPointer(ID3D11DepthStencilState)), "ppDepthStencilState")]),
    StdMethod(HRESULT, "CreateRasterizerState", [(Pointer(Const(D3D11_RASTERIZER_DESC)), "pRasterizerDesc"), Out(Pointer(ObjPointer(ID3D11RasterizerState)), "ppRasterizerState")]),
    StdMethod(HRESULT, "CreateSamplerState", [(Pointer(Const(D3D11_SAMPLER_DESC)), "pSamplerDesc"), Out(Pointer(ObjPointer(ID3D11SamplerState)), "ppSamplerState")]),
    StdMethod(HRESULT, "CreateQuery", [(Pointer(Const(D3D11_QUERY_DESC)), "pQueryDesc"), Out(Pointer(ObjPointer(ID3D11Query)), "ppQuery")]),
    StdMethod(HRESULT, "CreatePredicate", [(Pointer(Const(D3D11_QUERY_DESC)), "pPredicateDesc"), Out(Pointer(ObjPointer(ID3D11Predicate)), "ppPredicate")]),
    StdMethod(HRESULT, "CreateCounter", [(Pointer(Const(D3D11_COUNTER_DESC)), "pCounterDesc"), Out(Pointer(ObjPointer(ID3D11Counter)), "ppCounter")]),
    StdMethod(HRESULT, "CreateDeferredContext", [(UINT, "ContextFlags"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppDeferredContext")]),
    StdMethod(HRESULT, "OpenSharedResource", [(HANDLE, "hResource"), (REFIID, "ReturnedInterface"), Out(Pointer(ObjPointer(Void)), "ppResource")]),
    StdMethod(HRESULT, "CheckFormatSupport", [(DXGI_FORMAT, "Format"), Out(Pointer(D3D11_FORMAT_SUPPORT), "pFormatSupport")], sideeffects=False),
    StdMethod(HRESULT, "CheckMultisampleQualityLevels", [(DXGI_FORMAT, "Format"), (UINT, "SampleCount"), Out(Pointer(UINT), "pNumQualityLevels")], sideeffects=False),
    StdMethod(Void, "CheckCounterInfo", [Out(Pointer(D3D11_COUNTER_INFO), "pCounterInfo")], sideeffects=False),
    StdMethod(HRESULT, "CheckCounter", [(Pointer(Const(D3D11_COUNTER_DESC)), "pDesc"), Out(Pointer(D3D11_COUNTER_TYPE), "pType"), Out(Pointer(UINT), "pActiveCounters"), Out(LPSTR, "szName"), Out(Pointer(UINT), "pNameLength"), Out(LPSTR, "szUnits"), Out(Pointer(UINT), "pUnitsLength"), Out(LPSTR, "szDescription"), Out(Pointer(UINT), "pDescriptionLength")], sideeffects=False),
    StdMethod(HRESULT, "CheckFeatureSupport", [(D3D11_FEATURE, "Feature"), Out(D3D11_FEATURE_DATA, "pFeatureSupportData"), (UINT, "FeatureSupportDataSize")], sideeffects=False),
    StdMethod(HRESULT, "GetPrivateData", [(REFGUID, "guid"), Out(Pointer(UINT), "pDataSize"), Out(OpaquePointer(Void), "pData")], sideeffects=False),
    StdMethod(HRESULT, "SetPrivateData", [(REFGUID, "guid"), (UINT, "DataSize"), (OpaqueBlob(Const(Void), "DataSize"), "pData")], sideeffects=False),
    StdMethod(HRESULT, "SetPrivateDataInterface", [(REFGUID, "guid"), (OpaquePointer(Const(IUnknown)), "pData")], sideeffects=False),
    StdMethod(D3D_FEATURE_LEVEL, "GetFeatureLevel", [], sideeffects=False),
    StdMethod(D3D11_CREATE_DEVICE_FLAG, "GetCreationFlags", [], sideeffects=False),
    StdMethod(HRESULT, "GetDeviceRemovedReason", [], sideeffects=False),
    StdMethod(Void, "GetImmediateContext", [Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppImmediateContext")]),
    StdMethod(HRESULT, "SetExceptionMode", [(D3D11_RAISE_FLAG, "RaiseFlags")]),
    StdMethod(UINT, "GetExceptionMode", [], sideeffects=False),
]
# Module description for d3d11.dll: the exported entry points plus the
# extra interfaces reachable via QueryInterface that must be wrapped.
d3d11 = Module("d3d11")
d3d11.addFunctions([
    StdFunction(HRESULT, "D3D11CreateDevice", [(ObjPointer(IDXGIAdapter), "pAdapter"), (D3D_DRIVER_TYPE, "DriverType"), (HMODULE, "Software"), (D3D11_CREATE_DEVICE_FLAG, "Flags"), (Array(Const(D3D_FEATURE_LEVEL), "FeatureLevels"), "pFeatureLevels"), (UINT, "FeatureLevels"), (UINT, "SDKVersion"), Out(Pointer(ObjPointer(ID3D11Device)), "ppDevice"), Out(Pointer(D3D_FEATURE_LEVEL), "pFeatureLevel"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppImmediateContext")]),
    StdFunction(HRESULT, "D3D11CreateDeviceAndSwapChain", [(ObjPointer(IDXGIAdapter), "pAdapter"), (D3D_DRIVER_TYPE, "DriverType"), (HMODULE, "Software"), (D3D11_CREATE_DEVICE_FLAG, "Flags"), (Array(Const(D3D_FEATURE_LEVEL), "FeatureLevels"), "pFeatureLevels"), (UINT, "FeatureLevels"), (UINT, "SDKVersion"), (Pointer(Const(DXGI_SWAP_CHAIN_DESC)), "pSwapChainDesc"), Out(Pointer(ObjPointer(IDXGISwapChain)), "ppSwapChain"), Out(Pointer(ObjPointer(ID3D11Device)), "ppDevice"), Out(Pointer(D3D_FEATURE_LEVEL), "pFeatureLevel"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppImmediateContext")]),
])
d3d11.addInterfaces([
    IDXGIFactory1,
    IDXGIAdapter1,
    IDXGIDevice1,
    IDXGIResource,
    ID3D11Debug,
    ID3D11InfoQueue,
    ID3D11SwitchToRef,
])
| |
'''test_jutil.py - test the high-level interface
python-javabridge is licensed under the BSD license. See the
accompanying file LICENSE for details.
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2013 Broad Institute
All rights reserved.
'''
import gc
import os
import numpy as np
import threading
import unittest
import sys
import javabridge
# Monkey patch some half-correct implementations of methods that only
# appeared in Python 2.7.
# Backfill assertion helpers on pre-2.7 unittest.TestCase.  On modern
# Pythons every hasattr() check is True and nothing is patched.
if not hasattr(unittest.TestCase, 'assertIn'):
    unittest.TestCase.assertIn = lambda self, a, b: self.assertTrue(a in b)
if not hasattr(unittest.TestCase, 'assertNotIn'):
    unittest.TestCase.assertNotIn = lambda self, a, b: self.assertTrue(a not in b)
if not hasattr(unittest.TestCase, 'assertSequenceEqual'):
    # BUG FIX: the old shim passed a list of comparison results to
    # assertTrue -- a non-empty list is always truthy, so the assertion
    # could never fail -- and zip() silently ignored length mismatches.
    # Require equal lengths and element-wise equality instead.
    unittest.TestCase.assertSequenceEqual = lambda self, a, b: self.assertTrue(
        len(a) == len(b) and all(aa == bb for aa, bb in zip(a, b)))
class TestJutil(unittest.TestCase):
    """High-level tests for the ``javabridge`` jutil interface.

    Each test attaches the current thread to the JVM in setUp and
    detaches in tearDown; the suite assumes the VM itself was started by
    the enclosing test runner.  The only code change versus the original
    is replacing the deprecated ``assertEquals`` alias with
    ``assertEqual`` in test_11_01.
    """
    def setUp(self):
        self.env = javabridge.attach()
    def tearDown(self):
        javabridge.detach()
    def test_01_01_to_string(self):
        jstring = self.env.new_string_utf("Hello, world")
        self.assertEqual(javabridge.to_string(jstring), "Hello, world")
    def test_01_02_make_instance(self):
        jobject = javabridge.make_instance("java/lang/Object", "()V")
        self.assertTrue(javabridge.to_string(jobject).startswith("java.lang.Object"))
    def test_01_03_call(self):
        jstring = self.env.new_string_utf("Hello, world")
        self.assertEqual(javabridge.call(jstring, "charAt", "(I)C", 0), "H")
    def test_01_03_01_static_call(self):
        result = javabridge.static_call("Ljava/lang/String;", "valueOf",
                                        "(I)Ljava/lang/String;",123)
        self.assertEqual(result, "123")
    def test_01_04_make_method(self):
        env = self.env
        class String(object):
            def __init__(self):
                self.o = env.new_string_utf("Hello, world")
            charAt = javabridge.make_method("charAt", "(I)C", "My documentation")
        s = String()
        self.assertEqual(s.charAt.__doc__, "My documentation")
        self.assertEqual(s.charAt(0), "H")
    def test_01_05_00_get_static_field(self):
        klass = self.env.find_class("java/lang/Short")
        self.assertEqual(javabridge.get_static_field(klass, "MAX_VALUE", "S"), 2**15 - 1)
    def test_01_05_01_no_field_for_get_static_field(self):
        def fn():
            javabridge.get_static_field(
                'java/lang/Object', "NoSuchField", "I")
        self.assertRaises(javabridge.JavaException, fn)
    def test_01_05_02_no_class_for_get_static_field(self):
        def fn():
            javabridge.get_static_field(
                'no/such/class', "field", "I")
        self.assertRaises(javabridge.JavaException, fn)
    def test_01_05_03_set_static_field(self):
        # Round-trip every primitive signature plus an object and null.
        class_name = "org/cellprofiler/javabridge/test/RealRect"
        test_cases = (
            ("fs_char", "C", "A"),
            ("fs_byte", "B", 3),
            ("fs_short", "S", 15),
            ("fs_int", "I", 392),
            ("fs_long", "J", -14),
            ("fs_float", "F", 1.03),
            ("fs_double", "D", -889.1),
            ("fs_object", "Ljava/lang/Object;",
             javabridge.make_instance("java/lang/Integer", "(I)V", 15)),
            ("fs_object", "Ljava/lang/Object;", None))
        for field_name, signature, value in test_cases:
            javabridge.set_static_field(class_name, field_name, signature, value)
            v = javabridge.get_static_field(class_name, field_name, signature)
            if isinstance(value, float):
                self.assertAlmostEqual(v, value)
            elif isinstance(value, javabridge.JB_Object):
                self.assertTrue(javabridge.call(
                    value, "equals", "(Ljava/lang/Object;)Z", v))
            else:
                self.assertEqual(v, value)
    def test_01_05_04_no_field_for_set_static_field(self):
        def fn():
            javabridge.set_static_field(
                'java/lang/Object', "NoSuchField", "I", 5)
        self.assertRaises(javabridge.JavaException, fn)
    def test_01_05_05_no_class_for_set_static_field(self):
        def fn():
            javabridge.set_static_field(
                'no/such/class', "field", "I", 5)
        self.assertRaises(javabridge.JavaException, fn)
    def test_01_06_get_enumeration_wrapper(self):
        properties = javabridge.static_call("java/lang/System", "getProperties",
                                            "()Ljava/util/Properties;")
        keys = javabridge.call(properties, "keys", "()Ljava/util/Enumeration;")
        enum = javabridge.get_enumeration_wrapper(keys)
        has_java_vm_name = False
        while(enum.hasMoreElements()):
            key = javabridge.to_string(enum.nextElement())
            if key == "java.vm.name":
                has_java_vm_name = True
        self.assertTrue(has_java_vm_name)
    def test_01_07_get_dictionary_wrapper(self):
        properties = javabridge.static_call("java/lang/System", "getProperties",
                                            "()Ljava/util/Properties;")
        d = javabridge.get_dictionary_wrapper(properties)
        self.assertTrue(d.size() > 10)
        self.assertFalse(d.isEmpty())
        keys = javabridge.get_enumeration_wrapper(d.keys())
        values = javabridge.get_enumeration_wrapper(d.elements())
        n_elems = d.size()
        for i in range(n_elems):
            self.assertTrue(keys.hasMoreElements())
            key = javabridge.to_string(keys.nextElement())
            self.assertTrue(values.hasMoreElements())
            value = javabridge.to_string(values.nextElement())
            self.assertEqual(javabridge.to_string(d.get(key)), value)
        self.assertFalse(keys.hasMoreElements())
        self.assertFalse(values.hasMoreElements())
    def test_01_08_jenumeration_to_string_list(self):
        properties = javabridge.static_call("java/lang/System", "getProperties",
                                            "()Ljava/util/Properties;")
        d = javabridge.get_dictionary_wrapper(properties)
        keys = javabridge.jenumeration_to_string_list(d.keys())
        enum = javabridge.get_enumeration_wrapper(d.keys())
        for i in range(d.size()):
            key = javabridge.to_string(enum.nextElement())
            self.assertEqual(key, keys[i])
    def test_01_09_jdictionary_to_string_dictionary(self):
        properties = javabridge.static_call("java/lang/System", "getProperties",
                                            "()Ljava/util/Properties;")
        d = javabridge.get_dictionary_wrapper(properties)
        pyd = javabridge.jdictionary_to_string_dictionary(properties)
        keys = javabridge.jenumeration_to_string_list(d.keys())
        for key in keys:
            value = javabridge.to_string(d.get(key))
            self.assertEqual(pyd[key], value)
    def test_01_10_make_new(self):
        env = self.env
        class MyClass:
            new_fn = javabridge.make_new("java/lang/Object", '()V')
            def __init__(self):
                self.new_fn()
        my_instance = MyClass()
    def test_01_11_class_for_name(self):
        c = javabridge.class_for_name('java.lang.String')
        name = javabridge.call(c, 'getCanonicalName', '()Ljava/lang/String;')
        self.assertEqual(name, 'java.lang.String')
    def test_02_01_access_object_across_environments(self):
        #
        # Create an object in one environment, close the environment,
        # open a second environment, then use it and delete it.
        #
        env = self.env
        self.assertTrue(isinstance(env,javabridge.JB_Env))
        class MyInteger:
            new_fn = javabridge.make_new("java/lang/Integer",'(I)V')
            def __init__(self, value):
                self.new_fn(value)
            intValue = javabridge.make_method("intValue", '()I')
        my_value = 543
        my_integer=MyInteger(my_value)
        def run(my_integer = my_integer):
            env = javabridge.attach()
            self.assertEqual(my_integer.intValue(),my_value)
            javabridge.detach()
        t = threading.Thread(target = run)
        t.start()
        t.join()
    def test_02_02_delete_in_environment(self):
        env = self.env
        self.assertTrue(isinstance(env, javabridge.JB_Env))
        class MyInteger:
            new_fn = javabridge.make_new("java/lang/Integer",'(I)V')
            def __init__(self, value):
                self.new_fn(value)
            intValue = javabridge.make_method("intValue", '()I')
        my_value = 543
        my_integer=MyInteger(my_value)
        def run(my_integer = my_integer):
            env = javabridge.attach()
            self.assertEqual(my_integer.intValue(),my_value)
            del my_integer
            javabridge.detach()
        t = threading.Thread(target = run)
        t.start()
        t.join()
    def test_02_03_death_and_resurrection(self):
        '''Put an object into another in Java, delete it in Python and recover it'''
        np.random.seed(24)
        my_value = np.random.randint(0, 1000)
        jobj = javabridge.make_instance("java/lang/Integer", "(I)V", my_value)
        integer_klass = self.env.find_class("java/lang/Integer")
        jcontainer = self.env.make_object_array(1, integer_klass)
        self.env.set_object_array_element(jcontainer, 0, jobj)
        del jobj
        gc.collect()
        jobjs = self.env.get_object_array_elements(jcontainer)
        jobj = jobjs[0]
        self.assertEqual(javabridge.call(jobj, "intValue", "()I"), my_value)
    def test_02_04_non_java_thread_deletes_it(self):
        '''Delete a Java object on a not-Java thread'''
        refs = [javabridge.make_instance("java/lang/Integer", "(I)V", 5)]
        def run():
            del refs[0]
            gc.collect()
        t = threading.Thread(target = run)
        t.start()
        t.join()
    def test_03_01_cw_from_class(self):
        '''Get a class wrapper from a class'''
        c = javabridge.get_class_wrapper(javabridge.make_instance('java/lang/Integer', '(I)V',
                                                                  14))
    def test_03_02_cw_from_string(self):
        '''Get a class wrapper from a string'''
        c = javabridge.get_class_wrapper("java.lang.Number")
    def test_03_03_cw_get_classes(self):
        c = javabridge.get_class_wrapper('java.lang.Number')
        classes = c.getClasses()
        self.assertEqual(len(javabridge.get_env().get_object_array_elements(classes)), 0)
    def test_03_04_cw_get_annotation(self):
        c = javabridge.get_class_wrapper('java.security.Identity')
        annotation = c.getAnnotation(javabridge.class_for_name('java.lang.Deprecated'))
        self.assertTrue(annotation is not None)
    def test_03_05_cw_get_annotations(self):
        c = javabridge.get_class_wrapper('java.security.Identity')
        annotations = c.getAnnotations()
        annotations = javabridge.get_env().get_object_array_elements(annotations)
        self.assertEqual(len(annotations), 1)
        self.assertTrue(javabridge.to_string(annotations[0]).startswith('@java.lang.Deprecated'))
    def test_03_06_cw_get_constructors(self):
        c = javabridge.get_class_wrapper('java.lang.String')
        constructors = c.getConstructors()
        constructors = javabridge.get_env().get_object_array_elements(constructors)
        self.assertEqual(len(constructors), 15)
    def test_03_07_cw_get_fields(self):
        c = javabridge.get_class_wrapper('java.lang.String')
        fields = c.getFields()
        fields = javabridge.get_env().get_object_array_elements(fields)
        self.assertEqual(len(fields), 1)
        self.assertEqual(javabridge.call(fields[0], 'getName', '()Ljava/lang/String;'),
                         "CASE_INSENSITIVE_ORDER")
    def test_03_08_cw_get_field(self):
        c = javabridge.get_class_wrapper('java.lang.String')
        field = c.getField('CASE_INSENSITIVE_ORDER')
        modifiers = javabridge.call(field, 'getModifiers', '()I')
        static = javabridge.get_static_field('java/lang/reflect/Modifier','STATIC','I')
        self.assertEqual((modifiers & static), static)
    def test_03_09_cw_get_method(self):
        sclass = javabridge.class_for_name('java.lang.String')
        iclass = javabridge.get_static_field('java/lang/Integer', 'TYPE',
                                             'Ljava/lang/Class;')
        c = javabridge.get_class_wrapper('java.lang.String')
        m = c.getMethod('charAt', [ iclass ])
        self.assertEqual(javabridge.to_string(javabridge.call(m, 'getReturnType', '()Ljava/lang/Class;')), 'char')
        m = c.getMethod('concat', [ sclass])
        self.assertEqual(javabridge.to_string(javabridge.call(m, 'getReturnType', '()Ljava/lang/Class;')),
                         'class java.lang.String')
    def test_03_10_cw_get_methods(self):
        c = javabridge.get_class_wrapper('java.lang.String')
        mmm = javabridge.get_env().get_object_array_elements(c.getMethods())
        self.assertTrue(any([javabridge.call(m, 'getName', '()Ljava/lang/String;') == 'concat'
                             for m in mmm]))
    def test_03_11_cw_get_constructor(self):
        c = javabridge.get_class_wrapper('java.lang.String')
        sclass = javabridge.class_for_name('java.lang.String')
        constructor = c.getConstructor([sclass])
        self.assertEqual(javabridge.call(constructor, 'getName', '()Ljava/lang/String;'),
                         'java.lang.String')
    def test_04_01_field_get(self):
        c = javabridge.get_class_wrapper('java.lang.Byte')
        f = javabridge.get_field_wrapper(c.getField('MAX_VALUE'))
        v = f.get(None)
        self.assertEqual(javabridge.to_string(v), '127')
    def test_04_02_field_name(self):
        c = javabridge.get_class_wrapper('java.lang.Byte')
        f = javabridge.get_field_wrapper(c.getField('MAX_VALUE'))
        self.assertEqual(f.getName(), 'MAX_VALUE')
    def test_04_03_field_type(self):
        c = javabridge.get_class_wrapper('java.lang.Byte')
        f = javabridge.get_field_wrapper(c.getField('MAX_VALUE'))
        t = f.getType()
        self.assertEqual(javabridge.to_string(t), 'byte')
    def test_05_01_run_script(self):
        self.assertEqual(javabridge.run_script("2+2"), 4)
    def test_05_02_run_script_with_inputs(self):
        self.assertEqual(javabridge.run_script("a+b", bindings_in={"a":2, "b":3}), 5)
    def test_05_03_run_script_with_outputs(self):
        outputs = { "result": None}
        javabridge.run_script("var result = 2+2;", bindings_out=outputs)
        self.assertEqual(outputs["result"], 4)
    def test_06_01_execute_asynch_main(self):
        javabridge.execute_runnable_in_main_thread(javabridge.run_script(
            "new java.lang.Runnable() { run:function() {}};"))
    def test_06_02_execute_synch_main(self):
        javabridge.execute_runnable_in_main_thread(javabridge.run_script(
            "new java.lang.Runnable() { run:function() {}};"), True)
    def test_06_03_future_main(self):
        c = javabridge.run_script("""
        new java.util.concurrent.Callable() {
            call: function() { return 2+2; }};""")
        result = javabridge.execute_future_in_main_thread(
            javabridge.make_future_task(c, fn_post_process=javabridge.unwrap_javascript))
        self.assertEqual(result, 4)
    def test_07_01_wrap_future(self):
        future = javabridge.run_script("""
        new java.util.concurrent.FutureTask(
            new java.util.concurrent.Callable() {
                call: function() { return 2+2; }});""")
        wfuture = javabridge.get_future_wrapper(
            future, fn_post_process=javabridge.unwrap_javascript)
        self.assertFalse(wfuture.isDone())
        self.assertFalse(wfuture.isCancelled())
        wfuture.run()
        self.assertTrue(wfuture.isDone())
        self.assertEqual(wfuture.get(), 4)
    def test_07_02_cancel_future(self):
        future = javabridge.run_script("""
        new java.util.concurrent.FutureTask(
            new java.util.concurrent.Callable() {
                call: function() { return 2+2; }});""")
        wfuture = javabridge.get_future_wrapper(
            future, fn_post_process=javabridge.unwrap_javascript)
        wfuture.cancel(True)
        self.assertTrue(wfuture.isCancelled())
        self.assertRaises(javabridge.JavaException, wfuture.get)
    def test_07_03_make_future_task_from_runnable(self):
        future = javabridge.make_future_task(
            javabridge.run_script("new java.lang.Runnable() { run: function() {}};"),
            11)
        future.run()
        self.assertEqual(javabridge.call(future.get(), "intValue", "()I"), 11)
    def test_07_04_make_future_task_from_callable(self):
        call_able = javabridge.run_script("""
        new java.util.concurrent.Callable() {
            call: function() { return 2+2; }};""")
        future = javabridge.make_future_task(
            call_able, fn_post_process=javabridge.unwrap_javascript)
        future.run()
        self.assertEqual(future.get(), 4)
    def test_08_01_wrap_collection(self):
        c = javabridge.make_instance("java/util/HashSet", "()V")
        w = javabridge.get_collection_wrapper(c)
        self.assertFalse(hasattr(w, "addI"))
        self.assertEqual(w.size(), 0)
        self.assertEqual(len(w), 0)
        self.assertTrue(w.isEmpty())
    def test_08_02_add(self):
        c = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
        self.assertTrue(c.add("Foo"))
        self.assertEqual(len(c), 1)
        self.assertFalse(c.isEmpty())
    def test_08_03_contains(self):
        c = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
        c.add("Foo")
        self.assertTrue(c.contains("Foo"))
        self.assertFalse(c.contains("Bar"))
        self.assertIn("Foo", c)
        self.assertNotIn("Bar", c)
    def test_08_04_addAll(self):
        c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
        c1.add("Foo")
        c1.add("Bar")
        c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
        c2.add("Baz")
        c2.addAll(c1.o)
        self.assertIn("Foo", c2)
    def test_08_05__add__(self):
        c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
        c1.add("Foo")
        c1.add("Bar")
        c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
        c2.add("Baz")
        c3 = c1 + c2
        for k in ("Foo", "Bar", "Baz"):
            self.assertIn(k, c3)
        c4 = c3 + ["Hello", "World"]
        self.assertIn("Hello", c4)
        self.assertIn("World", c4)
    def test_08_06__iadd__(self):
        c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
        c1.add("Foo")
        c1.add("Bar")
        c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
        c2.add("Baz")
        c2 += c1
        for k in ("Foo", "Bar", "Baz"):
            self.assertIn(k, c2)
        c2 += ["Hello", "World"]
        self.assertIn("Hello", c2)
        self.assertIn("World", c2)
    def test_08_07_contains_all(self):
        c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
        c1.add("Foo")
        c1.add("Bar")
        c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
        c2.add("Baz")
        self.assertFalse(c2.containsAll(c1.o))
        c2 += c1
        self.assertTrue(c2.containsAll(c1.o))
    def test_08_08_remove(self):
        c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
        c1.add("Foo")
        c1.add("Bar")
        c1.remove("Foo")
        self.assertNotIn("Foo", c1)
    def test_08_09_removeAll(self):
        c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
        c1.add("Foo")
        c1.add("Bar")
        c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
        c2.add("Foo")
        c1.removeAll(c2)
        self.assertNotIn("Foo", c1)
    def test_08_10_retainAll(self):
        c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
        c1.add("Foo")
        c1.add("Bar")
        c2 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
        c2.add("Foo")
        c1.retainAll(c2)
        self.assertIn("Foo", c1)
        self.assertNotIn("Bar", c1)
    def test_08_11_toArray(self):
        c1 = javabridge.get_collection_wrapper(javabridge.make_instance("java/util/HashSet", "()V"))
        c1.add("Foo")
        c1.add("Bar")
        result = [javabridge.to_string(x) for x in c1.toArray()]
        self.assertIn("Foo", result)
        self.assertIn("Bar", result)
    def test_08_12_make_list(self):
        l = javabridge.make_list(["Foo", "Bar"])
        self.assertSequenceEqual(l, ["Foo", "Bar"])
        self.assertTrue(hasattr(l, "addI"))
    def test_08_13_addI(self):
        l = javabridge.make_list(["Foo", "Bar"])
        l.addI(1, "Baz")
        self.assertSequenceEqual(l, ["Foo", "Baz", "Bar"])
    def test_08_14_addAllI(self):
        l = javabridge.make_list(["Foo", "Bar"])
        l.addAllI(1, javabridge.make_list(["Baz"]))
        self.assertSequenceEqual(l, ["Foo", "Baz", "Bar"])
    def test_08_15_indexOf(self):
        l = javabridge.make_list(["Foo", "Bar"])
        self.assertEqual(l.indexOf("Bar"), 1)
        self.assertEqual(l.lastIndexOf("Foo"), 0)
    def test_08_16_get(self):
        l = javabridge.make_list(["Foo", "Bar"])
        self.assertEqual(l.get(1), "Bar")
    def test_08_17_set(self):
        l = javabridge.make_list(["Foo", "Bar"])
        l.set(1, "Baz")
        self.assertEqual(l.get(1), "Baz")
    def test_08_18_subList(self):
        l = javabridge.make_list(["Foo", "Bar", "Baz", "Hello", "World"])
        self.assertSequenceEqual(l.subList(1, 3), ["Bar", "Baz"])
    def test_08_19__getitem__(self):
        l = javabridge.make_list(["Foo", "Bar", "Baz", "Hello", "World"])
        self.assertEqual(l[1], "Bar")
        self.assertEqual(l[-2], "Hello")
        self.assertSequenceEqual(l[1:3], ["Bar", "Baz"])
        self.assertSequenceEqual(l[::3], ["Foo", "Hello"])
    def test_08_20__setitem__(self):
        l = javabridge.make_list(["Foo", "Bar"])
        l[1] = "Baz"
        self.assertEqual(l.get(1), "Baz")
    def test_08_21__delitem__(self):
        l = javabridge.make_list(["Foo", "Bar", "Baz"])
        del l[1]
        self.assertSequenceEqual(l, ["Foo", "Baz"])
    def test_09_01_00_get_field(self):
        o = javabridge.make_instance("org/cellprofiler/javabridge/test/RealRect", "(DDDD)V", 1, 2, 3, 4)
        self.assertEqual(javabridge.get_field(o, "x", "D"), 1)
    def test_09_02_get_field_no_such_field(self):
        def fn():
            o = javabridge.make_instance("java/lang/Object", "()V")
            javabridge.get_field(o, "NoSuchField", "I")
        self.assertRaises(javabridge.JavaException, fn)
    def test_09_03_set_field(self):
        class_name = "org/cellprofiler/javabridge/test/RealRect"
        o = javabridge.make_instance(class_name, "()V")
        test_cases = (
            ("f_char", "C", "A"),
            ("f_byte", "B", 3),
            ("f_short", "S", 15),
            ("f_int", "I", 392),
            ("f_long", "J", -14),
            ("f_float", "F", 1.03),
            ("f_double", "D", -889.1),
            ("f_object", "Ljava/lang/Object;",
             javabridge.make_instance("java/lang/Integer", "(I)V", 15)),
            ("f_object", "Ljava/lang/Object;", None))
        for field_name, signature, value in test_cases:
            javabridge.set_field(o, field_name, signature, value)
            v = javabridge.get_field(o, field_name, signature)
            if isinstance(value, float):
                self.assertAlmostEqual(v, value)
            elif isinstance(value, javabridge.JB_Object):
                self.assertTrue(javabridge.call(
                    value, "equals", "(Ljava/lang/Object;)Z", v))
            else:
                self.assertEqual(v, value)
    def test_09_04_set_field_no_such_field(self):
        def fn():
            o = javabridge.make_instance("java/lang/Object", "()V")
            javabridge.set_field(o, "NoSuchField", "I", 1)
        self.assertRaises(javabridge.JavaException, fn)
    def test_10_01_iterate_java_on_non_iterator(self):
        #
        # Regression test of issue #11: the expression below segfaulted
        #
        def fn():
            list(javabridge.iterate_java(javabridge.make_list(range(10)).o))
        self.assertRaises(javabridge.JavaError, fn)
    # NOTE(review): this duplicates the 10_01 number of the previous test;
    # both still run under unittest discovery, so only the ordering label
    # is inconsistent.
    def test_10_01_class_path(self):
        for arg in ['-cp', '-classpath', '-Djava.class.path=foo']:
            self.assertRaises(ValueError, lambda: javabridge.start_vm([arg]))
    def test_11_01_make_run_dictionary(self):
        from javabridge.jutil import make_run_dictionary
        o = javabridge.make_instance("java/util/Hashtable", "()V")
        a = javabridge.make_instance("java/util/ArrayList", "()V")
        javabridge.call(
            o, "put",
            "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
            "foo", "bar")
        javabridge.call(
            o, "put",
            "(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
            "baz", a)
        d = make_run_dictionary(o)
        self.assertIn("foo", d)
        # was assertEquals -- deprecated alias of assertEqual
        self.assertEqual(d["foo"], "bar")
        self.assertIn("baz", d)
        self.assertTrue(javabridge.call(d["baz"], "equals",
                                        "(Ljava/lang/Object;)Z", a))
    def test_12_01_jref(self):
        o = dict(foo="bar", baz="2")
        ref_id, ref = javabridge.create_jref(o)
        alt = javabridge.redeem_jref(ref_id)
        o["bar"] = "bunny"
        for key in o:
            self.assertTrue(key in alt)
            self.assertEqual(o[key], alt[key])
    def test_12_02_jref_lost(self):
        o = dict(foo="bar", baz="2")
        ref_id, ref = javabridge.create_jref(o)
        del ref
        self.assertRaises(KeyError, javabridge.redeem_jref, ref_id)
    def test_12_03_jref_create_and_lock(self):
        cpython = javabridge.JClassWrapper(
            'org.cellprofiler.javabridge.CPython')()
        d = javabridge.JClassWrapper('java.util.Hashtable')()
        result = javabridge.JClassWrapper('java.util.ArrayList')()
        d.put("result", result)
        ref_self = javabridge.create_and_lock_jref(self)
        d.put("self", ref_self)
        cpython.execute(
            'import javabridge\n'
            'x = { "foo":"bar"}\n'
            'ref_id = javabridge.create_and_lock_jref(x)\n'
            'javabridge.JWrapper(result).add(ref_id)', d, d)
        cpython.execute(
            'import javabridge\n'
            'ref_id = javabridge.JWrapper(result).get(0)\n'
            'self = javabridge.redeem_jref(javabridge.to_string(self))\n'
            'self.assertEqual(javabridge.redeem_jref(ref_id)["foo"], "bar")\n'
            'javabridge.unlock_jref(ref_id)', d, d)
        javabridge.unlock_jref(ref_self)
        self.assertRaises(KeyError, javabridge.redeem_jref, ref_self)
    def test_13_01_unicode_arg(self):
        # On 2.x, check that a unicode argument is properly prepared
        s = u"Hola ni\u00F1os"
        s1, s2 = s.split(" ")
        if sys.version_info.major == 2:
            s2 = s2.encode("utf-8")
        env = javabridge.get_env()
        js1 = env.new_string(s1+" ")
        result = javabridge.call(
            js1, "concat", "(Ljava/lang/String;)Ljava/lang/String;", s2)
        self.assertEqual(s, result)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| |
from common_fixtures import *  # NOQA
# Directory that holds the v2 rancher-compose / docker-compose fixture files
# used by every test in this module.
RCCOMMANDS_SUBDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                 'resources/rccmds/v2')
logger = logging.getLogger(__name__)
# Console output rancher-compose prints when it starts a project; passed to
# launch_rancher_compose_from_file as the expected-response string.
start_project_str = "Starting"
# Skip the whole module when the fixture directory is absent.
if_compose_data_files = pytest.mark.skipif(
    not os.path.isdir(RCCOMMANDS_SUBDIR),
    reason='Rancher compose files directory location not set/does not Exist')
@if_compose_data_files
def test_rancher_compose_create_service(client,
                                        rancher_compose_container):
    """Exercise the rancher-compose ``create`` and ``up`` commands."""
    stack_name = random_str().replace("-", "")
    # Create the stack first, then bring it up in detached mode.
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", stack_name,
        "create", "Creating stack", "rc1.yml")
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", stack_name,
        "up -d", start_project_str, "rc1.yml")
    stack, svc = get_env_service_by_name(client, stack_name, "test1")
    # The service must come up active with the expected identity and scale.
    assert svc.state == "active"
    assert svc.scale == 3
    assert svc.name == "test1"
    check_config_for_service(client, svc, {"test1": "value1"}, 1)
    containers = get_service_container_list(client, svc)
    assert len(containers) == 3
    for instance in containers:
        assert instance.state == "running"
    delete_all(client, [stack])
@if_compose_data_files
def test_rancher_compose_start_stop(client,
                                    rancher_compose_container):
    """Exercise the rancher-compose ``stop`` and ``start`` commands.

    Brings a stack up, stops it and verifies containers are stopped,
    then starts it again and verifies containers are running.
    """
    # This method tests the rancher compose start and stop commands
    # Bug #4887 has been filed
    # Bug #4933 has been filed [Start command has no response,
    # Now "Started" response is being checked. Should be changed if required.
    env_name = random_str().replace("-", "")
    # Create an environment using up
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
        "up -d", start_project_str, "rc1.yml")
    env, service = get_env_service_by_name(client, env_name, "test1")
    # Confirm service is active and the containers are running
    assert service.state == "active"
    check_config_for_service(client, service, {"test1": "value1"}, 1)
    container_list = get_service_container_list(client, service)
    assert len(container_list) == 3
    for container in container_list:
        assert container.state == "running"
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
        "stop", "Stopped", rancher_compose="rc1.yml")
    # Note: We add a sleep as the stop command does not wait until complete
    time.sleep(10)
    service = client.wait_success(service)
    # Confirm service is inactive and the containers are stopped
    assert service.state == "inactive"
    container_list = get_service_container_list(client, service)
    assert len(container_list) == 3
    # Check for containers being stopped
    for container in container_list:
        assert container.state == "stopped"
    launch_rancher_compose_from_file(client, RCCOMMANDS_SUBDIR,
                                     "dc1.yml", env_name, "start -d",
                                     "Started", "rc1.yml")
    # Confirm service is active and the containers are running
    service = client.wait_success(service, 300)
    assert service.state == "active"
    check_config_for_service(client, service, {"test1": "value1"}, 1)
    container_list = get_service_container_list(client, service)
    assert len(container_list) == 3
    for container in container_list:
        assert container.state == "running"
    delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_start_down(client,
                                    rancher_compose_container):
    """Exercise the rancher-compose ``down`` and ``start`` commands.

    Same shape as the start/stop test, but uses ``down`` to stop the
    stack's containers.
    """
    # This method tests the rancher compose start and down commands
    env_name = random_str().replace("-", "")
    # Bug #4933 has been filed [Start command has no response,
    # Now "Started" response is being checked. Should be changed if required.
    # Create an environment using up
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
        "up -d", start_project_str, "rc1.yml")
    env, service = get_env_service_by_name(client, env_name, "test1")
    # Confirm service is active and the containers are running
    assert service.state == "active"
    check_config_for_service(client, service, {"test1": "value1"}, 1)
    container_list = get_service_container_list(client, service)
    assert len(container_list) == 3
    for container in container_list:
        assert container.state == "running"
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
        "down", "Stopped", "rc1.yml")
    # Note: We add a sleep as the down command does not wait until it completes
    time.sleep(10)
    service = client.wait_success(service)
    # Confirm service is inactive and the containers are stopped
    assert service.state == "inactive"
    container_list = get_service_container_list(client, service)
    assert len(container_list) == 3
    # Check for containers being stopped
    for container in container_list:
        assert container.state == "stopped"
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
        "start -d", "Started", "rc1.yml")
    # Confirm service is active and the containers are running
    service = client.wait_success(service, 300)
    assert service.state == "active"
    check_config_for_service(client, service, {"test1": "value1"}, 1)
    container_list = get_service_container_list(client, service)
    assert len(container_list) == 3
    for container in container_list:
        assert container.state == "running"
    delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_service_restart(client,
                                         rancher_compose_container):
    """Exercise the rancher-compose ``restart`` command.

    Brings up a two-service stack, checks every container started once,
    restarts the stack, then checks every container restarted
    (startCount == 2).
    """
    env_name = random_str().replace("-", "")
    # Create an environment using up
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc2.yml", env_name,
        "up -d", "Creating stack", "rc2.yml")
    env, service1 = get_env_service_by_name(client, env_name, "test1")
    env, service2 = get_env_service_by_name(client, env_name, "test2")
    # Confirm both services are active and the containers are running
    service1 = client.wait_success(service1, 300)
    service2 = client.wait_success(service2, 300)
    assert service1.state == "active"
    assert service2.state == "active"
    check_config_for_service(client, service1, {"test1": "value1"}, 1)
    check_config_for_service(client, service2, {"test2": "value2"}, 1)
    container_list1 = get_service_container_list(client, service1)
    assert len(container_list1) == 4
    for container in container_list1:
        assert container.state == "running"
        assert container.startCount == 1
    container_list2 = get_service_container_list(client, service2)
    assert len(container_list2) == 4
    for container in container_list2:
        assert container.state == "running"
        # BUG FIX: previously asserted on the stale loop variable from the
        # first loop ("container" while iterating as "con"), so service2's
        # start counts were never actually checked.
        assert container.startCount == 1
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc2.yml", env_name,
        "restart", "Restarting", "rc2.yml")
    env, service1 = get_env_service_by_name(client, env_name, "test1")
    env, service2 = get_env_service_by_name(client, env_name, "test2")
    # Confirm both services are active and every container restarted
    service1 = client.wait_success(service1, 300)
    service2 = client.wait_success(service2, 300)
    assert service1.state == "active"
    assert service2.state == "active"
    check_config_for_service(client, service1, {"test1": "value1"}, 1)
    check_config_for_service(client, service2, {"test2": "value2"}, 1)
    container_list1 = get_service_container_list(client, service1)
    assert len(container_list1) == 4
    for container in container_list1:
        assert container.state == "running"
        assert container.startCount == 2
    container_list2 = get_service_container_list(client, service2)
    assert len(container_list2) == 4
    for container in container_list2:
        assert container.state == "running"
        assert container.startCount == 2
    delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_service_restart_bat_inter(client,
                                                   rancher_compose_container):
    """Exercise ``restart`` with the --batch-size and --interval options.

    Same flow as the plain restart test, but restarts two containers at
    a time with a 100 ms interval.
    """
    env_name = random_str().replace("-", "")
    # Create an environment using up
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc2.yml", env_name,
        "up -d", "Creating stack", "rc2.yml")
    env, service1 = get_env_service_by_name(client, env_name, "test1")
    env, service2 = get_env_service_by_name(client, env_name, "test2")
    # Confirm both services are active and the containers are running
    service1 = client.wait_success(service1, 300)
    service2 = client.wait_success(service2, 300)
    assert service1.state == "active"
    assert service2.state == "active"
    check_config_for_service(client, service1, {"test1": "value1"}, 1)
    check_config_for_service(client, service2, {"test2": "value2"}, 1)
    container_list1 = get_service_container_list(client, service1)
    assert len(container_list1) == 4
    for container in container_list1:
        assert container.state == "running"
        assert container.startCount == 1
    container_list2 = get_service_container_list(client, service2)
    assert len(container_list2) == 4
    for container in container_list2:
        assert container.state == "running"
        # BUG FIX: previously asserted on the stale loop variable from the
        # first loop ("container" while iterating as "con"), so service2's
        # start counts were never actually checked.
        assert container.startCount == 1
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc2.yml", env_name,
        "restart --batch-size 2 --interval 100", "Restarting", "rc2.yml")
    env, service1 = get_env_service_by_name(client, env_name, "test1")
    env, service2 = get_env_service_by_name(client, env_name, "test2")
    # Confirm both services are active and every container restarted
    service1 = client.wait_success(service1, 300)
    service2 = client.wait_success(service2, 300)
    assert service1.state == "active"
    assert service2.state == "active"
    check_config_for_service(client, service1, {"test1": "value1"}, 1)
    check_config_for_service(client, service2, {"test2": "value2"}, 1)
    container_list1 = get_service_container_list(client, service1)
    assert len(container_list1) == 4
    for container in container_list1:
        assert container.state == "running"
        assert container.startCount == 2
    container_list2 = get_service_container_list(client, service2)
    assert len(container_list2) == 4
    for container in container_list2:
        assert container.state == "running"
        assert container.startCount == 2
    delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_delete(client,
                                         rancher_compose_container):
    # This method tests the delete command:
    # bring a stack up, verify it is healthy, then remove it with
    # ``rancher-compose rm -f`` and verify the service is gone.
    env_name = random_str().replace("-", "")

    # Create an environment using up
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
        "up -d", start_project_str, "rc1.yml")
    env, service = get_env_service_by_name(client, env_name, "test1")

    # Confirm service is active and the containers are running
    assert service.state == "active"
    check_config_for_service(client, service, {"test1": "value1"}, 1)
    container_list = get_service_container_list(client, service)
    assert len(container_list) == 3
    for container in container_list:
        assert container.state == "running"

    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
        "rm -f", "Deleting", "rc1.yml")

    # Confirm the service ends up in "removed" state after rm -f
    # (the old comment here wrongly said "active and running").
    service = client.wait_success(service, 300)
    assert service.state == "removed"
    delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_scale(client,
                                        rancher_compose_container):
    """Scale a service up and down with ``rancher-compose scale`` and
    verify the running container count tracks the requested scale.
    """
    def assert_running(svc, expected_count):
        # Every container of the service must exist and be running.
        instances = get_service_container_list(client, svc)
        assert len(instances) == expected_count
        assert all(inst.state == "running" for inst in instances)

    env_name = random_str().replace("-", "")

    # Bring the stack up at the initial scale of 3.
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
        "up -d", start_project_str, "rc1.yml")
    env, service = get_env_service_by_name(client, env_name, "test1")
    assert service.state == "active"
    check_config_for_service(client, service, {"test1": "value1"}, 1)
    assert_running(service, 3)

    # Scale up to 4 and confirm a container was added.
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
        "scale test1=4", "Setting scale", "rc1.yml")
    service = client.wait_success(service, 300)
    assert service.state == "active"
    assert_running(service, 4)

    # Scale back down to 3 and confirm a container was removed.
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
        "scale test1=3", "Setting scale", "rc1.yml")
    service = client.wait_success(service, 300)
    assert service.state == "active"
    assert_running(service, 3)

    delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_security(client,
                                           rancher_compose_container,
                                           socat_containers):
    # This method tests the options in security tab in the UI:
    # privileged mode, memory/cpu limits, cap add/drop, hostname and
    # pid mode are verified directly against ``docker inspect``.
    env_name = random_str().replace("-", "")

    # Create an environment using up
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc3.yml", env_name,
        "up -d", start_project_str, "rc3.yml")
    env, service = get_env_service_by_name(client, env_name, "test3")

    # Confirm service is active and the containers are running
    assert service.state == "active"
    container_list = get_service_container_list(client, service)
    assert len(container_list) == 3
    for con in container_list:
        assert con.state == "running"
        # Look the container up by external (docker) id to find its
        # host, then inspect it with the docker API on that host.
        containers = client.list_container(
            externalId=con.externalId,
            include="hosts",
            removed_null=True)
        docker_client = get_docker_client(containers[0].hosts[0])
        inspect = docker_client.inspect_container(con.externalId)
        logger.info("Checked for containers running " + con.name)
        assert inspect["State"]["Running"]
        # Values below mirror what dc3.yml requests.
        assert inspect["HostConfig"]["Privileged"]
        assert inspect["HostConfig"]["Memory"] == 104857600  # 100 MiB
        assert inspect["HostConfig"]["CpuShares"] == 256
        assert inspect["HostConfig"]["CapAdd"] == ["AUDIT_CONTROL",
                                                   "AUDIT_WRITE"]
        assert inspect["HostConfig"]["CapDrop"] == ["BLOCK_SUSPEND",
                                                    "CHOWN"]
        assert inspect["Config"]["Hostname"] == "rancherhost"
        assert inspect["HostConfig"]["PidMode"] == "host"
    delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_log_driver(client,
                                             rancher_compose_container,
                                             socat_containers):
    """Bring a stack up and verify the syslog log driver requested in
    dc3.yml is applied to the containers (via ``docker inspect``).
    """
    # This test case fails bcos of bug #4773
    env_name = random_str().replace("-", "")

    # Create an environment using up
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc3.yml", env_name,
        "up -d", start_project_str, "rc3.yml")
    env, service = get_env_service_by_name(client, env_name, "test3")

    # Confirm service is active and the containers are running
    assert service.state == "active"
    container_list = get_service_container_list(client, service)
    assert len(container_list) == 3
    for con in container_list:
        assert con.state == "running"
        containers = client.list_container(
            externalId=con.externalId,
            include="hosts",
            removed_null=True)
        docker_client = get_docker_client(containers[0].hosts[0])
        inspect = docker_client.inspect_container(con.externalId)
        # Fixed: the message previously lacked the trailing space every
        # sibling test uses, producing "running<name>" in the logs.
        logger.info("Checked for containers running " + con.name)
        assert inspect["State"]["Running"]
        assert inspect["HostConfig"]["LogConfig"]["Type"] == "syslog"
    delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_network(client,
                                          rancher_compose_container,
                                          socat_containers):
    """Verify the networking options from the UI's Network tab:
    hostname override / requested IP labels, domain name, DNS servers
    and DNS search domains (checked via ``docker inspect``).
    """
    hostname_override = "io.rancher.container.hostname_override"
    requested_ip = "io.rancher.container.requested_ip"
    env_name = random_str().replace("-", "")

    # Create an environment using up
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc4.yml", env_name,
        "up -d", start_project_str, "rc4.yml")
    env, service = get_env_service_by_name(client, env_name, "test4")

    # Confirm service is active and the containers are running
    assert service.state == "active"
    check_config_for_service(client, service,
                             {"testrc": "RANCHER_COMPOSE"}, 1)
    # Use the label-key constants defined above instead of repeating
    # the literal strings (values are byte-identical).
    check_config_for_service(client, service,
                             {requested_ip: "209.243.140.21"}, 1)
    check_config_for_service(client, service,
                             {hostname_override: "container_name"}, 1)
    container_list = get_service_container_list(client, service)
    assert len(container_list) == 2
    for con in container_list:
        assert con.state == "running"
        containers = client.list_container(
            externalId=con.externalId,
            include="hosts",
            removed_null=True)
        docker_client = get_docker_client(containers[0].hosts[0])
        inspect = docker_client.inspect_container(con.externalId)
        logger.info("Checked for containers running " + con.name)
        assert inspect["State"]["Running"]
        assert inspect["Config"]["Domainname"] == "xyz.com"
        assert \
            inspect["Config"]["Labels"][hostname_override] \
            == "container_name"
        assert inspect["Config"]["Labels"][requested_ip] == "209.243.140.21"
        dns_list = inspect["HostConfig"]["Dns"]
        dnssearch_list = inspect["HostConfig"]["DnsSearch"]
        assert "209.243.150.21" in dns_list
        assert "www.google.com" in dnssearch_list
    delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_volume(client,
                                         rancher_compose_container,
                                         socat_containers):
    """Bring up a stack with a named volume and verify the bind mount
    shows up in each container's HostConfig.
    """
    stack_name = random_str().replace("-", "")

    # Stand the stack up with ``rancher-compose up -d``.
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc5.yml", stack_name,
        "up -d", start_project_str, "rc5.yml")
    env, service = get_env_service_by_name(client, stack_name, "test5")

    # The service must be active with both of its containers running.
    assert service.state == "active"
    instances = get_service_container_list(client, service)
    assert len(instances) == 2
    for instance in instances:
        assert instance.state == "running"
        matches = client.list_container(
            externalId=instance.externalId,
            include="hosts",
            removed_null=True)
        docker_client = get_docker_client(matches[0].hosts[0])
        inspect = docker_client.inspect_container(instance.externalId)
        logger.info("Checked for containers running " + instance.name)
        assert inspect["State"]["Running"]
        # The named volume must be bound read-write at /home.
        assert "testvol:/home:rw" in inspect["HostConfig"]["Binds"]
    delete_all(client, [env])
| |
#!/usr/bin/python
# Copyright 2011 Software Freedom Conservancy.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Regexes accepted by :meth:`Color.from_string`.  All are applied with
# ``re.match`` (anchored at the start of the string).
RGB_PATTERN = r"^\s*rgb\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*\)\s*$"
RGB_PCT_PATTERN = r"^\s*rgb\(\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*\)\s*$"
RGBA_PATTERN = r"^\s*rgba\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(0|1|0\.\d+)\s*\)\s*$"
RGBA_PCT_PATTERN = r"^\s*rgba\(\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(\d{1,3}|\d{1,2}\.\d+)%\s*,\s*(0|1|0\.\d+)\s*\)\s*$"
# NOTE(review): the hex patterns are not ``$``-anchored, so trailing
# garbage after a valid triplet is silently accepted.  Left unchanged to
# preserve existing behaviour.
HEX_PATTERN = r"#([A-Fa-f0-9]{2})([A-Fa-f0-9]{2})([A-Fa-f0-9]{2})"
HEX3_PATTERN = r"#([A-Fa-f0-9])([A-Fa-f0-9])([A-Fa-f0-9])"
HSL_PATTERN = r"^\s*hsl\(\s*(\d{1,3})\s*,\s*(\d{1,3})%\s*,\s*(\d{1,3})%\s*\)\s*$"
HSLA_PATTERN = r"^\s*hsla\(\s*(\d{1,3})\s*,\s*(\d{1,3})%\s*,\s*(\d{1,3})%\s*,\s*(0|1|0\.\d+)\s*\)\s*$"


class Color(object):
    """
    Color conversion support class

    Example:

    .. code-block:: python

        from selenium.webdriver.support.color import Color

        print(Color.from_string('#00ff33').rgba)
        print(Color.from_string('rgb(1, 255, 3)').hex)
        print(Color.from_string('blue').rgba)
    """

    @staticmethod
    def from_string(str_):
        """Parse *str_* -- rgb()/rgba() (plain or percentage), 3- or
        6-digit hex, hsl()/hsla(), or a named colour -- into a
        :class:`Color`.

        :raises ValueError: if *str_* matches none of the known formats.
        """
        import re

        class Matcher(object):
            # Tiny helper so each ``elif`` branch can match a pattern
            # and later read the captured groups without re-matching.
            def __init__(self):
                self.match_obj = None

            def match(self, pattern, str_):
                self.match_obj = re.match(pattern, str_)
                return self.match_obj

            @property
            def groups(self):
                return () if self.match_obj is None else self.match_obj.groups()

        m = Matcher()

        if m.match(RGB_PATTERN, str_):
            return Color(*m.groups)
        elif m.match(RGB_PCT_PATTERN, str_):
            # Percentages are scaled into the 0-255 byte range.
            rgb = tuple([float(each) / 100 * 255 for each in m.groups])
            return Color(*rgb)
        elif m.match(RGBA_PATTERN, str_):
            return Color(*m.groups)
        elif m.match(RGBA_PCT_PATTERN, str_):
            rgba = tuple([float(each) / 100 * 255 for each in m.groups[:3]] + [m.groups[3]])
            return Color(*rgba)
        elif m.match(HEX_PATTERN, str_):
            rgb = tuple([int(each, 16) for each in m.groups])
            return Color(*rgb)
        elif m.match(HEX3_PATTERN, str_):
            # Shorthand hex: each digit is doubled ("f" -> "ff").
            rgb = tuple([int(each * 2, 16) for each in m.groups])
            return Color(*rgb)
        elif m.match(HSL_PATTERN, str_) or m.match(HSLA_PATTERN, str_):
            return Color._from_hsl(*m.groups)
        elif str_.upper() in Colors.keys():
            return Colors[str_.upper()]
        else:
            raise ValueError("Could not convert %s into color" % str_)

    @staticmethod
    def _from_hsl(h, s, l, a=1):
        """Convert HSL(A) components (CSS ranges: *h* in degrees, *s*
        and *l* in percent) to a :class:`Color`, following the CSS3
        color specification's algorithm.
        """
        h = float(h) / 360
        s = float(s) / 100
        l = float(l) / 100

        if s == 0:
            # Achromatic: every channel equals the lightness.
            r = l
            g = r
            b = r
        else:
            luminocity2 = l * (1 + s) if l < 0.5 else l + s - l * s
            luminocity1 = 2 * l - luminocity2

            def hue_to_rgb(lum1, lum2, hue):
                # Wrap the hue into [0, 1] before interpolating.
                if hue < 0.0:
                    hue += 1
                if hue > 1.0:
                    hue -= 1

                if hue < 1.0 / 6.0:
                    return (lum1 + (lum2 - lum1) * 6.0 * hue)
                elif hue < 1.0 / 2.0:
                    return lum2
                elif hue < 2.0 / 3.0:
                    return lum1 + (lum2 - lum1) * ((2.0 / 3.0) - hue) * 6.0
                else:
                    return lum1

            r = hue_to_rgb(luminocity1, luminocity2, h + 1.0 / 3.0)
            g = hue_to_rgb(luminocity1, luminocity2, h)
            b = hue_to_rgb(luminocity1, luminocity2, h - 1.0 / 3.0)

        # BUG FIX: r/g/b are fractions in [0, 1] and must be scaled by
        # 255, not 256 -- the old code produced an out-of-range 256 for
        # a full channel, e.g. hsl(120, 100%, 50%) gave rgb(0, 256, 0).
        return Color(round(r * 255), round(g * 255), round(b * 255), a)

    def __init__(self, red, green, blue, alpha=1):
        """Store the channels as ints (regex groups arrive as strings).

        :param red/green/blue: channel values, 0-255.
        :param alpha: opacity, 0-1 (defaults to fully opaque).
        """
        self.red = int(red)
        self.green = int(green)
        self.blue = int(blue)
        # ``or 0`` normalizes a zero alpha to the int 0 so that ``rgba``
        # serializes it as "0" rather than "0.0".
        self.alpha = float(alpha) or 0

    @property
    def rgb(self):
        """CSS ``rgb(r, g, b)`` serialization."""
        return "rgb(%d, %d, %d)" % (self.red, self.green, self.blue)

    @property
    def rgba(self):
        """CSS ``rgba(r, g, b, a)`` serialization; full opacity prints
        as "1"."""
        a = "1" if self.alpha == 1 else str(self.alpha)
        return "rgba(%d, %d, %d, %s)" % (self.red, self.green, self.blue, a)

    @property
    def hex(self):
        """Lowercase ``#rrggbb`` serialization (alpha is dropped)."""
        return "#%02x%02x%02x" % (self.red, self.green, self.blue)

    def __eq__(self, other):
        # Two colors are equal when their rgba serializations match.
        if isinstance(other, Color):
            return self.rgba == other.rgba
        return NotImplemented

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        return hash((self.red, self.green, self.blue, self.alpha))
# Basic, extended and transparent colour keywords as defined by the W3C HTML4 spec
# See http://www.w3.org/TR/css3-color/#html4
Colors = {
"TRANSPARENT": Color(0, 0, 0, 0),
"ALICEBLUE": Color(240, 248, 255),
"ANTIQUEWHITE": Color(250, 235, 215),
"AQUA": Color(0, 255, 255),
"AQUAMARINE": Color(127, 255, 212),
"AZURE": Color(240, 255, 255),
"BEIGE": Color(245, 245, 220),
"BISQUE": Color(255, 228, 196),
"BLACK": Color(0, 0, 0),
"BLANCHEDALMOND": Color(255, 235, 205),
"BLUE": Color(0, 0, 255),
"BLUEVIOLET": Color(138, 43, 226),
"BROWN": Color(165, 42, 42),
"BURLYWOOD": Color(222, 184, 135),
"CADETBLUE": Color(95, 158, 160),
"CHARTREUSE": Color(127, 255, 0),
"CHOCOLATE": Color(210, 105, 30),
"CORAL": Color(255, 127, 80),
"CORNFLOWERBLUE": Color(100, 149, 237),
"CORNSILK": Color(255, 248, 220),
"CRIMSON": Color(220, 20, 60),
"CYAN": Color(0, 255, 255),
"DARKBLUE": Color(0, 0, 139),
"DARKCYAN": Color(0, 139, 139),
"DARKGOLDENROD": Color(184, 134, 11),
"DARKGRAY": Color(169, 169, 169),
"DARKGREEN": Color(0, 100, 0),
"DARKGREY": Color(169, 169, 169),
"DARKKHAKI": Color(189, 183, 107),
"DARKMAGENTA": Color(139, 0, 139),
"DARKOLIVEGREEN": Color(85, 107, 47),
"DARKORANGE": Color(255, 140, 0),
"DARKORCHID": Color(153, 50, 204),
"DARKRED": Color(139, 0, 0),
"DARKSALMON": Color(233, 150, 122),
"DARKSEAGREEN": Color(143, 188, 143),
"DARKSLATEBLUE": Color(72, 61, 139),
"DARKSLATEGRAY": Color(47, 79, 79),
"DARKSLATEGREY": Color(47, 79, 79),
"DARKTURQUOISE": Color(0, 206, 209),
"DARKVIOLET": Color(148, 0, 211),
"DEEPPINK": Color(255, 20, 147),
"DEEPSKYBLUE": Color(0, 191, 255),
"DIMGRAY": Color(105, 105, 105),
"DIMGREY": Color(105, 105, 105),
"DODGERBLUE": Color(30, 144, 255),
"FIREBRICK": Color(178, 34, 34),
"FLORALWHITE": Color(255, 250, 240),
"FORESTGREEN": Color(34, 139, 34),
"FUCHSIA": Color(255, 0, 255),
"GAINSBORO": Color(220, 220, 220),
"GHOSTWHITE": Color(248, 248, 255),
"GOLD": Color(255, 215, 0),
"GOLDENROD": Color(218, 165, 32),
"GRAY": Color(128, 128, 128),
"GREY": Color(128, 128, 128),
"GREEN": Color(0, 128, 0),
"GREENYELLOW": Color(173, 255, 47),
"HONEYDEW": Color(240, 255, 240),
"HOTPINK": Color(255, 105, 180),
"INDIANRED": Color(205, 92, 92),
"INDIGO": Color(75, 0, 130),
"IVORY": Color(255, 255, 240),
"KHAKI": Color(240, 230, 140),
"LAVENDER": Color(230, 230, 250),
"LAVENDERBLUSH": Color(255, 240, 245),
"LAWNGREEN": Color(124, 252, 0),
"LEMONCHIFFON": Color(255, 250, 205),
"LIGHTBLUE": Color(173, 216, 230),
"LIGHTCORAL": Color(240, 128, 128),
"LIGHTCYAN": Color(224, 255, 255),
"LIGHTGOLDENRODYELLOW": Color(250, 250, 210),
"LIGHTGRAY": Color(211, 211, 211),
"LIGHTGREEN": Color(144, 238, 144),
"LIGHTGREY": Color(211, 211, 211),
"LIGHTPINK": Color(255, 182, 193),
"LIGHTSALMON": Color(255, 160, 122),
"LIGHTSEAGREEN": Color(32, 178, 170),
"LIGHTSKYBLUE": Color(135, 206, 250),
"LIGHTSLATEGRAY": Color(119, 136, 153),
"LIGHTSLATEGREY": Color(119, 136, 153),
"LIGHTSTEELBLUE": Color(176, 196, 222),
"LIGHTYELLOW": Color(255, 255, 224),
"LIME": Color(0, 255, 0),
"LIMEGREEN": Color(50, 205, 50),
"LINEN": Color(250, 240, 230),
"MAGENTA": Color(255, 0, 255),
"MAROON": Color(128, 0, 0),
"MEDIUMAQUAMARINE": Color(102, 205, 170),
"MEDIUMBLUE": Color(0, 0, 205),
"MEDIUMORCHID": Color(186, 85, 211),
"MEDIUMPURPLE": Color(147, 112, 219),
"MEDIUMSEAGREEN": Color(60, 179, 113),
"MEDIUMSLATEBLUE": Color(123, 104, 238),
"MEDIUMSPRINGGREEN": Color(0, 250, 154),
"MEDIUMTURQUOISE": Color(72, 209, 204),
"MEDIUMVIOLETRED": Color(199, 21, 133),
"MIDNIGHTBLUE": Color(25, 25, 112),
"MINTCREAM": Color(245, 255, 250),
"MISTYROSE": Color(255, 228, 225),
"MOCCASIN": Color(255, 228, 181),
"NAVAJOWHITE": Color(255, 222, 173),
"NAVY": Color(0, 0, 128),
"OLDLACE": Color(253, 245, 230),
"OLIVE": Color(128, 128, 0),
"OLIVEDRAB": Color(107, 142, 35),
"ORANGE": Color(255, 165, 0),
"ORANGERED": Color(255, 69, 0),
"ORCHID": Color(218, 112, 214),
"PALEGOLDENROD": Color(238, 232, 170),
"PALEGREEN": Color(152, 251, 152),
"PALETURQUOISE": Color(175, 238, 238),
"PALEVIOLETRED": Color(219, 112, 147),
"PAPAYAWHIP": Color(255, 239, 213),
"PEACHPUFF": Color(255, 218, 185),
"PERU": Color(205, 133, 63),
"PINK": Color(255, 192, 203),
"PLUM": Color(221, 160, 221),
"POWDERBLUE": Color(176, 224, 230),
"PURPLE": Color(128, 0, 128),
"RED": Color(255, 0, 0),
"ROSYBROWN": Color(188, 143, 143),
"ROYALBLUE": Color(65, 105, 225),
"SADDLEBROWN": Color(139, 69, 19),
"SALMON": Color(250, 128, 114),
"SANDYBROWN": Color(244, 164, 96),
"SEAGREEN": Color(46, 139, 87),
"SEASHELL": Color(255, 245, 238),
"SIENNA": Color(160, 82, 45),
"SILVER": Color(192, 192, 192),
"SKYBLUE": Color(135, 206, 235),
"SLATEBLUE": Color(106, 90, 205),
"SLATEGRAY": Color(112, 128, 144),
"SLATEGREY": Color(112, 128, 144),
"SNOW": Color(255, 250, 250),
"SPRINGGREEN": Color(0, 255, 127),
"STEELBLUE": Color(70, 130, 180),
"TAN": Color(210, 180, 140),
"TEAL": Color(0, 128, 128),
"THISTLE": Color(216, 191, 216),
"TOMATO": Color(255, 99, 71),
"TURQUOISE": Color(64, 224, 208),
"VIOLET": Color(238, 130, 238),
"WHEAT": Color(245, 222, 179),
"WHITE": Color(255, 255, 255),
"WHITESMOKE": Color(245, 245, 245),
"YELLOW": Color(255, 255, 0),
"YELLOWGREEN": Color(154, 205, 50)
}
| |
# -*- coding: utf-8 -*-
"""
pyte.screens
~~~~~~~~~~~~
This module provides classes for terminal screens, currently
it contains three screens with different features:
* :class:`~pyte.screens.Screen` -- base screen implementation,
which handles all the core escape sequences, recognized by
:class:`~pyte.streams.Stream`.
* If you need a screen to keep track of the changed lines
(which you probably do need) -- use
:class:`~pyte.screens.DiffScreen`.
* If you also want a screen to collect history and allow
pagination -- :class:`pyte.screen.HistoryScreen` is here
for ya ;)
.. note:: It would be nice to split those features into mixin
classes, rather than subclasses, but it's not obvious
how to do -- feel free to submit a pull request.
:copyright: (c) 2011-2013 Selectel, see AUTHORS for details.
:license: LGPL, see LICENSE for more details.
"""
from __future__ import absolute_import, unicode_literals, division
import copy
import math
import operator
import sys
from collections import deque, namedtuple
from itertools import islice, repeat
from . import modes as mo, graphics as g, charsets as cs
if sys.version_info[0] == 2:
from future_builtins import map
range = xrange
def take(n, iterable):
    """Return a list of at most *n* leading items from *iterable*."""
    prefix = islice(iterable, n)
    return list(prefix)
#: A container for screen's scroll margins.
Margins = namedtuple("Margins", "top bottom")

#: A container for savepoint, created on :data:`~pyte.escape.DECSC`.
#: ``origin`` and ``wrap`` record whether DECOM / DECAWM were active at
#: save time (see ``Screen.save_cursor`` / ``Screen.restore_cursor``).
Savepoint = namedtuple("Savepoint", [
    "cursor",
    "g0_charset",
    "g1_charset",
    "charset",
    "origin",
    "wrap"
])
#: A container for a single character, field names are *hopefully*
#: self-explanatory.
_Char = namedtuple("_Char", [
    "data",
    "fg",
    "bg",
    "bold",
    "italics",
    "underscore",
    "strikethrough",
    "reverse",
])


class Char(_Char):
    """A wrapper around :class:`_Char`, providing some useful defaults
    for most of the attributes.

    Note that the keyword order of ``__new__`` (``reverse`` before
    ``strikethrough``) differs from the ``_Char`` field order
    (``strikethrough`` before ``reverse``).
    """
    __slots__ = ()

    def __new__(cls, data, fg="default", bg="default", bold=False,
                italics=False, underscore=False, reverse=False,
                strikethrough=False):
        # BUG FIX: positional arguments must follow the _Char field
        # order -- ``strikethrough`` before ``reverse``.  The original
        # passed them swapped, so ``Char(reverse=True)`` actually set
        # the ``strikethrough`` field (and vice versa).
        return _Char.__new__(cls, data, fg, bg, bold, italics, underscore,
                             strikethrough, reverse)
class Cursor(object):
    """Screen cursor.

    :param int x: horizontal cursor position.
    :param int y: vertical cursor position.
    :param pyte.screens.Char attrs: cursor attributes (see
        :meth:`~pyte.screens.Screen.select_graphic_rendition`
        for details).
    """
    def __init__(self, x, y, attrs=Char(" ")):
        # The default ``attrs`` instance is shared between cursors;
        # this is safe because Char is an immutable namedtuple.
        # ``hidden`` is toggled by DECTCEM (see Screen.set_mode).
        self.x, self.y, self.attrs, self.hidden = x, y, attrs, False
class Screen(object):
"""
A screen is an in-memory matrix of characters that represents the
screen display of the terminal. It can be instantiated on it's own
and given explicit commands, or it can be attached to a stream and
will respond to events.
.. attribute:: buffer
A ``lines x columns`` :class:`~pyte.screens.Char` matrix.
.. attribute:: cursor
Reference to the :class:`~pyte.screens.Cursor` object, holding
cursor position and attributes.
.. attribute:: margins
Top and bottom screen margins, defining the scrolling region;
the actual values are top and bottom line.
.. attribute:: charset
Current charset number; can be either ``0`` or ``1`` for `G0`
and `G1` respectively, note that `G0` is activated by default.
.. note::
According to ``ECMA-48`` standard, **lines and columnns are
1-indexed**, so, for instance ``ESC [ 10;10 f`` really means
-- move cursor to position (9, 9) in the display matrix.
.. versionchanged:: 0.4.7
.. warning::
:data:`~pyte.modes.LNM` is reset by default, to match VT220
specification.
.. versionchanged:: 0.4.8
.. warning::
If `DECAWM` mode is set than a cursor will be wrapped to the
**beginning* of the next line, which is the behaviour described
in ``man console_codes``.
.. seealso::
`Standard ECMA-48, Section 6.1.1 \
<http://www.ecma-international.org/publications
/standards/Ecma-048.htm>`_
For a description of the presentational component, implemented
by ``Screen``.
"""
    #: A plain empty character with default foreground and background
    #: colors.
    default_char = Char(data=" ", fg="default", bg="default")

    #: An infinite sequence of default characters, used for populating
    #: new lines and columns.  Shared across instances; safe because
    #: ``repeat`` never exhausts and always yields the same Char.
    default_line = repeat(default_char)

    def __init__(self, columns, lines):
        """Create a ``lines x columns`` screen and reset it.

        :param int columns: screen width in characters.
        :param int lines: screen height in lines.
        """
        # Stack of Savepoint objects, pushed by :meth:`save_cursor`
        # and popped by :meth:`restore_cursor`.
        self.savepoints = []
        self.lines, self.columns = lines, columns
        self.buffer = []
        # :meth:`reset` fills the buffer and initializes modes,
        # charsets, margins, tabstops and the cursor.
        self.reset()
    def __repr__(self):
        # e.g. "Screen(80, 24)" -- columns first, matching __init__.
        return ("{0}({1}, {2})".format(self.__class__.__name__,
                                       self.columns, self.lines))

    def __before__(self, command):
        """Hook, called **before** a command is dispatched to the
        :class:`Screen` instance.

        :param str command: command name, for example ``"LINEFEED"``.
        """
        # Intentionally a no-op; subclasses may override.

    def __after__(self, command):
        """Hook, called **after** a command is dispatched to the
        :class:`Screen` instance.

        :param str command: command name, for example ``"LINEFEED"``.
        """
        # Intentionally a no-op; subclasses may override.

    @property
    def size(self):
        """Returns screen size -- ``(lines, columns)``"""
        return self.lines, self.columns

    @property
    def display(self):
        """Returns a :func:`list` of screen lines as unicode strings."""
        # Each buffer row is a list of Char namedtuples; joining their
        # ``data`` fields yields the visible text of that line.
        return ["".join(map(operator.attrgetter("data"), line))
                for line in self.buffer]
    def reset(self):
        """Resets the terminal to its initial state.

        * Scroll margins are reset to screen boundaries.
        * Cursor is moved to home location -- ``(0, 0)`` and its
          attributes are set to defaults (see :attr:`default_char`).
        * Screen is cleared -- each character is reset to
          :attr:`default_char`.
        * Tabstops are reset to "every eight columns".

        .. note::

           Neither VT220 nor VT102 manuals mentioned that terminal modes
           and tabstops should be reset as well, thanks to
           :manpage:`xterm` -- we now know that.
        """
        # Rebuild every line from the infinite stream of default chars.
        self.buffer[:] = (take(self.columns, self.default_line)
                          for _ in range(self.lines))
        # Auto-wrap (DECAWM) and visible cursor (DECTCEM) are the only
        # modes enabled by default.
        self.mode = set([mo.DECAWM, mo.DECTCEM])
        self.margins = Margins(0, self.lines - 1)

        # According to VT220 manual and ``linux/drivers/tty/vt.c``
        # the default G0 charset is latin-1, but for reasons unknown
        # latin-1 breaks ascii-graphics; so G0 defaults to cp437.
        self.charset = 0
        self.g0_charset = cs.IBMPC_MAP
        self.g1_charset = cs.VT100_MAP

        # From ``man terminfo`` -- "... hardware tabs are initially
        # set every `n` spaces when the terminal is powered up. Since
        # we aim to support VT102 / VT220 and linux -- we use n = 8.
        self.tabstops = set(range(7, self.columns, 8))

        self.cursor = Cursor(0, 0)
        # Home the cursor via cursor_position() (defined further down
        # in the class, outside this chunk).
        self.cursor_position()
    def resize(self, lines=None, columns=None):
        """Resize the screen to the given dimensions.

        If the requested screen size has more lines than the existing
        screen, lines will be added at the bottom. If the requested
        size has less lines than the existing screen lines will be
        clipped at the top of the screen. Similarly, if the existing
        screen has less columns than the requested screen, columns will
        be added at the right, and if it has more -- columns will be
        clipped at the right.

        .. note:: According to `xterm`, we should also reset origin
                  mode and screen margins, see ``xterm/screen.c:1761``.

        :param int lines: number of lines in the new screen.
        :param int columns: number of columns in the new screen.
        """
        # ``None`` (or 0) keeps the current dimension unchanged.
        lines = lines or self.lines
        columns = columns or self.columns

        # First resize the lines:
        diff = self.lines - lines

        # a) if the current display size is less than the requested
        #    size, add lines to the bottom.
        if diff < 0:
            self.buffer.extend(take(self.columns, self.default_line)
                               for _ in range(diff, 0))
        # b) if the current display size is greater than requested
        #    size, take lines off the top.
        elif diff > 0:
            self.buffer[:diff] = ()

        # Then resize the columns:
        diff = self.columns - columns

        # a) if the current display size is less than the requested
        #    size, expand each line to the new size.
        if diff < 0:
            for y in range(lines):
                self.buffer[y].extend(take(abs(diff), self.default_line))
        # b) if the current display size is greater than requested
        #    size, trim each line from the right to the new size.
        elif diff > 0:
            for line in self.buffer:
                del line[columns:]

        self.lines, self.columns = lines, columns
        # Margins span the full screen again and origin mode is reset,
        # matching the xterm behaviour cited in the docstring.
        self.margins = Margins(0, self.lines - 1)
        self.reset_mode(mo.DECOM)
    def set_margins(self, top=None, bottom=None):
        """Selects top and bottom margins for the scrolling region.

        Margins determine which screen lines move during scrolling
        (see :meth:`index` and :meth:`reverse_index`). Characters added
        outside the scrolling region do not cause the screen to scroll.

        :param int top: the smallest line number that is scrolled.
        :param int bottom: the biggest line number that is scrolled.
        """
        # Both margins are required; a partial DECSTBM is ignored.
        if top is None or bottom is None:
            return

        # Arguments are 1-based, while :attr:`margins` are zero based --
        # so we have to decrement them by one. We also make sure that
        # both of them is bounded by [0, lines - 1].
        top = max(0, min(top - 1, self.lines - 1))
        bottom = max(0, min(bottom - 1, self.lines - 1))

        # Even though VT102 and VT220 require DECSTBM to ignore regions
        # of width less than 2, some programs (like aptitude for example)
        # rely on it. Practicality beats purity.
        if bottom - top >= 1:
            self.margins = Margins(top, bottom)

            # The cursor moves to the home position when the top and
            # bottom margins of the scrolling region (DECSTBM) changes.
            self.cursor_position()
    def set_charset(self, code, mode):
        """Set active ``G0`` or ``G1`` charset.

        :param str code: character set code, should be a character
                         from ``"B0UK"`` -- otherwise ignored.
        :param str mode: if ``"("`` ``G0`` charset is set, if
                         ``")"`` -- we operate on ``G1``.

        .. warning:: User-defined charsets are currently not supported.
        """
        # Unknown codes are silently ignored; unknown modes would raise
        # KeyError from the dict lookup below.
        if code in cs.MAPS:
            setattr(self, {"(": "g0_charset", ")": "g1_charset"}[mode],
                    cs.MAPS[code])
    def set_mode(self, *modes, **kwargs):
        """Sets (enables) a given list of modes.

        :param list modes: modes to set, where each mode is a constant
                           from :mod:`pyte.modes`.
        :param bool private: (keyword-only) when true, the mode codes
                             are treated as DEC private modes.
        """
        # Private mode codes are shifted, to be distingiushed from non
        # private ones.
        if kwargs.get("private"):
            modes = [mode << 5 for mode in modes]

        self.mode.update(modes)

        # When DECOLM mode is set, the screen is erased and the cursor
        # moves to the home position.
        if mo.DECCOLM in modes:
            self.resize(columns=132)
            self.erase_in_display(2)
            self.cursor_position()

        # According to `vttest`, DECOM should also home the cursor, see
        # vttest/main.c:303.
        if mo.DECOM in modes:
            self.cursor_position()

        # Mark all displayed characters as reverse.
        if mo.DECSCNM in modes:
            self.buffer[:] = ([char._replace(reverse=True) for char in line]
                              for line in self.buffer)
            self.select_graphic_rendition(g._SGR["+reverse"])

        # Make the cursor visible.
        if mo.DECTCEM in modes:
            self.cursor.hidden = False
    def reset_mode(self, *modes, **kwargs):
        """Resets (disables) a given list of modes.

        :param list modes: modes to reset -- hopefully, each mode is a
                           constant from :mod:`pyte.modes`.
        :param bool private: (keyword-only) when true, the mode codes
                             are treated as DEC private modes.
        """
        # Private mode codes are shifted, to be distingiushed from non
        # private ones.
        if kwargs.get("private"):
            modes = [mode << 5 for mode in modes]

        self.mode.difference_update(modes)

        # Lines below follow the logic in :meth:`set_mode`, but with
        # the inverse effect for each mode.
        if mo.DECCOLM in modes:
            self.resize(columns=80)
            self.erase_in_display(2)
            self.cursor_position()

        if mo.DECOM in modes:
            self.cursor_position()

        if mo.DECSCNM in modes:
            self.buffer[:] = ([char._replace(reverse=False) for char in line]
                              for line in self.buffer)
            self.select_graphic_rendition(g._SGR["-reverse"])

        # Hide the cursor.
        if mo.DECTCEM in modes:
            self.cursor.hidden = True
    def shift_in(self):
        """Activates ``G0`` character set (see :meth:`draw`)."""
        self.charset = 0

    def shift_out(self):
        """Activates ``G1`` character set (see :meth:`draw`)."""
        self.charset = 1
    def draw(self, char):
        """Display a character at the current cursor position and advance
        the cursor if :data:`~pyte.modes.DECAWM` is set.

        :param str char: a character to display.
        """
        # Translating a given character through the active charset map
        # (G0 or G1, selected by shift_in / shift_out).
        char = char.translate([self.g0_charset,
                               self.g1_charset][self.charset])

        # If this was the last column in a line and auto wrap mode is
        # enabled, move the cursor to the beginning of the next line,
        # otherwise replace characters already displayed with newly
        # entered.
        if self.cursor.x == self.columns:
            if mo.DECAWM in self.mode:
                self.carriage_return()
                self.linefeed()
            else:
                self.cursor.x -= 1

        # If Insert mode is set, new characters move old characters to
        # the right, otherwise terminal is in Replace mode and new
        # characters replace old characters at cursor position.
        if mo.IRM in self.mode:
            self.insert_characters(1)

        # The drawn cell inherits the cursor's current attributes.
        self.buffer[self.cursor.y][self.cursor.x] = self.cursor.attrs \
            ._replace(data=char)

        # .. note:: We can't use :meth:`cursor_forward()`, because that
        #           way, we'll never know when to linefeed.
        self.cursor.x += 1
    def carriage_return(self):
        """Move the cursor to the beginning of the current line."""
        self.cursor.x = 0

    def index(self):
        """Move the cursor down one line in the same column. If the
        cursor is at the last line, create a new line at the bottom.
        """
        top, bottom = self.margins

        # At the bottom margin the region scrolls up by one line:
        # drop the top line and append a fresh blank line at bottom.
        if self.cursor.y == bottom:
            self.buffer.pop(top)
            self.buffer.insert(bottom, take(self.columns, self.default_line))
        else:
            self.cursor_down()

    def reverse_index(self):
        """Move the cursor up one line in the same column. If the cursor
        is at the first line, create a new line at the top.
        """
        top, bottom = self.margins

        # Mirror image of :meth:`index` -- scroll the region down.
        if self.cursor.y == top:
            self.buffer.pop(bottom)
            self.buffer.insert(top, take(self.columns, self.default_line))
        else:
            self.cursor_up()

    def linefeed(self):
        """Performs an index and, if :data:`~pyte.modes.LNM` is set, a
        carriage return.
        """
        self.index()

        if mo.LNM in self.mode:
            self.carriage_return()

        # ensure_bounds() is defined further down in the class
        # (outside this chunk); presumably it clamps the cursor to the
        # screen -- TODO confirm.
        self.ensure_bounds()
def tab(self):
"""Move to the next tab space, or the end of the screen if there
aren't anymore left.
"""
for stop in sorted(self.tabstops):
if self.cursor.x < stop:
column = stop
break
else:
column = self.columns - 1
self.cursor.x = column
def backspace(self):
"""Move cursor to the left one or keep it in it's position if
it's at the beginning of the line already.
"""
self.cursor_back()
    def save_cursor(self):
        """Push the current cursor position onto the stack."""
        # The cursor is copied so later movement doesn't mutate the
        # saved state; charsets and DECOM/DECAWM flags are saved too.
        self.savepoints.append(Savepoint(copy.copy(self.cursor),
                                         self.g0_charset,
                                         self.g1_charset,
                                         self.charset,
                                         mo.DECOM in self.mode,
                                         mo.DECAWM in self.mode))

    def restore_cursor(self):
        """Set the current cursor position to whatever cursor is on top
        of the stack.
        """
        if self.savepoints:
            savepoint = self.savepoints.pop()

            self.g0_charset = savepoint.g0_charset
            self.g1_charset = savepoint.g1_charset
            self.charset = savepoint.charset

            # NOTE(review): modes are only re-enabled here, never
            # cleared when the savepoint recorded them as unset.
            if savepoint.origin:
                self.set_mode(mo.DECOM)
            if savepoint.wrap:
                self.set_mode(mo.DECAWM)

            self.cursor = savepoint.cursor
            self.ensure_bounds(use_margins=True)
        else:
            # If nothing was saved, the cursor moves to home position;
            # origin mode is reset. :todo: DECAWM?
            self.reset_mode(mo.DECOM)
            self.cursor_position()
def insert_lines(self, count=None):
    """Inserts the indicated # of lines at line with cursor. Lines
    displayed **at** and below the cursor move down. Lines moved
    past the bottom margin are lost.

    :param count: number of lines to insert (defaults to ``1``).
    """
    count = count or 1
    top, bottom = self.margins

    # If cursor is outside scrolling margins it -- do nothin'.
    if top <= self.cursor.y <= bottom:
        # +1 below because range() is exclusive; insertion is capped so
        # no blank line lands past the bottom margin.
        for line in range(self.cursor.y,
                          min(bottom + 1, self.cursor.y + count)):
            self.buffer.pop(bottom)
            self.buffer.insert(line, take(self.columns, self.default_line))

        self.carriage_return()
def delete_lines(self, count=None):
    """Deletes the indicated # of lines, starting at line with
    cursor. As lines are deleted, lines displayed below cursor
    move up. Lines added to bottom of screen have spaces with same
    character attributes as last line moved up.

    :param int count: number of lines to delete (defaults to ``1``).
    """
    count = count or 1
    top, bottom = self.margins

    # If cursor is outside scrolling margins it -- do nothin'.
    if top <= self.cursor.y <= bottom:
        # +1 to include the bottom margin in the deletable span.
        for _ in range(min(bottom - self.cursor.y + 1, count)):
            self.buffer.pop(self.cursor.y)
            # Replacement lines use the *cursor* attributes (see the
            # docstring), unlike insert_lines which uses default_line.
            self.buffer.insert(bottom, list(
                repeat(self.cursor.attrs, self.columns)))

        self.carriage_return()
def insert_characters(self, count=None):
    """Inserts the indicated # of blank characters at the cursor
    position. The cursor does not move and remains at the beginning
    of the inserted blank characters. Data on the line is shifted
    forward; characters pushed past the right edge are lost.

    :param int count: number of characters to insert (defaults to ``1``).
    """
    count = count or 1

    # Bug fix: the room left on the line is bounded by the cursor's
    # *column* (``cursor.x``), not its row (``cursor.y``) -- this now
    # mirrors delete_characters() below.
    for _ in range(min(self.columns - self.cursor.x, count)):
        self.buffer[self.cursor.y].insert(self.cursor.x, self.cursor.attrs)
        self.buffer[self.cursor.y].pop()
def delete_characters(self, count=None):
    """Deletes characters starting at the cursor position. The rest of
    the line shifts left, and blank cells carrying the current cursor
    attributes fill in at the right edge.

    :param int count: number of characters to delete (defaults to ``1``).
    """
    row = self.buffer[self.cursor.y]
    for _ in range(min(self.columns - self.cursor.x, count or 1)):
        row.pop(self.cursor.x)
        row.append(self.cursor.attrs)
def erase_characters(self, count=None):
    """Overwrites characters with the current cursor attributes,
    starting at the cursor position. The cursor does not move.

    :param int count: number of characters to erase (defaults to ``1``).

    .. warning::

       Even though *ALL* of the VTXXX manuals state that character
       attributes **should be reset to defaults**, ``libvte``,
       ``xterm`` and ``ROTE`` completely ignore this. Same applies
       too all ``erase_*()`` and ``delete_*()`` methods.
    """
    last = min(self.cursor.x + (count or 1), self.columns)
    row = self.buffer[self.cursor.y]
    for col in range(self.cursor.x, last):
        row[col] = self.cursor.attrs
def erase_in_line(self, type_of=0, private=False):
    """Erases a line in a specific way.

    :param int type_of: defines the way the line should be erased in:

        * ``0`` -- Erases from cursor to end of line, including cursor
          position.
        * ``1`` -- Erases from beginning of line to cursor,
          including cursor position.
        * ``2`` -- Erases complete line.
    :param bool private: when ``True`` character attributes are left
                         unchanged **not implemented**.
    """
    spans = (
        range(self.cursor.x, self.columns),  # 0: cursor -> end of line
        range(0, self.cursor.x + 1),         # 1: start of line -> cursor
        range(0, self.columns),              # 2: the whole line
    )
    row = self.buffer[self.cursor.y]
    for col in spans[type_of]:
        row[col] = self.cursor.attrs
def erase_in_display(self, type_of=0, private=False):
    """Erases display in a specific way.

    :param int type_of: defines the way the display should be erased in:

        * ``0`` -- Erases from cursor to end of screen, including
          cursor position.
        * ``1`` -- Erases from beginning of screen to cursor,
          including cursor position.
        * ``2`` -- Erases complete display. All lines are erased
          and changed to single-width. Cursor does not move.
    :param bool private: when ``True`` character attributes are left
                         unchanged **not implemented**.
    """
    interval = (
        # a) erase from cursor to the end of the display, including
        # the cursor,
        range(self.cursor.y + 1, self.lines),
        # b) erase from the beginning of the display to the cursor,
        # including it,
        range(0, self.cursor.y),
        # c) erase the whole display.
        range(0, self.lines)
    )[type_of]

    for line in interval:
        self.buffer[line][:] = \
            (self.cursor.attrs for _ in range(self.columns))

    # Cases 0 and 1 above deliberately skip the cursor's own line;
    # erase the relevant part of it with erase_in_line().
    if type_of in [0, 1]:
        self.erase_in_line(type_of)
def set_tab_stop(self):
    """Set a horizontal tab stop at the current cursor column."""
    self.tabstops.add(self.cursor.x)
def clear_tab_stop(self, type_of=None):
    """Clears a horizontal tab stop in a specific way, depending
    on the ``type_of`` value:

    * ``0`` or nothing -- Clears a horizontal tab stop at cursor
      position.
    * ``3`` -- Clears all horizontal tab stops.
    """
    if type_of == 3:
        # Drop every horizontal tab stop.
        self.tabstops = set()
    elif not type_of:
        # Remove the stop at the cursor column, silently ignoring
        # its absence.
        self.tabstops.discard(self.cursor.x)
def ensure_bounds(self, use_margins=None):
    """Clamp the cursor position into the legal screen area.

    :param bool use_margins: when ``True`` or when
                             :data:`~pyte.modes.DECOM` is set,
                             the cursor row is bounded by the top and
                             bottom margins instead of
                             ``[0; lines - 1]``.
    """
    if use_margins or mo.DECOM in self.mode:
        top, bottom = self.margins
    else:
        top, bottom = 0, self.lines - 1

    self.cursor.x = max(0, min(self.cursor.x, self.columns - 1))
    self.cursor.y = max(top, min(self.cursor.y, bottom))
def cursor_up(self, count=None):
    """Moves the cursor up the given number of lines in the same
    column, stopping at the top margin.

    :param int count: number of lines to skip (defaults to ``1``).
    """
    step = count or 1
    self.cursor.y -= step
    self.ensure_bounds(use_margins=True)
def cursor_up1(self, count=None):
    """Moves the cursor up the given number of lines and then to
    column 1, stopping at the top margin.

    :param int count: number of lines to skip (defaults to ``1``).
    """
    self.cursor_up(count)
    self.carriage_return()
def cursor_down(self, count=None):
    """Moves the cursor down the given number of lines in the same
    column, stopping at the bottom margin.

    :param int count: number of lines to skip (defaults to ``1``).
    """
    step = count or 1
    self.cursor.y += step
    self.ensure_bounds(use_margins=True)
def cursor_down1(self, count=None):
    """Moves the cursor down the given number of lines and then to
    column 1, stopping at the bottom margin.

    :param int count: number of lines to skip (defaults to ``1``).
    """
    self.cursor_down(count)
    self.carriage_return()
def cursor_back(self, count=None):
    """Moves the cursor left the given number of columns, stopping
    at the left margin.

    :param int count: number of columns to skip (defaults to ``1``).
    """
    step = count or 1
    self.cursor.x -= step
    self.ensure_bounds()
def cursor_forward(self, count=None):
    """Moves the cursor right the given number of columns, stopping
    at the right margin.

    :param int count: number of columns to skip (defaults to ``1``).
    """
    step = count or 1
    self.cursor.x += step
    self.ensure_bounds()
def cursor_position(self, line=None, column=None):
    """Set the cursor to a specific `line` and `column`.

    Cursor is allowed to move out of the scrolling region only when
    :data:`~pyte.modes.DECOM` is reset, otherwise -- the position
    doesn't change.

    :param int line: 1-based line number to move the cursor to.
    :param int column: 1-based column number to move the cursor to.
    """
    # Convert from the 1-based escape-sequence coordinates to the
    # 0-based internal ones; a missing/zero value means "first".
    column = (column or 1) - 1
    line = (line or 1) - 1

    # If origin mode (DECOM) is set, line numbers are relative to
    # the top scrolling margin.
    if mo.DECOM in self.mode:
        line += self.margins.top

        # Cursor is not allowed to move out of the scrolling region.
        if not self.margins.top <= line <= self.margins.bottom:
            return

    self.cursor.x, self.cursor.y = column, line
    self.ensure_bounds()
def cursor_to_column(self, column=None):
    """Moves the cursor to a given 1-based column on the current line.

    :param int column: column number to move the cursor to.
    """
    target = (column or 1) - 1
    self.cursor.x = target
    self.ensure_bounds()
def cursor_to_line(self, line=None):
    """Moves the cursor to a given 1-based line in the current column.

    :param int line: line number to move the cursor to.
    """
    self.cursor.y = (line or 1) - 1

    # With origin mode (DECOM) set, line numbers are relative to the
    # top scrolling margin.
    if mo.DECOM in self.mode:
        self.cursor.y += self.margins.top

        # FIXME: should we also restrict the cursor to the scrolling
        #        region?

    self.ensure_bounds()
def bell(self, *args):
    """Bell stub -- intentionally a no-op; the actual implementation
    should probably be provided by the end-user.
    """
def alignment_display(self):
    """Fills screen with uppercase E's for screen focus and alignment
    (DECALN-style test pattern).
    """
    for row in self.buffer:
        for x in range(len(row)):
            row[x] = row[x]._replace(data="E")
def select_graphic_rendition(self, *attrs):
    """Set display attributes (SGR).

    :param list attrs: a list of display attributes to set; an empty
                       list is treated as a single ``0`` (reset).
    """
    replace = {}

    for attr in attrs or [0]:
        if attr in g.FG:
            replace["fg"] = g.FG[attr]
        elif attr in g.BG:
            replace["bg"] = g.BG[attr]
        elif attr in g.TEXT:
            # Text attribute names carry a "+"/"-" prefix; "+" means
            # the flag is set, "-" means it is cleared.
            attr = g.TEXT[attr]
            replace[attr[1:]] = attr.startswith("+")
        elif not attr:
            # SGR 0: reset every attribute to the defaults.
            replace = self.default_char._asdict()

    self.cursor.attrs = self.cursor.attrs._replace(**replace)
class DiffScreen(Screen):
    """A screen subclass, which maintains a set of dirty lines in its
    :attr:`dirty` attribute. The end user is responsible for emptying
    a set, when a diff is applied.

    .. attribute:: dirty

       A set of line numbers, which should be re-drawn.

       >>> screen = DiffScreen(80, 24)
       >>> screen.dirty.clear()
       >>> screen.draw(u"!")
       >>> screen.dirty
       set([0])
    """
    def __init__(self, *args):
        self.dirty = set()
        super(DiffScreen, self).__init__(*args)

    def set_mode(self, *modes, **kwargs):
        # Toggling reverse-video (DECSCNM) repaints the whole screen.
        # NOTE(review): the ``>> 5`` presumably matches a shift applied
        # to private mode numbers elsewhere in the package -- confirm.
        if mo.DECSCNM >> 5 in modes and kwargs.get("private"):
            self.dirty.update(range(self.lines))
        super(DiffScreen, self).set_mode(*modes, **kwargs)

    def reset_mode(self, *modes, **kwargs):
        # Same full-repaint rule as set_mode() above.
        if mo.DECSCNM >> 5 in modes and kwargs.get("private"):
            self.dirty.update(range(self.lines))
        super(DiffScreen, self).reset_mode(*modes, **kwargs)

    def reset(self):
        # A reset touches every line.
        self.dirty.update(range(self.lines))
        super(DiffScreen, self).reset()

    def resize(self, *args, **kwargs):
        # A resize touches every line.
        self.dirty.update(range(self.lines))
        super(DiffScreen, self).resize(*args, **kwargs)

    def draw(self, *args):
        # Drawing only dirties the cursor's line.
        self.dirty.add(self.cursor.y)
        super(DiffScreen, self).draw(*args)

    def index(self):
        # Scrolling at the bottom margin shifts every line.
        if self.cursor.y == self.margins.bottom:
            self.dirty.update(range(self.lines))
        super(DiffScreen, self).index()

    def reverse_index(self):
        # Scrolling at the top margin shifts every line.
        if self.cursor.y == self.margins.top:
            self.dirty.update(range(self.lines))
        super(DiffScreen, self).reverse_index()

    def insert_lines(self, *args):
        # Everything from the cursor's line down may move.
        self.dirty.update(range(self.cursor.y, self.lines))
        super(DiffScreen, self).insert_lines(*args)

    def delete_lines(self, *args):
        # Everything from the cursor's line down may move.
        self.dirty.update(range(self.cursor.y, self.lines))
        super(DiffScreen, self).delete_lines(*args)

    def insert_characters(self, *args):
        self.dirty.add(self.cursor.y)
        super(DiffScreen, self).insert_characters(*args)

    def delete_characters(self, *args):
        self.dirty.add(self.cursor.y)
        super(DiffScreen, self).delete_characters(*args)

    def erase_characters(self, *args):
        self.dirty.add(self.cursor.y)
        super(DiffScreen, self).erase_characters(*args)

    def erase_in_line(self, *args):
        self.dirty.add(self.cursor.y)
        super(DiffScreen, self).erase_in_line(*args)

    def erase_in_display(self, type_of=0):
        # Mirror Screen.erase_in_display()'s three erase intervals.
        self.dirty.update((
            range(self.cursor.y + 1, self.lines),
            range(0, self.cursor.y),
            range(0, self.lines)
        )[type_of])
        super(DiffScreen, self).erase_in_display(type_of)

    def alignment_display(self):
        self.dirty.update(range(self.lines))
        super(DiffScreen, self).alignment_display()
History = namedtuple("History", "top bottom ratio size position")
class HistoryScreen(DiffScreen):
    """A screen subclass, which keeps track of screen history and allows
    pagination. This is not linux-specific, but still useful; see page
    462 of VT520 User's Manual.

    :param int history: total number of history lines to keep; is split
                        between top and bottom queues.
    :param int ratio: defines how much lines to scroll on :meth:`next_page`
                      and :meth:`prev_page` calls.

    .. attribute:: history

       A pair of history queues for top and bottom margins accordingly;
       here's the overall screen structure::

            [ 1: .......]
            [ 2: .......]  <- top history
            [ 3: .......]
            ------------
            [ 4: .......]  s
            [ 5: .......]  c
            [ 6: .......]  r
            [ 7: .......]  e
            [ 8: .......]  e
            [ 9: .......]  n
            ------------
            [10: .......]
            [11: .......]  <- bottom history
            [12: .......]

    .. note::

       Don't forget to update :class:`~pyte.streams.Stream` class with
       appropriate escape sequences -- you can use any, since pagination
       protocol is not standardized, for example::

           Stream.escape["N"] = "next_page"
           Stream.escape["P"] = "prev_page"
    """
    def __init__(self, columns, lines, history=100, ratio=.5):
        # ``history`` is the *total* line budget, split evenly between
        # the top and bottom queues (see the class docstring).  The
        # bottom queue was previously created with ``maxlen=history``,
        # letting it grow to twice its intended share.
        self.history = History(deque(maxlen=history // 2),
                               deque(maxlen=history // 2),
                               float(ratio),
                               history,
                               history)

        super(HistoryScreen, self).__init__(columns, lines)

    def __before__(self, command):
        """Ensures a screen is at the bottom of the history buffer
        before any non-pagination command executes.
        """
        if command not in ["prev_page", "next_page"]:
            while self.history.position < self.history.size:
                self.next_page()

        super(HistoryScreen, self).__before__(command)

    def __after__(self, command):
        """Ensures all lines on a screen have proper width (:attr:`columns`).

        Extra characters are truncated, missing characters are filled
        with whitespace.
        """
        if command in ["prev_page", "next_page"]:
            for idx, line in enumerate(self.buffer):
                if len(line) > self.columns:
                    self.buffer[idx] = line[:self.columns]
                elif len(line) < self.columns:
                    self.buffer[idx] = line + take(self.columns - len(line),
                                                   self.default_line)

        # If we're at the bottom of the history buffer and `DECTCEM`
        # mode is set -- show the cursor.
        self.cursor.hidden = not (
            abs(self.history.position - self.history.size) < self.lines and
            mo.DECTCEM in self.mode
        )

        super(HistoryScreen, self).__after__(command)

    def reset(self):
        """Overloaded to reset screen history state: history position
        is reset to bottom of both queues; queues themselves are
        emptied.
        """
        super(HistoryScreen, self).reset()

        self.history.top.clear()
        self.history.bottom.clear()
        self.history = self.history._replace(position=self.history.size)

    def index(self):
        """Overloaded to update top history with the removed lines."""
        top, bottom = self.margins

        # Only a scroll (cursor on the bottom margin) discards a line.
        if self.cursor.y == bottom:
            self.history.top.append(self.buffer[top])

        super(HistoryScreen, self).index()

    def reverse_index(self):
        """Overloaded to update bottom history with the removed lines."""
        top, bottom = self.margins

        # Only a reverse scroll (cursor on the top margin) discards a line.
        if self.cursor.y == top:
            self.history.bottom.append(self.buffer[bottom])

        super(HistoryScreen, self).reverse_index()

    def prev_page(self):
        """Moves the screen page up through the history buffer. Page
        size is defined by ``history.ratio``, so for instance
        ``ratio = .5`` means that half the screen is restored from
        history on page switch.
        """
        if self.history.position > self.lines and self.history.top:
            mid = min(len(self.history.top),
                      int(math.ceil(self.lines * self.history.ratio)))

            # Lines scrolled off the bottom of the view go to the
            # bottom queue; lines restored come from the top queue.
            self.history.bottom.extendleft(reversed(self.buffer[-mid:]))
            self.history = self.history \
                ._replace(position=self.history.position - self.lines)

            self.buffer[:] = list(reversed([
                self.history.top.pop() for _ in range(mid)
            ])) + self.buffer[:-mid]

            self.dirty = set(range(self.lines))

    def next_page(self):
        """Moves the screen page down through the history buffer."""
        if self.history.position < self.history.size and self.history.bottom:
            mid = min(len(self.history.bottom),
                      int(math.ceil(self.lines * self.history.ratio)))

            # Mirror image of prev_page(): top queue gains the lines
            # scrolled off the top, bottom queue supplies new ones.
            self.history.top.extend(self.buffer[:mid])
            self.history = self.history \
                ._replace(position=self.history.position + self.lines)

            self.buffer[:] = self.buffer[mid:] + [
                self.history.bottom.popleft() for _ in range(mid)
            ]

            self.dirty = set(range(self.lines))
| |
#!/usr/bin/env python
# coding: utf-8
"""
This script supports publishing Pystache to PyPI.
This docstring contains instructions to Pystache maintainers on how
to release a new version of Pystache.
(1) Prepare the release.
Make sure the code is finalized and merged to master. Bump the version
number in setup.py, update the release date in the HISTORY file, etc.
Generate the reStructuredText long_description using--
$ python setup.py prep
and be sure this new version is checked in. You must have pandoc installed
to do this step:
http://johnmacfarlane.net/pandoc/
It helps to review this auto-generated file on GitHub prior to uploading
because the long description will be sent to PyPI and appear there after
publishing. PyPI attempts to convert this string to HTML before displaying
it on the PyPI project page. If PyPI finds any issues, it will render it
instead as plain-text, which we do not want.
To check in advance that PyPI will accept and parse the reST file as HTML,
you can use the rst2html program installed by the docutils package
(http://docutils.sourceforge.net/). To install docutils:
$ pip install docutils
To check the file, run the following command and confirm that it reports
no warnings:
$ python setup.py --long-description | rst2html.py -v --no-raw > out.html
See here for more information:
http://docs.python.org/distutils/uploading.html#pypi-package-display
(2) Push to PyPI. To release a new version of Pystache to PyPI--
http://pypi.python.org/pypi/pystache
create a PyPI user account if you do not already have one. The user account
will need permissions to push to PyPI. A current "Package Index Owner" of
Pystache can grant you those permissions.
When you have permissions, run the following:
python setup.py publish
If you get an error like the following--
Upload failed (401): You must be identified to edit package information
then add a file called .pypirc to your home directory with the following
contents:
[server-login]
username: <PyPI username>
password: <PyPI password>
as described here, for example:
http://docs.python.org/release/2.5.2/dist/pypirc.html
(3) Tag the release on GitHub. Here are some commands for tagging.
List current tags:
git tag -l -n3
Create an annotated tag:
git tag -a -m "Version 0.5.1" "v0.5.1"
Push a tag to GitHub:
git push --tags defunkt v0.5.1
"""
import os
import shutil
import sys
# Interpreter version, used below to pick a packaging backend and the
# json/simplejson dependency.
py_version = sys.version_info

# distutils does not seem to support the following setup() arguments.
# It displays a UserWarning when setup() is passed those options:
#
#  * entry_points
#  * install_requires
#
# distribute works with Python 2.3.5 and above:
#
#   http://packages.python.org/distribute/setuptools.html#building-and-distributing-packages-with-distribute
#
if py_version < (2, 3, 5):
    # TODO: this might not work yet.
    import distutils as dist
    from distutils import core
    setup = core.setup
else:
    import setuptools as dist
    setup = dist.setup
VERSION = '0.5.4-handlebars'  # Also change in pystache/__init__.py.

# Encoding used for every file read/written by this script.
FILE_ENCODING = 'utf-8'

# Source documents combined into the PyPI long_description.
README_PATH = 'README.md'
HISTORY_PATH = 'HISTORY.md'
LICENSE_PATH = 'LICENSE'

# Where the generated reStructuredText description is checked in.
RST_DESCRIPTION_PATH = 'setup_description.rst'

# Suffix inserted before the extension of intermediate files.
TEMP_EXTENSION = '.temp'

# Custom command-line command that regenerates the reST description.
PREP_COMMAND = 'prep'

CLASSIFIERS = (
    'Development Status :: 4 - Beta',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.4',
    'Programming Language :: Python :: 2.5',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.1',
    'Programming Language :: Python :: 3.2',
    'Programming Language :: Python :: 3.3',
    'Programming Language :: Python :: Implementation :: PyPy',
)

# Comments in reST begin with two dots.
RST_LONG_DESCRIPTION_INTRO = """\
.. Do not edit this file. This file is auto-generated for PyPI by setup.py
.. using pandoc, so edits should go in the source files rather than here.
"""
def read(path):
    """
    Read and return the contents of a text file as a unicode string.
    """
    # Binary read + explicit decode keeps the code identical on
    # Python 2 and 3; try/finally instead of "with" preserves
    # Python 2.4 support.
    f = open(path, 'rb')
    try:
        raw = f.read()
    finally:
        f.close()
    return raw.decode(FILE_ENCODING)
def write(u, path):
    """
    Write a unicode string to a file (as utf-8).
    """
    print("writing to: %s" % path)
    # Explicit encode + binary write keeps the code identical on
    # Python 2 and 3; try/finally instead of "with" preserves
    # Python 2.4 support.
    f = open(path, "wb")
    try:
        f.write(u.encode(FILE_ENCODING))
    finally:
        f.close()
def make_temp_path(path, new_ext=None):
    """
    Return a temporary-file variant of the given path.

    Arguments:

      new_ext: the new file extension, including the leading dot.
        Defaults to preserving the existing file extension.
    """
    root, current_ext = os.path.splitext(path)
    if new_ext is None:
        new_ext = current_ext
    return root + TEMP_EXTENSION + new_ext
def strip_html_comments(text):
    """Strip HTML comments from a unicode string.

    Only whole-line comments in the special form used by this project
    (a line beginning with "<!--") are removed; line endings of the
    remaining lines are preserved.
    """
    kept = [line for line in text.splitlines(True)
            if not line.startswith("<!--")]
    return "".join(kept)
# We write the converted file to a temp file to simplify debugging and
# to avoid removing a valid pre-existing file on failure.
def convert_md_to_rst(md_path, rst_temp_path):
    """
    Convert the contents of a file from Markdown to reStructuredText.

    Returns the converted text as a Unicode string.

    Arguments:

      md_path: a path to a UTF-8 encoded Markdown file to convert.

      rst_temp_path: a temporary path to which to write the converted contents.
    """
    # Pandoc uses the UTF-8 character encoding for both input and output.
    command = "pandoc --write=rst --output=%s %s" % (rst_temp_path, md_path)
    print("converting with pandoc: %s to %s\n-->%s" % (md_path, rst_temp_path,
                                                       command))

    # Remove any stale output so that the existence check below really
    # reflects whether this pandoc run succeeded.
    if os.path.exists(rst_temp_path):
        os.remove(rst_temp_path)

    os.system(command)

    # os.system()'s exit code is unreliable across shells, so treat a
    # missing output file as the failure signal.
    if not os.path.exists(rst_temp_path):
        s = ("Error running: %s\n"
             "  Did you install pandoc per the %s docstring?" % (command,
                                                                 __file__))
        sys.exit(s)

    return read(rst_temp_path)
# The long_description needs to be formatted as reStructuredText.
# See the following for more information:
#
# http://docs.python.org/distutils/setupscript.html#additional-meta-data
# http://docs.python.org/distutils/uploading.html#pypi-package-display
#
def make_long_description():
    """
    Generate the reST long_description for setup() from source files.

    Returns the generated long_description as a unicode string.
    """
    readme_path = README_PATH

    # Remove our HTML comments because PyPI does not allow it.
    # See the setup.py docstring for more info on this.
    readme_md = strip_html_comments(read(readme_path))
    history_md = strip_html_comments(read(HISTORY_PATH))
    license_md = """\
License
=======

""" + read(LICENSE_PATH)

    sections = [readme_md, history_md, license_md]
    md_description = '\n\n'.join(sections)

    # Write the combined Markdown file to a temp path so pandoc can
    # convert the whole document in one pass.
    md_ext = os.path.splitext(readme_path)[1]
    md_description_path = make_temp_path(RST_DESCRIPTION_PATH, new_ext=md_ext)
    write(md_description, md_description_path)

    rst_temp_path = make_temp_path(RST_DESCRIPTION_PATH)
    long_description = convert_md_to_rst(md_path=md_description_path,
                                         rst_temp_path=rst_temp_path)

    return "\n".join([RST_LONG_DESCRIPTION_INTRO, long_description])
def prep():
    """Regenerate and save the reST long_description file."""
    write(make_long_description(), RST_DESCRIPTION_PATH)
def publish():
    """Publish this package to PyPI (aka "the Cheeseshop")."""
    long_description = make_long_description()
    if long_description != read(RST_DESCRIPTION_PATH):
        print("""\
Description file not up-to-date: %s
Run the following command and commit the changes--

    python setup.py %s
""" % (RST_DESCRIPTION_PATH, PREP_COMMAND))
        sys.exit()
    print("Description up-to-date: %s" % RST_DESCRIPTION_PATH)

    # raw_input() was renamed to input() in Python 3; fall back so the
    # publish step works under both major versions (this file's
    # classifiers advertise Python 3 support).
    try:
        prompt = raw_input
    except NameError:
        prompt = input

    answer = prompt("Are you sure you want to publish to PyPI (yes/no)?")
    if answer != "yes":
        exit("Aborted: nothing published")
    os.system('python setup.py sdist upload')
# We use the package simplejson for older Python versions since Python
# does not contain the module json before 2.6:
#
# http://docs.python.org/library/json.html
#
# Moreover, simplejson stopped officially support for Python 2.4 in version 2.1.0:
#
# https://github.com/simplejson/simplejson/blob/master/CHANGES.txt
#
requires = ['PyYAML==3.11']
if py_version < (2, 5):
    # simplejson dropped official Python 2.4 support in version 2.1.0.
    requires.append('simplejson<2.1')
elif py_version < (2, 6):
    # The stdlib json module only exists from Python 2.6 onward.
    requires.append('simplejson')

INSTALL_REQUIRES = requires

# TODO: decide whether to use find_packages() instead.  I'm not sure that
#   find_packages() is available with distutils, for example.
PACKAGES = [
    'pystache',
    'pystache.commands',
    # The following packages are only for testing.
    'pystache.tests',
    'pystache.tests.data',
    'pystache.tests.data.locator',
    'pystache.tests.examples',
]
# The purpose of this function is to follow the guidance suggested here:
#
# http://packages.python.org/distribute/python3.html#note-on-compatibility-with-setuptools
#
# The guidance is for better compatibility when using setuptools (e.g. with
# earlier versions of Python 2) instead of Distribute, because of new
# keyword arguments to setup() that setuptools may not recognize.
def get_extra_args():
    """
    Return a dictionary of extra args to pass to setup().
    """
    extra = {}
    # TODO: it might be more correct to check whether we are using
    #   Distribute instead of setuptools, since use_2to3 doesn't take
    #   effect when using Python 2, even when using Distribute.
    if py_version >= (3, ):
        # Causes 2to3 to be run during the build step.
        extra['use_2to3'] = True

    return extra
def main(sys_argv):
    """Dispatch on the command line: 'publish', the prep command, or a
    normal setup() run for every other distutils/setuptools command.
    """
    # TODO: use the logging module instead of printing.
    # TODO: include the following in a verbose mode.
    sys.stderr.write("pystache: using: version %s of %s\n" % (repr(dist.__version__), repr(dist)))

    # Only the last argument is inspected for the custom commands.
    command = sys_argv[-1]

    if command == 'publish':
        publish()
        sys.exit()
    elif command == PREP_COMMAND:
        prep()
        sys.exit()

    long_description = read(RST_DESCRIPTION_PATH)
    template_files = ['*.mustache', '*.txt']
    extra_args = get_extra_args()

    setup(name='pystache',
          version=VERSION,
          license='MIT',
          description='Mustache for Python',
          long_description=long_description,
          author='Chris Wanstrath',
          author_email='chris@ozmm.org',
          maintainer='Chris Jerdonek',
          maintainer_email='chris.jerdonek@gmail.com',
          url='http://github.com/defunkt/pystache',
          install_requires=INSTALL_REQUIRES,
          packages=PACKAGES,
          package_data = {
              # Include template files so tests can be run.
              'pystache.tests.data': template_files,
              'pystache.tests.data.locator': template_files,
              'pystache.tests.examples': template_files,
          },
          entry_points = {
              'console_scripts': [
                  'pystache=pystache.commands.render:main',
                  'pystache-test=pystache.commands.test:main',
              ],
          },
          classifiers = CLASSIFIERS,
          **extra_args)


if __name__=='__main__':
    main(sys.argv)
| |
import yaml
from scoring_engine.config import config
from scoring_engine.engine.engine import Engine
from scoring_engine.models.team import Team
from scoring_engine.models.user import User
from scoring_engine.models.service import Service
from scoring_engine.models.account import Account
from scoring_engine.models.environment import Environment
from scoring_engine.models.property import Property
from scoring_engine.logger import logger
class Competition(dict):
@staticmethod
def parse_yaml_str(input_str):
    """Parse a YAML competition definition and build a Competition
    from the resulting data (validating it in the constructor).
    """
    parsed = yaml.safe_load(input_str)
    return Competition(parsed)
def __init__(self, data):
    # Load every check class available on disk so that service
    # definitions can be validated against real check implementations.
    self.available_checks = Engine.load_check_files(config.checks_location)
    # Canonical service list shared by all Blue teams; populated from
    # the first Blue team encountered during verification.
    self.required_services = None
    self.verify_data(data)
    super(Competition, self).__init__(data)
def verify_data(self, data):
# verify teams is in project root
assert 'teams' in data, 'teams must be defined on the root'
assert type(data['teams']) == list, 'teams must be an array'
for team in data['teams']:
self.verify_team_data(team)
# Verify there are no duplicate user usernames on any of the teams
usernames = []
for team in data['teams']:
for user in team['users']:
assert user['username'] not in usernames, "Multiple Users with the same username: '{0}'".format(user['username'])
usernames.append(user['username'])
def verify_team_data(self, team):
# Verify team name
assert 'name' in team, "team must have a 'name' field"
assert type(team['name']) is str, 'team name must be a string'
# Verify team color
assert 'color' in team, "'{0}' must have a 'color' field".format(team['name'])
assert type(team['color']) is str, "'{0}' color must a string".format(team['name'])
assert team['color'] in ('Blue', 'White', 'Red'), "'{0}' color must one of (Red, White, Blue)".format(team['name'])
# Verify team users
assert 'users' in team, "'{0}' must have a 'users' field".format(team['name'])
assert type(team['users']) is list, "'{0}' 'users' field must be an array".format(team['name'])
for user in team['users']:
self.verify_user_data(user, team['name'])
# Verify team services if blue team
if team['color'] == 'Blue':
assert 'services' in team, "'{0}' must have a 'services' field".format(team['name'])
assert type(team['services']) is list, "'{0}' 'services' field must be an array".format(team['name'])
for service in team['services']:
self.verify_service_data(service, team['name'])
if self.required_services is None:
self.required_services = []
for service in team['services']:
self.required_services = team['services']
# Verify each required service is defined on this current team
for required_service in self.required_services:
# Find team_service by name
team_service = None
for tmp_service in team['services']:
if tmp_service['name'] == required_service['name']:
team_service = tmp_service
assert team_service is not None, "Service '{0}' not defined in team '{1}'".format(required_service['name'], team['name'])
assert team_service['name'] == required_service['name'], "Team '{0}' missing '{1}' Expecting '{2}'".format(team['name'], required_service['name'], team_service['name'])
assert team_service['check_name'] == required_service['check_name'], "Incorrect check_name for Service '{0}' for Team '{1}'. Got: '{2}' Expected: {3}".format(team_service['name'], team['name'], team_service['check_name'], required_service['check_name'])
assert team_service['points'] == required_service['points'], "Incorrect points for Service '{0}' for Team '{1}'. Got: {2} Expected: {3}".format(team_service['name'], team['name'], team_service['points'], required_service['points'])
assert len(team_service['environments']) == len(required_service['environments'])
# Verify there aren't services defined in the current team but not in others
for team_service in team['services']:
required_service = None
for tmp_service in self.required_services:
if tmp_service['name'] == team_service['name']:
required_service = tmp_service
assert required_service is not None, "Service '{0}' for Team '{1}' not defined in other teams".format(team_service['name'], team['name'])
# Verify each team service must have unique names
team_service_names = []
for service in team['services']:
assert service['name'] not in team_service_names, "Each team's service must have a unique name, found duplicates in '{0}' for team '{1}'".format(service['name'], team['name'])
team_service_names.append(service['name'])
def verify_user_data(self, user, team_name):
# Verify user username
assert 'username' in user, "{0} user must have a 'username' field".format(team_name)
assert type(user['username']) is str, "{0} user username must a string".format(team_name)
# Verify user password
assert 'password' in user, "{0} user must have a 'password' field".format(team_name)
assert type(user['password']) is str, "{0} user password must a string".format(team_name)
def verify_service_data(self, service, team_name):
# Verify service name
assert 'name' in service, "{0} service must have a 'name' field".format(team_name)
assert type(service['name']) is str, "{0} service 'name' must be a string".format(team_name)
# Verify service check_name
assert 'check_name' in service, "{0} {1} service must have a 'check_name' field".format(team_name, service['name'])
assert type(service['check_name']) is str, "{0} {1} service 'check_name' field must be a string".format(team_name, service['name'])
# Verify check_name maps correctly to a real check source code class
found_check = None
for available_check in self.available_checks:
if service['check_name'] == available_check.__name__:
found_check = available_check
assert found_check is not None, "{0} {1} Incorrect 'check_name' field, must match the classname of a check defined in {2}".format(team_name, service['name'], config.checks_location)
# Verify service host
assert 'host' in service, "{0} {1} service must have a 'host' field".format(team_name, service['name'])
assert type(service['host']) is str, "{0} {1} service 'host' field must be a string".format(team_name, service['name'])
# Verify service worker_queue if it exists
if 'worker_queue' in service:
assert type(service['worker_queue']) is str, "{0} {1} service 'worker_queue' field must be a string".format(team_name, service['name'])
# Verify service port
assert 'port' in service, "{0} {1} service must have a 'port' field".format(team_name, service['name'])
assert type(service['port']) is int, "{0} {1} service 'port' field must be an integer".format(team_name, service['name'])
# Verify service points
assert 'points' in service, "{0} {1} service must have a 'points' field".format(team_name, service['name'])
assert type(service['points']) is int, "{0} {1} service 'points' field must be an integer".format(team_name, service['name'])
if 'accounts' in service:
assert type(service['accounts']) is list, "{0} {1} service 'accounts' field must be an array".format(team_name, service['name'])
for account in service['accounts']:
self.verify_account_data(account, team_name, service['name'])
# Verify service environments
assert 'environments' in service, "{0} {1} service must have a 'environments' field".format(team_name, service['name'])
assert type(service['environments']) is list, "{0} {1} service 'environments' field must be an array".format(team_name, service['name'])
for environment in service['environments']:
self.verify_environment_data(environment, team_name, service['name'], found_check)
def verify_account_data(self, account, team_name, service_name):
# Verify account username
assert 'username' in account, "{0} {1} account must have a 'username' field".format(team_name, service_name)
assert type(account['username']) is str, "{0} {1} account 'username' field must be a string".format(team_name, service_name)
# Verify account password
assert 'password' in account, "{0} {1} account must have a 'password' field".format(team_name, service_name)
assert type(account['password']) is str, "{0} {1} account 'password' field must be a string".format(team_name, service_name)
def verify_environment_data(self, environment, team_name, service_name, found_check_source):
# Verify environment matching_content
assert 'matching_content' in environment, "{0} {1} environment must have a 'matching_content' field".format(team_name, service_name)
assert type(environment['matching_content']) is str, "{0} {1} environment 'matching_content' field must be a string".format(team_name, service_name)
# Verify environment properties
if 'properties' in environment:
assert type(environment['properties']) is list, "{0} {1} environment 'properties' field must be an array".format(team_name, service_name)
for property_obj in environment['properties']:
self.verify_property_data(property_obj, team_name, service_name, found_check_source)
# Verify that all properties the check source code requires, is listed
for required_property_key in found_check_source.required_properties:
matched_key = False
for defined_property in environment['properties']:
if required_property_key in defined_property['name']:
matched_key = True
assert matched_key is True, "{0} {1} service does not define the '{2}' property".format(team_name, service_name, required_property_key)
def verify_property_data(self, property_obj, team_name, service_name, found_check_source):
# Verify property name
assert 'name' in property_obj, "{0} {1} property must have a 'name' field".format(team_name, service_name)
assert type(property_obj['name']) is str, "{0} {1} property 'name' field must be a string".format(team_name, service_name)
# Verify property value
assert 'value' in property_obj, "{0} {1} property must have a 'value' field".format(team_name, service_name)
assert type(property_obj['value']) is str, "{0} {1} property 'value' field must be a string".format(team_name, service_name)
assert property_obj['name'] in found_check_source.required_properties, "{0} {1} {2} does not require the property '{3}'".format(team_name, service_name, found_check_source.__name__, property_obj['name'])
def save(self, db_session):
for team_dict in self['teams']:
logger.info("Creating {0} Team: {1}".format(team_dict['color'], team_dict['name']))
team_obj = Team(name=team_dict['name'], color=team_dict['color'])
db_session.add(team_obj)
for user_dict in team_dict['users']:
logger.info("\tCreating User {0}:{1}".format(user_dict['username'], user_dict['password']))
db_session.add(User(username=user_dict['username'], password=user_dict['password'], team=team_obj))
if 'services' in team_dict:
for service_dict in team_dict['services']:
logger.info("\tCreating {0} Service".format(service_dict['name']))
service_obj = Service(
name=service_dict['name'],
team=team_obj,
check_name=service_dict['check_name'],
host=service_dict['host'],
port=service_dict['port'],
points=service_dict['points']
)
if 'worker_queue' in service_dict:
service_obj.worker_queue = service_dict['worker_queue']
db_session.add(service_obj)
if 'accounts' in service_dict:
for account_dict in service_dict['accounts']:
db_session.add(Account(username=account_dict['username'], password=account_dict['password'], service=service_obj))
for environment_dict in service_dict['environments']:
environment_obj = Environment(service=service_obj, matching_content=environment_dict['matching_content'])
db_session.add(environment_obj)
if 'properties' in environment_dict:
for property_dict in environment_dict['properties']:
db_session.add(Property(environment=environment_obj, name=property_dict['name'], value=property_dict['value']))
db_session.commit()
| |
# vim: set expandtab ts=4 sw=4 filetype=python fileencoding=utf8:
import copy
import logging
import textwrap
import psycopg2.extras
from profiles.pg import RelationWrapper
log = logging.getLogger(__name__)
class IndicatorsFactory(psycopg2.extras.CompositeCaster):
def make(self, values):
d = dict(zip(self.attnames, values))
return Indicator(**d)
class Indicator(RelationWrapper):
def __init__(self, indicator_uuid, title, description,
pretty_label, indicator_value_format, indicator_category, source_document,
sas_variable, formula, extra_notes,
definition, universe, limitations, note, data_source,
data_as_of, numerator_tables, denominator_tables,
chart_label,
inserted, updated):
self.indicator_uuid = indicator_uuid
self.title = title
self.description = description
self.pretty_label = pretty_label
self.indicator_value_format = indicator_value_format
self.indicator_category = indicator_category
self.source_document = source_document
self.sas_variable = sas_variable
self.formula = formula
self.extra_notes = extra_notes
self.definition = definition
self.universe = universe
self.limitations = limitations
self.note = note
self.data_source = data_source
self.data_as_of = data_as_of
self.numerator_tables = numerator_tables
self.denominator_tables = denominator_tables
self.chart_label = chart_label
self.inserted = inserted
self.updated = updated
# Maybe set this
self.racial_split = []
self.indicator_CV = None
self.indicator_moe = None
@property
def __jsondata__(self):
d = copy.copy(self.__dict__)
return d
def __eq__(self, other):
return self.indicator_uuid == other.indicator_uuid
def __ne__(self, other):
if other:
return self.indicator_uuid != other.indicator_uuid
else:
return True
@classmethod
def by_indicator_uuid(cls, pgconn, indicator_uuid):
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
select (indicators.*)::indicators as indicator
from indicators
where indicator_uuid = %(indicator_uuid)s
"""), dict(indicator_uuid=indicator_uuid))
return cursor.fetchone().indicator
@classmethod
def select_all(cls, pgconn):
qry = textwrap.dedent("""
select (indicators.*)::indicators as x
from indicators
""")
cursor = pgconn.cursor()
cursor.execute(qry)
for row in cursor:
yield row.x
@classmethod
def insert(cls, pgconn, title, description,
indicator_value_format, indicator_category,
source_document, sas_variable, chart_label):
cursor = pgconn.cursor()
if indicator_value_format is None and '_' == title[0]:
indicator_value_format = 'percent'
cursor.execute(textwrap.dedent("""
insert into indicators
(title, description, indicator_value_format,
indicator_category, source_document, sas_variable,
chart_label)
values
(%s, %s, %s, %s, %s, %s, %s)
returning (indicators.*)::indicators as ind
"""),
[title, description, indicator_value_format,
indicator_category, source_document, sas_variable,
chart_label])
return cursor.fetchone().ind
@classmethod
def by_title(cls, pgconn, title):
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
select (indicators.*)::indicators ind
from indicators
where title = %s
"""), [title])
if cursor.rowcount:
return cursor.fetchone().ind
else:
raise KeyError(
"Sorry, no indicator with title {0} found!".format(
title))
@classmethod
def by_sas_variable(cls, pgconn, sas_variable):
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
select indicators.*::indicators as ind
from indicators
where sas_variable = %s
"""), [sas_variable])
for row in cursor:
yield row.ind
def set_all_visible(self, pgconn, visible=False):
"""
Set all values for this indicator to visible (true / false)
"""
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
update indicator_location_values
set visible = %(visible)s
where indicator_uuid = %(indicator_uuid)s
"""), dict(visible=visible, indicator_uuid=self.indicator_uuid))
return self
def set_visible_years(self, pgconn, start_year, end_year, visible=False):
"""
Set all values for this indicator to visible (true / false)
"""
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
update indicator_location_values
set visible = %(visible)s
where indicator_uuid = %(indicator_uuid)s
and date_part('year', observation_timestamp) >= %(start_year)s
and date_part('year', observation_timestamp) <= %(end_year)s
"""), dict(visible=visible, indicator_uuid=self.indicator_uuid,
end_year=end_year, start_year=start_year))
return self
def set_visible_year(self, pgconn, year, visible=False):
"""
Set all values for this indicator to visible (true / false)
"""
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
update indicator_location_values
set visible = %(visible)s
where indicator_uuid = %(indicator_uuid)s
and observation_timestamp is not null
and date_part('year', observation_timestamp) = %(year)s
"""), dict(visible=visible, indicator_uuid=self.indicator_uuid, year=year))
return self
def update_description(self, pgconn, new_description, chart_label):
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
update indicators
set description = %s,
chart_label = %s
where indicator_uuid = %s
returning indicators.*::indicators as updated_ind
"""), [new_description, chart_label, self.indicator_uuid])
if cursor.rowcount:
updated_ind = cursor.fetchone().updated_ind
log.info("Updated description, chart_label on {0} to {1}, {2}".format(
updated_ind,
new_description,
chart_label))
return updated_ind
else:
raise KeyError("Could not find indicator {0}!".format(self))
def update_pretty_label(self, pgconn, new_pretty_label):
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
update indicators
set pretty_label = %s
where indicator_uuid = %s
returning indicators.*::indicators as updated_ind
"""), [new_pretty_label, self.indicator_uuid])
if cursor.rowcount:
updated_ind = cursor.fetchone().updated_ind
log.info("Updated pretty_label on {0} to {1}".format(
updated_ind,
updated_ind.pretty_label))
return updated_ind
else:
raise KeyError("Could not find indicator {0}!".format(self))
@classmethod
def update_description_by_title(cls, pgconn, title, description,
chart_label):
"""
Use the title to find this indicator. Then update the
description. Then return the updated indicator.
"""
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
update indicators
set description = %s,
chart_label = %s
where title = %s
returning indicators.*::indicators as updated_ind
"""), [description, chart_label, title])
if cursor.rowcount:
updated_ind = cursor.fetchone().updated_ind
log.info("Updated description on {0} to {1}".format(
updated_ind,
description))
return updated_ind
else:
raise KeyError("Could not find indicator {0}!".format(title))
@classmethod
def update_extra_details_by_title(cls, pgconn, title, description,
definition,
universe, limitations, note, data_source, data_as_of,
numerator_tables, denominator_tables, chart_label):
"""
Use the title to find this indicator. Then update
with extra information. Then return the updated indicator.
"""
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
update indicators
set description = %(description)s,
pretty_label = %(description)s,
definition = %(definition)s,
universe = %(universe)s,
limitations = %(limitations)s,
note = %(note)s,
data_source = %(data_source)s,
data_as_of = %(data_as_of)s,
numerator_tables = %(numerator_tables)s,
denominator_tables = %(denominator_tables)s,
chart_label = %(chart_label)s
where title = %(title)s
returning (indicators.*)::indicators as updated_ind
"""), locals())
if cursor.rowcount:
updated_ind = cursor.fetchone().updated_ind
log.info("Updated extra details on {0} {1}".format(
updated_ind, updated_ind.universe))
return updated_ind
else:
raise KeyError("Could not find indicator {0}!".format(title))
def __repr__(self):
return """<{0}.{1} (title="{2}")>""".format(
self.__class__.__module__,
self.__class__.__name__,
self.title)
def lookup_my_racial_split(self, pgconn, location_uuid):
""""
Looks up an indicator location value racial split
"""
racial_indicators= IndicatorLocationValue.find_racial_sub_indicators(
self.title)
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
cursor.execute(textwrap.dedent("""
with indicator_value_location as
(
select
i.indicator_uuid, i.title, i.indicator_value_format,
l.title as location_title,
ilv.value, ilv.observation_timestamp, i.indicator_category
from indicator_location_values ilv
join indicators i on i.indicator_uuid = ilv.indicator_uuid
join locations l on l.location_uuid = ilv.location_uuid
where l.location_uuid = %(location_uuid)s
and i.title = any(%(indicators)s)
and ilv.visible = true
--and ilv.value != 999999
order by ilv.observation_timestamp asc
)
select (i.*)::indicators as indicator,
array_to_json(array_agg(ilv.*)) as indicator_values
from indicator_value_location ilv
join indicators i on ilv.indicator_uuid = i.indicator_uuid
group by (i.*)
"""), dict(location_uuid=location_uuid,
indicators=racial_indicators))
self.racial_split = [row for row in cursor.fetchall()]
return self
def lookup_cv_and_moe(self, pgconn, location_uuid):
""""
Looks up an indicator location value racial split
"""
cv_ind = 'cv' + self.title
m_ind = 'm' + self.title
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
cursor.execute(textwrap.dedent("""
with indicator_value_location as
(
select
i.indicator_uuid, i.title, i.indicator_value_format,
l.title as location_title,
ilv.value, ilv.observation_timestamp, i.indicator_category
from indicator_location_values ilv
join indicators i on i.indicator_uuid = ilv.indicator_uuid
join locations l on l.location_uuid = ilv.location_uuid
where l.location_uuid = %(location_uuid)s
and i.title = any(%(indicators)s)
and ilv.visible = true
--and ilv.value != 999999
order by ilv.observation_timestamp asc
)
select (i.*)::indicators as indicator,
array_to_json(array_agg(ilv.*)) as indicator_values
from indicator_value_location ilv
join indicators i on ilv.indicator_uuid = i.indicator_uuid
group by (i.*)
"""), dict(location_uuid=location_uuid,
indicators=[cv_ind, m_ind]))
if cursor.rowcount > 1:
self.indicator_CV, self.indicator_moe = cursor.fetchall()
return self
def distinct_observation_timestamps(self, pgconn):
"""
Give us the distinct observable_timestamps for a given
indicator
"""
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
cursor.execute(textwrap.dedent("""
select distinct observation_timestamp,
observation_timestamp_label
from indicator_location_values
where indicator_uuid = %(indicator_uuid)s
and visible = True
order by observation_timestamp asc;
"""), dict(indicator_uuid=self.indicator_uuid))
for row in cursor.fetchall():
yield row
def all_indicator_location_values(self, pgconn, order_by_area=False):
"""
Give us all the values for a given indicator
across all times and locations
"""
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
if order_by_area:
order_by_clause = "order by st_area(l.location_shape) desc"
else:
order_by_clause = "order by l.title asc"
qry = textwrap.dedent("""
select (l.*)::locations as location,
st_area(l.location_shape) as location_area,
array_to_json(array_agg((ilv.*)::indicator_location_values))
as indicator_location_values
from indicator_location_values ilv
join locations l on l.location_uuid = ilv.location_uuid
where indicator_uuid = %(indicator_uuid)s
and l.display_me = true
group by l.location_uuid
{order_by_clause}
""")
qry = qry.format(order_by_clause=order_by_clause)
cursor.execute(qry, dict(indicator_uuid=self.indicator_uuid))
for row in cursor.fetchall():
yield row
def all_indicator_categories(pgconn):
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
select category from indicator_categories
"""))
return [row.category for row in cursor]
class IndicatorCategoryFactory(psycopg2.extras.CompositeCaster):
def make(self, values):
d = dict(zip(self.attnames, values))
return IndicatorCategory(**d)
class IndicatorCategory(RelationWrapper):
def __init__(self, category, description, inserted, updated):
self.category = category
self.description = description
self.inserted = inserted
self.updated = updated
@classmethod
def all(cls, pgconn):
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
select (indicator_categories.*)::indicator_categories ic
from indicator_categories
"""))
for row in cursor:
yield row.ic
@classmethod
def insert(cls, pgconn, category, description):
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
insert into indicator_categories
(category, description)
values
(%s, %s)
returning indicator_categories.*::indicator_categories as ic
"""), [category, description])
return cursor.fetchone().ic
@classmethod
def by_category(cls, pgconn, category):
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
select indicator_categories.*::indicator_categories as ic
from indicator_categories
where category = %s
"""), [category])
if cursor.rowcount:
return cursor.fetchone().ic
else:
raise KeyError("No indicator_category {0}!".format(
category))
class IndicatorLocationValueFactory(psycopg2.extras.CompositeCaster):
def make(self, values):
d = dict(zip(self.attnames, values))
return IndicatorLocationValue(**d)
class IndicatorLocationValue(RelationWrapper):
def __init__(self, indicator_uuid, location_uuid,
observation_timestamp, observation_range,
value, observation_timestamp_label, visible, inserted, updated):
self.indicator_uuid = indicator_uuid
self.location_uuid = location_uuid
self.observation_timestamp = observation_timestamp
self.observation_range = observation_range
self.observation_timestamp_label = observation_timestamp_label
self.value = value
self.visible = visible
self.inserted = inserted
self.updated = updated
@classmethod
def insert(cls, pgconn, indicator_uuid, location_uuid,
observation_timestamp, observation_range, value):
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
insert into indicator_location_values
(
indicator_uuid, location_uuid, observation_timestamp,
observation_range, value
)
values
(%s, %s, %s, %s, %s)
returning
(indicator_location_values.*)::indicator_location_values as indlocval
"""), [indicator_uuid, location_uuid, observation_timestamp,
observation_range, value])
return cursor.fetchone().indlocval
@classmethod
def by_ilo(cls, pgconn, indicator, location, observation_timestamp):
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
select indicator_location_values.*::indicator_location_values as ilv
from indicator_location_values
where (indicator_uuid, location_uuid, observation_timestamp)
= (%s, %s, %s)
"""), [indicator, location, observation_timestamp])
if cursor.rowcount:
return cursor.fetchone().ilv
else:
raise KeyError("Sorry, no ILV with {0}, {1}, {2} found!".format(
indicator,
location,
observation_timestamp))
@classmethod
def update_value(cls, pgconn, indicator, location,
observation_timestamp, value, visible=True):
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
update indicator_location_values
set value = %s, visible = %s
where (indicator_uuid, location_uuid, observation_timestamp)
= (%s, %s, %s)
returning indicator_location_values.*::indicator_location_values as ilv
"""), [value, visible, indicator, location, observation_timestamp])
if cursor.rowcount:
ilv = cursor.fetchone().ilv
log.info("Updated {0}'s value to {1}.".format(ilv, value))
return ilv
else:
raise KeyError("Sorry, no ILV with {0}, {1}, {2} found!".format(
indicator,
location,
observation_timestamp))
def update_my_value(self, pgconn, new_value, visible=True):
if float(new_value) != self.value or self.visible != visible:
return self.update_value(
pgconn,
self.indicator_uuid,
self.location_uuid,
self.observation_timestamp,
float(new_value),
visible=visible)
@staticmethod
def look_up_racial_split(pgconn, indicator_title,
location_uuid, dt):
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
select
(indicators.*)::indicators as i,
(ilv.*) as indicator_value,
indicators.chart_label,
round(ilv.value - ilv_moe.value) as floor,
round(ilv.value + ilv_moe.value) as ceiling
from indicator_location_values ilv
join indicators
on ilv.indicator_uuid = indicators.indicator_uuid
left join indicators moe
on 'm' || indicators.title = moe.title
left join indicator_location_values ilv_moe
on moe.indicator_uuid = ilv_moe.indicator_uuid
and ilv_moe.location_uuid = %(location_uuid)s
and ilv_moe .observation_timestamp = %(dt)s
where indicators.title = any (%(race_indicator_titles)s)
and ilv.location_uuid = %(location_uuid)s
and ilv.observation_timestamp = %(dt)s
order by indicators.pretty_label
"""), dict(
race_indicator_titles=IndicatorLocationValue.find_racial_sub_indicators(indicator_title),
location_uuid=location_uuid,
dt=dt))
for row in cursor:
yield row._asdict()
@staticmethod
def find_racial_sub_indicators(indicator_title):
"""
The CWRU folks have no single pattern for how they racial splits
on statistics.
"""
# This one is my favorite -- it is completely unlike the other
# patterns.
if indicator_title == "pop":
return ["nhw", "nhb", "nhapi", "nho", "hisp"]
# rpass50 => w_rpass50
if indicator_title in set([
"rpassed3", "rpassed4", "rpassed6", "rpassed10",
"mpassed3", "mpassed4", "mpassed6", "mpassed10"
]):
return ["{0}_{1}".format(c, indicator_title) for c in 'abhow']
# _rpass50 => _w_rpass50
elif indicator_title in set([
"_rpassed50", "_rpassed20", "_rpassed10", "_rpassed41",
"_mpassed50", "_mpassed20", "_mpassed10", "_mpassed41",
]):
return ["_{0}{1}".format(c, indicator_title) for c in 'abhow']
# _attend => w_attend
elif indicator_title in set(["_attend"]):
return ["{0}{1}".format(c, indicator_title) for c in 'abhow']
# _emp => _wemp
elif indicator_title in set(["_emp", "_lf", "_lshs", "_hsgrad",
"_somecollassoc", "_bsp", "_bpv", "_native", "_foreign",
"_samehse1y", "_diffhs1y","_drove", "_walk", "_public_tran",
"_other_tran", "_workathome"
]):
log.info(indicator_title)
return ["_{0}{1}".format(c, indicator_title[1:]) for c in 'abhow']
# t_cburden50p => w_cburden50p
elif indicator_title.startswith("t_"):
return ["{0}{1}".format(c, indicator_title[1:]) for c in 'abhow']
elif indicator_title.startswith("_hh"):
return ["_{0}{1}".format(c, indicator_title[1:]) for c in 'abhow']
# _t_cburden50p => _w_cburden50p
elif indicator_title.startswith("_t_c"):
return ["_{0}_{1}".format(c, indicator_title[3:]) for c in 'abhow']
# _pa_snap => _wpa_snap
elif indicator_title.startswith("_pa_snap"):
return ["_{0}{1}".format(c, indicator_title[1:]) for c in 'abhow']
elif indicator_title.startswith("_"):
return ["_{0}{1}".format(c, indicator_title) for c in 'abhow']
# _t_cburden50p => _w_cburden50p
elif indicator_title.startswith("_"):
return ["_{0}_{1}".format(c, indicator_title[3:]) for c in 'abhow']
# xyz => wxyz
else:
return ["{0}{1}".format(c, indicator_title) for c in 'abhow']
@staticmethod
def find_available_observation_timestamps(pgconn, indicator_uuid,
location_uuid):
cursor = pgconn.cursor()
cursor.execute(textwrap.dedent("""
select ilv.observation_timestamp,
coalesce(
ilv.observation_timestamp_label,
to_char(
ilv.observation_timestamp,
'YYYY')) as observation_timestamp_label
from indicator_location_values ilv
where ilv.indicator_uuid = %(indicator_uuid)s
and ilv.location_uuid = %(location_uuid)s
order by ilv.observation_timestamp
"""), locals())
for row in cursor:
yield row._asdict()
| |
"""Test different accessory types: Camera."""
from uuid import UUID
from pyhap.accessory_driver import AccessoryDriver
import pytest
from homeassistant.components import camera, ffmpeg
from homeassistant.components.homekit.accessories import HomeBridge
from homeassistant.components.homekit.const import (
AUDIO_CODEC_COPY,
CHAR_MOTION_DETECTED,
CONF_AUDIO_CODEC,
CONF_LINKED_MOTION_SENSOR,
CONF_STREAM_SOURCE,
CONF_SUPPORT_AUDIO,
CONF_VIDEO_CODEC,
DEVICE_CLASS_MOTION,
SERV_MOTION_SENSOR,
VIDEO_CODEC_COPY,
VIDEO_CODEC_H264_OMX,
)
from homeassistant.components.homekit.img_util import TurboJPEGSingleton
from homeassistant.components.homekit.type_cameras import Camera
from homeassistant.components.homekit.type_switches import Switch
from homeassistant.const import ATTR_DEVICE_CLASS, STATE_OFF, STATE_ON
from homeassistant.exceptions import HomeAssistantError
from homeassistant.setup import async_setup_component
from .common import mock_turbo_jpeg
from tests.async_mock import AsyncMock, MagicMock, PropertyMock, patch
MOCK_START_STREAM_TLV = "ARUCAQEBEDMD1QMXzEaatnKSQ2pxovYCNAEBAAIJAQECAgECAwEAAwsBAgAFAgLQAgMBHgQXAQFjAgQ768/RAwIrAQQEAAAAPwUCYgUDLAEBAwIMAQEBAgEAAwECBAEUAxYBAW4CBCzq28sDAhgABAQAAKBABgENBAEA"
MOCK_END_POINTS_TLV = "ARAzA9UDF8xGmrZykkNqcaL2AgEAAxoBAQACDTE5Mi4xNjguMjA4LjUDAi7IBAKkxwQlAQEAAhDN0+Y0tZ4jzoO0ske9UsjpAw6D76oVXnoi7DbawIG4CwUlAQEAAhCyGcROB8P7vFRDzNF2xrK1Aw6NdcLugju9yCfkWVSaVAYEDoAsAAcEpxV8AA=="
MOCK_START_STREAM_SESSION_UUID = UUID("3303d503-17cc-469a-b672-92436a71a2f6")
PID_THAT_WILL_NEVER_BE_ALIVE = 2147483647
async def _async_start_streaming(hass, acc):
"""Start streaming a camera."""
acc.set_selected_stream_configuration(MOCK_START_STREAM_TLV)
await acc.run_handler()
await hass.async_block_till_done()
async def _async_setup_endpoints(hass, acc):
"""Set camera endpoints."""
acc.set_endpoints(MOCK_END_POINTS_TLV)
await acc.run_handler()
await hass.async_block_till_done()
async def _async_reconfigure_stream(hass, acc, session_info, stream_config):
"""Reconfigure the stream."""
await acc.reconfigure_stream(session_info, stream_config)
await acc.run_handler()
await hass.async_block_till_done()
async def _async_stop_all_streams(hass, acc):
"""Stop all camera streams."""
await acc.stop()
await acc.run_handler()
await hass.async_block_till_done()
async def _async_stop_stream(hass, acc, session_info):
"""Stop a camera stream."""
await acc.stop_stream(session_info)
await acc.run_handler()
await hass.async_block_till_done()
@pytest.fixture()
def run_driver(hass):
"""Return a custom AccessoryDriver instance for HomeKit accessory init."""
with patch("pyhap.accessory_driver.Zeroconf"), patch(
"pyhap.accessory_driver.AccessoryEncoder"
), patch("pyhap.accessory_driver.HAPServer"), patch(
"pyhap.accessory_driver.AccessoryDriver.publish"
), patch(
"pyhap.accessory_driver.AccessoryDriver.persist"
):
yield AccessoryDriver(
pincode=b"123-45-678", address="127.0.0.1", loop=hass.loop
)
def _get_exits_after_startup_mock_ffmpeg():
"""Return a ffmpeg that will have an invalid pid."""
ffmpeg = MagicMock()
type(ffmpeg.process).pid = PropertyMock(return_value=PID_THAT_WILL_NEVER_BE_ALIVE)
ffmpeg.open = AsyncMock(return_value=True)
ffmpeg.close = AsyncMock(return_value=True)
ffmpeg.kill = AsyncMock(return_value=True)
return ffmpeg
def _get_working_mock_ffmpeg():
"""Return a working ffmpeg."""
ffmpeg = MagicMock()
ffmpeg.open = AsyncMock(return_value=True)
ffmpeg.close = AsyncMock(return_value=True)
ffmpeg.kill = AsyncMock(return_value=True)
return ffmpeg
def _get_failing_mock_ffmpeg():
"""Return an ffmpeg that fails to shutdown."""
ffmpeg = MagicMock()
type(ffmpeg.process).pid = PropertyMock(return_value=PID_THAT_WILL_NEVER_BE_ALIVE)
ffmpeg.open = AsyncMock(return_value=False)
ffmpeg.close = AsyncMock(side_effect=OSError)
ffmpeg.kill = AsyncMock(side_effect=OSError)
return ffmpeg
async def test_camera_stream_source_configured(hass, run_driver, events):
"""Test a camera that can stream with a configured source."""
await async_setup_component(hass, ffmpeg.DOMAIN, {ffmpeg.DOMAIN: {}})
await async_setup_component(
hass, camera.DOMAIN, {camera.DOMAIN: {"platform": "demo"}}
)
await hass.async_block_till_done()
entity_id = "camera.demo_camera"
hass.states.async_set(entity_id, None)
await hass.async_block_till_done()
acc = Camera(
hass,
run_driver,
"Camera",
entity_id,
2,
{CONF_STREAM_SOURCE: "/dev/null", CONF_SUPPORT_AUDIO: True},
)
not_camera_acc = Switch(hass, run_driver, "Switch", entity_id, 4, {},)
bridge = HomeBridge("hass", run_driver, "Test Bridge")
bridge.add_accessory(acc)
bridge.add_accessory(not_camera_acc)
await acc.run_handler()
assert acc.aid == 2
assert acc.category == 17 # Camera
await _async_setup_endpoints(hass, acc)
working_ffmpeg = _get_working_mock_ffmpeg()
session_info = acc.sessions[MOCK_START_STREAM_SESSION_UUID]
with patch(
"homeassistant.components.demo.camera.DemoCamera.stream_source",
return_value=None,
), patch(
"homeassistant.components.homekit.type_cameras.HAFFmpeg",
return_value=working_ffmpeg,
):
await _async_start_streaming(hass, acc)
await _async_stop_all_streams(hass, acc)
expected_output = (
"-map 0:v:0 -an -c:v libx264 -profile:v high -tune zerolatency -pix_fmt "
"yuv420p -r 30 -b:v 299k -bufsize 1196k -maxrate 299k -payload_type 99 -ssrc {v_ssrc} -f "
"rtp -srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params "
"zdPmNLWeI86DtLJHvVLI6YPvqhVeeiLsNtrAgbgL "
"srtp://192.168.208.5:51246?rtcpport=51246&localrtcpport=51246&pkt_size=1316 -map 0:a:0 "
"-vn -c:a libopus -application lowdelay -ac 1 -ar 24k -b:a 24k -bufsize 96k -payload_type "
"110 -ssrc {a_ssrc} -f rtp -srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params "
"shnETgfD+7xUQ8zRdsaytY11wu6CO73IJ+RZVJpU "
"srtp://192.168.208.5:51108?rtcpport=51108&localrtcpport=51108&pkt_size=188"
)
working_ffmpeg.open.assert_called_with(
cmd=[],
input_source="-i /dev/null",
output=expected_output.format(**session_info),
stdout_pipe=False,
)
await _async_setup_endpoints(hass, acc)
working_ffmpeg = _get_working_mock_ffmpeg()
session_info = acc.sessions[MOCK_START_STREAM_SESSION_UUID]
with patch(
"homeassistant.components.demo.camera.DemoCamera.stream_source",
return_value="rtsp://example.local",
), patch(
"homeassistant.components.homekit.type_cameras.HAFFmpeg",
return_value=working_ffmpeg,
):
await _async_start_streaming(hass, acc)
await _async_stop_all_streams(hass, acc)
# Calling a second time should not throw
await _async_stop_all_streams(hass, acc)
turbo_jpeg = mock_turbo_jpeg(
first_width=16, first_height=12, second_width=300, second_height=200
)
with patch("turbojpeg.TurboJPEG", return_value=turbo_jpeg):
TurboJPEGSingleton()
assert await hass.async_add_executor_job(
acc.get_snapshot, {"aid": 2, "image-width": 300, "image-height": 200}
)
# Verify the bridge only forwards get_snapshot for
# cameras and valid accessory ids
assert await hass.async_add_executor_job(
bridge.get_snapshot, {"aid": 2, "image-width": 300, "image-height": 200}
)
with pytest.raises(ValueError):
assert await hass.async_add_executor_job(
bridge.get_snapshot, {"aid": 3, "image-width": 300, "image-height": 200}
)
with pytest.raises(ValueError):
assert await hass.async_add_executor_job(
bridge.get_snapshot, {"aid": 4, "image-width": 300, "image-height": 200}
)
async def test_camera_stream_source_configured_with_failing_ffmpeg(
    hass, run_driver, events
):
    """Test a camera that can stream with a configured source with ffmpeg failing."""
    await async_setup_component(hass, ffmpeg.DOMAIN, {ffmpeg.DOMAIN: {}})
    await async_setup_component(
        hass, camera.DOMAIN, {camera.DOMAIN: {"platform": "demo"}}
    )
    await hass.async_block_till_done()

    camera_entity_id = "camera.demo_camera"
    hass.states.async_set(camera_entity_id, None)
    await hass.async_block_till_done()

    options = {CONF_STREAM_SOURCE: "/dev/null", CONF_SUPPORT_AUDIO: True}
    acc = Camera(hass, run_driver, "Camera", camera_entity_id, 2, options)
    not_camera_acc = Switch(hass, run_driver, "Switch", camera_entity_id, 4, {})
    bridge = HomeBridge("hass", run_driver, "Test Bridge")
    bridge.add_accessory(acc)
    bridge.add_accessory(not_camera_acc)

    await acc.run_handler()

    assert acc.aid == 2
    assert acc.category == 17  # Camera

    await _async_setup_endpoints(hass, acc)

    # A failing ffmpeg process must not prevent clean stream teardown.
    with patch(
        "homeassistant.components.demo.camera.DemoCamera.stream_source",
        return_value="rtsp://example.local",
    ):
        with patch(
            "homeassistant.components.homekit.type_cameras.HAFFmpeg",
            return_value=_get_failing_mock_ffmpeg(),
        ):
            await _async_start_streaming(hass, acc)
            await _async_stop_all_streams(hass, acc)
            # Calling a second time should not throw
            await _async_stop_all_streams(hass, acc)
async def test_camera_stream_source_found(hass, run_driver, events):
    """Test a camera that can stream and we get the source from the entity."""
    await async_setup_component(hass, ffmpeg.DOMAIN, {ffmpeg.DOMAIN: {}})
    await async_setup_component(
        hass, camera.DOMAIN, {camera.DOMAIN: {"platform": "demo"}}
    )
    await hass.async_block_till_done()

    camera_entity_id = "camera.demo_camera"
    hass.states.async_set(camera_entity_id, None)
    await hass.async_block_till_done()

    acc = Camera(hass, run_driver, "Camera", camera_entity_id, 2, {})
    await acc.run_handler()

    assert acc.aid == 2
    assert acc.category == 17  # Camera

    # Run two full setup/start/stop cycles to verify the accessory resets
    # cleanly between streaming sessions.
    for _ in range(2):
        await _async_setup_endpoints(hass, acc)
        with patch(
            "homeassistant.components.demo.camera.DemoCamera.stream_source",
            return_value="rtsp://example.local",
        ), patch(
            "homeassistant.components.homekit.type_cameras.HAFFmpeg",
            return_value=_get_working_mock_ffmpeg(),
        ):
            await _async_start_streaming(hass, acc)
            await _async_stop_all_streams(hass, acc)
async def test_camera_stream_source_fails(hass, run_driver, events):
    """Test a camera that can stream and we cannot get the source from the entity."""
    await async_setup_component(hass, ffmpeg.DOMAIN, {ffmpeg.DOMAIN: {}})
    await async_setup_component(
        hass, camera.DOMAIN, {camera.DOMAIN: {"platform": "demo"}}
    )
    await hass.async_block_till_done()

    camera_entity_id = "camera.demo_camera"
    hass.states.async_set(camera_entity_id, None)
    await hass.async_block_till_done()

    acc = Camera(hass, run_driver, "Camera", camera_entity_id, 2, {})
    await acc.run_handler()

    assert acc.aid == 2
    assert acc.category == 17  # Camera

    await _async_setup_endpoints(hass, acc)

    # The stream-source lookup raising OSError must not break start/stop.
    with patch(
        "homeassistant.components.demo.camera.DemoCamera.stream_source",
        side_effect=OSError,
    ):
        with patch(
            "homeassistant.components.homekit.type_cameras.HAFFmpeg",
            return_value=_get_working_mock_ffmpeg(),
        ):
            await _async_start_streaming(hass, acc)
            await _async_stop_all_streams(hass, acc)
async def test_camera_with_no_stream(hass, run_driver, events):
    """Test a camera that cannot stream."""
    await async_setup_component(hass, ffmpeg.DOMAIN, {ffmpeg.DOMAIN: {}})
    await async_setup_component(hass, camera.DOMAIN, {camera.DOMAIN: {}})

    camera_entity_id = "camera.demo_camera"
    hass.states.async_set(camera_entity_id, None)
    await hass.async_block_till_done()

    acc = Camera(hass, run_driver, "Camera", camera_entity_id, 2, {})
    await acc.run_handler()

    assert acc.aid == 2
    assert acc.category == 17  # Camera

    # Start/stop must not raise even though there is nothing to stream.
    await _async_setup_endpoints(hass, acc)
    await _async_start_streaming(hass, acc)
    await _async_stop_all_streams(hass, acc)

    # Snapshot requests, however, must fail loudly.
    with pytest.raises(HomeAssistantError):
        await hass.async_add_executor_job(
            acc.get_snapshot, {"aid": 2, "image-width": 300, "image-height": 200}
        )
async def test_camera_stream_source_configured_and_copy_codec(hass, run_driver, events):
    """Test a camera that can stream with a configured source."""
    await async_setup_component(hass, ffmpeg.DOMAIN, {ffmpeg.DOMAIN: {}})
    await async_setup_component(
        hass, camera.DOMAIN, {camera.DOMAIN: {"platform": "demo"}}
    )
    await hass.async_block_till_done()
    entity_id = "camera.demo_camera"
    hass.states.async_set(entity_id, None)
    await hass.async_block_till_done()
    # Both video and audio are configured for passthrough ("copy") codecs, so
    # the generated ffmpeg command asserted below must contain "-c:v copy"
    # and "-c:a copy" instead of transcoding arguments.
    acc = Camera(
        hass,
        run_driver,
        "Camera",
        entity_id,
        2,
        {
            CONF_STREAM_SOURCE: "/dev/null",
            CONF_SUPPORT_AUDIO: True,
            CONF_VIDEO_CODEC: VIDEO_CODEC_COPY,
            CONF_AUDIO_CODEC: AUDIO_CODEC_COPY,
        },
    )
    bridge = HomeBridge("hass", run_driver, "Test Bridge")
    bridge.add_accessory(acc)
    await acc.run_handler()
    assert acc.aid == 2
    assert acc.category == 17  # Camera
    await _async_setup_endpoints(hass, acc)
    # Capture the negotiated session before streaming; its values (v_ssrc /
    # a_ssrc) are substituted into the expected command line at the bottom.
    session_info = acc.sessions[MOCK_START_STREAM_SESSION_UUID]
    working_ffmpeg = _get_working_mock_ffmpeg()
    # stream_source is patched to return None; the asserted
    # input_source "-i /dev/null" shows the configured CONF_STREAM_SOURCE
    # is used as the ffmpeg input instead.
    with patch(
        "homeassistant.components.demo.camera.DemoCamera.stream_source",
        return_value=None,
    ), patch(
        "homeassistant.components.homekit.type_cameras.HAFFmpeg",
        return_value=working_ffmpeg,
    ):
        await _async_start_streaming(hass, acc)
        await _async_reconfigure_stream(hass, acc, session_info, {})
        await _async_stop_stream(hass, acc, session_info)
        await _async_stop_all_streams(hass, acc)
    # Exact expected ffmpeg output arguments (SRTP video + audio legs); the
    # {v_ssrc}/{a_ssrc} placeholders are filled from session_info below.
    expected_output = (
        "-map 0:v:0 -an -c:v copy -tune zerolatency -pix_fmt yuv420p -r 30 -b:v 299k "
        "-bufsize 1196k -maxrate 299k -payload_type 99 -ssrc {v_ssrc} -f rtp -srtp_out_suite "
        "AES_CM_128_HMAC_SHA1_80 -srtp_out_params zdPmNLWeI86DtLJHvVLI6YPvqhVeeiLsNtrAgbgL "
        "srtp://192.168.208.5:51246?rtcpport=51246&localrtcpport=51246&pkt_size=1316 -map 0:a:0 "
        "-vn -c:a copy -ac 1 -ar 24k -b:a 24k -bufsize 96k -payload_type 110 -ssrc {a_ssrc} "
        "-f rtp -srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params "
        "shnETgfD+7xUQ8zRdsaytY11wu6CO73IJ+RZVJpU "
        "srtp://192.168.208.5:51108?rtcpport=51108&localrtcpport=51108&pkt_size=188"
    )
    working_ffmpeg.open.assert_called_with(
        cmd=[],
        input_source="-i /dev/null",
        output=expected_output.format(**session_info),
        stdout_pipe=False,
    )
async def test_camera_streaming_fails_after_starting_ffmpeg(hass, run_driver, events):
    """Test a camera that can stream with a configured source."""
    await async_setup_component(hass, ffmpeg.DOMAIN, {ffmpeg.DOMAIN: {}})
    await async_setup_component(
        hass, camera.DOMAIN, {camera.DOMAIN: {"platform": "demo"}}
    )
    await hass.async_block_till_done()
    entity_id = "camera.demo_camera"
    hass.states.async_set(entity_id, None)
    await hass.async_block_till_done()
    # h264_omx video with audio passthrough; "-c:v h264_omx" must appear in
    # the generated ffmpeg command asserted below.
    acc = Camera(
        hass,
        run_driver,
        "Camera",
        entity_id,
        2,
        {
            CONF_STREAM_SOURCE: "/dev/null",
            CONF_SUPPORT_AUDIO: True,
            CONF_VIDEO_CODEC: VIDEO_CODEC_H264_OMX,
            CONF_AUDIO_CODEC: AUDIO_CODEC_COPY,
        },
    )
    bridge = HomeBridge("hass", run_driver, "Test Bridge")
    bridge.add_accessory(acc)
    await acc.run_handler()
    assert acc.aid == 2
    assert acc.category == 17  # Camera
    await _async_setup_endpoints(hass, acc)
    # Capture the negotiated session before streaming; it supplies the ssrc
    # values used in the expected command line below.
    session_info = acc.sessions[MOCK_START_STREAM_SESSION_UUID]
    # Mock ffmpeg whose process exits right after startup (per the helper
    # name) - streaming should degrade gracefully, not raise.
    ffmpeg_with_invalid_pid = _get_exits_after_startup_mock_ffmpeg()
    with patch(
        "homeassistant.components.demo.camera.DemoCamera.stream_source",
        return_value=None,
    ), patch(
        "homeassistant.components.homekit.type_cameras.HAFFmpeg",
        return_value=ffmpeg_with_invalid_pid,
    ):
        await _async_start_streaming(hass, acc)
        await _async_reconfigure_stream(hass, acc, session_info, {})
        # Should not throw
        await _async_stop_stream(hass, acc, {"id": "does_not_exist"})
        await _async_stop_all_streams(hass, acc)
    # Exact expected ffmpeg output arguments (SRTP video + audio legs); the
    # {v_ssrc}/{a_ssrc} placeholders are filled from session_info below.
    expected_output = (
        "-map 0:v:0 -an -c:v h264_omx -profile:v high -tune zerolatency -pix_fmt yuv420p -r 30 -b:v 299k "
        "-bufsize 1196k -maxrate 299k -payload_type 99 -ssrc {v_ssrc} -f rtp -srtp_out_suite "
        "AES_CM_128_HMAC_SHA1_80 -srtp_out_params zdPmNLWeI86DtLJHvVLI6YPvqhVeeiLsNtrAgbgL "
        "srtp://192.168.208.5:51246?rtcpport=51246&localrtcpport=51246&pkt_size=1316 -map 0:a:0 "
        "-vn -c:a copy -ac 1 -ar 24k -b:a 24k -bufsize 96k -payload_type 110 -ssrc {a_ssrc} "
        "-f rtp -srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params "
        "shnETgfD+7xUQ8zRdsaytY11wu6CO73IJ+RZVJpU "
        "srtp://192.168.208.5:51108?rtcpport=51108&localrtcpport=51108&pkt_size=188"
    )
    ffmpeg_with_invalid_pid.open.assert_called_with(
        cmd=[],
        input_source="-i /dev/null",
        output=expected_output.format(**session_info),
        stdout_pipe=False,
    )
async def test_camera_with_linked_motion_sensor(hass, run_driver, events):
    """Test a camera with a linked motion sensor can update."""
    await async_setup_component(hass, ffmpeg.DOMAIN, {ffmpeg.DOMAIN: {}})
    await async_setup_component(
        hass, camera.DOMAIN, {camera.DOMAIN: {"platform": "demo"}}
    )
    await hass.async_block_till_done()

    motion_entity_id = "binary_sensor.motion"
    hass.states.async_set(
        motion_entity_id, STATE_ON, {ATTR_DEVICE_CLASS: DEVICE_CLASS_MOTION}
    )
    await hass.async_block_till_done()

    camera_entity_id = "camera.demo_camera"
    hass.states.async_set(camera_entity_id, None)
    await hass.async_block_till_done()

    config = {
        CONF_STREAM_SOURCE: "/dev/null",
        CONF_SUPPORT_AUDIO: True,
        CONF_VIDEO_CODEC: VIDEO_CODEC_H264_OMX,
        CONF_AUDIO_CODEC: AUDIO_CODEC_COPY,
        CONF_LINKED_MOTION_SENSOR: motion_entity_id,
    }
    acc = Camera(hass, run_driver, "Camera", camera_entity_id, 2, config)
    bridge = HomeBridge("hass", run_driver, "Test Bridge")
    bridge.add_accessory(acc)

    await acc.run_handler()

    assert acc.aid == 2
    assert acc.category == 17  # Camera

    # The linked sensor must be exposed as a motion service whose
    # characteristic mirrors the entity's initial ON state.
    motion_service = acc.get_service(SERV_MOTION_SENSOR)
    assert motion_service
    motion_char = motion_service.get_characteristic(CHAR_MOTION_DETECTED)
    assert motion_char
    assert motion_char.value is True

    # Sensor turning off is mirrored on the characteristic.
    hass.states.async_set(
        motion_entity_id, STATE_OFF, {ATTR_DEVICE_CLASS: DEVICE_CLASS_MOTION}
    )
    await hass.async_block_till_done()
    assert motion_char.value is False

    # Set the characteristic directly, then flip the entity back on; the
    # characteristic stays in sync with the entity state.
    motion_char.set_value(True)
    hass.states.async_set(
        motion_entity_id, STATE_ON, {ATTR_DEVICE_CLASS: DEVICE_CLASS_MOTION}
    )
    await hass.async_block_till_done()
    assert motion_char.value is True
async def test_camera_with_a_missing_linked_motion_sensor(hass, run_driver, events):
    """Test a camera with a configured linked motion sensor that is missing."""
    await async_setup_component(hass, ffmpeg.DOMAIN, {ffmpeg.DOMAIN: {}})
    await async_setup_component(
        hass, camera.DOMAIN, {camera.DOMAIN: {"platform": "demo"}}
    )
    await hass.async_block_till_done()

    camera_entity_id = "camera.demo_camera"
    hass.states.async_set(camera_entity_id, None)
    await hass.async_block_till_done()

    # Link a motion sensor entity that was never created in the state machine.
    acc = Camera(
        hass,
        run_driver,
        "Camera",
        camera_entity_id,
        2,
        {CONF_LINKED_MOTION_SENSOR: "binary_sensor.motion"},
    )
    bridge = HomeBridge("hass", run_driver, "Test Bridge")
    bridge.add_accessory(acc)

    await acc.run_handler()

    assert acc.aid == 2
    assert acc.category == 17  # Camera
    # No motion service should be exposed for the missing entity.
    assert not acc.get_service(SERV_MOTION_SENSOR)
| |
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Oct 26 2018)
## http://www.wxformbuilder.org/
##
## PLEASE DO *NOT* EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
###########################################################################
## Class MainWindow
###########################################################################
class MainWindow ( wx.Frame ):
    """Hangman main window (originally generated with wxFormBuilder).

    Layout: a read-only word display, an A-Z on-screen keyboard, a "Lives"
    gauge with a spin button, a "Statistics" panel, and New Game / Load
    Words buttons.  Event handlers are virtual; override them in a derived
    class.
    """

    def __init__( self, parent ):
        wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"Hangman", pos = wx.DefaultPosition, size = wx.Size( 500,400 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )

        self.SetSizeHints( wx.Size( 500,400 ), wx.DefaultSize )
        self.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )

        bSizer1 = wx.BoxSizer( wx.VERTICAL )

        # Read-only, centered display of the word being guessed.
        self.m_word = wx.TextCtrl( self, wx.ID_ANY, u"HANGMAN", wx.DefaultPosition, wx.DefaultSize, wx.TE_READONLY|wx.TE_CENTER )
        self.m_word.SetFont( wx.Font( 36, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False, "Consolas" ) )
        bSizer1.Add( self.m_word, 0, wx.ALL|wx.EXPAND, 5 )

        # On-screen keyboard: 26 letter buttons in a 4x7 grid.  The generated
        # code created each button by hand; this loop keeps the same attribute
        # names (m_btn_Key0 .. m_btn_Key25), creation order and sizer flags.
        # Spacers after "U" (end of third row) and "Z" center the last row.
        buttonsSizer = wx.GridSizer( 4, 7, 0, 0 )
        for index, letter in enumerate( u"ABCDEFGHIJKLMNOPQRSTUVWXYZ" ):
            button = wx.Button( self, wx.ID_ANY, letter, wx.DefaultPosition, wx.DefaultSize, 0 )
            setattr( self, "m_btn_Key%d" % index, button )
            buttonsSizer.Add( button, 1, wx.ALL|wx.EXPAND, 5 )
            if letter == u"U":
                buttonsSizer.Add( ( 0, 0), 1, wx.EXPAND, 5 )
        buttonsSizer.Add( ( 0, 0), 1, wx.EXPAND, 5 )

        bSizer1.Add( buttonsSizer, 1, wx.EXPAND, 5 )

        # "Lives" box: a gauge plus a numeric readout and spin button.
        sbSizer1 = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u"Lives" ), wx.VERTICAL )

        bSizer4 = wx.BoxSizer( wx.HORIZONTAL )

        self.m_gauge_lives = wx.Gauge( sbSizer1.GetStaticBox(), wx.ID_ANY, 6, wx.DefaultPosition, wx.DefaultSize, wx.GA_HORIZONTAL )
        self.m_gauge_lives.SetValue( 6 )
        bSizer4.Add( self.m_gauge_lives, 1, wx.EXPAND|wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )

        bSizer5 = wx.BoxSizer( wx.VERTICAL )

        bSizer5.Add( ( 0, 0), 1, wx.EXPAND, 5 )

        bSizer6 = wx.BoxSizer( wx.HORIZONTAL )

        self.m_livesCount = wx.StaticText( sbSizer1.GetStaticBox(), wx.ID_ANY, u"6", wx.DefaultPosition, wx.Size( 15,-1 ), wx.ALIGN_CENTER_HORIZONTAL )
        self.m_livesCount.Wrap( -1 )
        bSizer6.Add( self.m_livesCount, 0, wx.ALL, 5 )

        self.m_spinBtn1 = wx.SpinButton( sbSizer1.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.Size( 20,-1 ), 0 )
        bSizer6.Add( self.m_spinBtn1, 1, wx.EXPAND, 5 )

        bSizer5.Add( bSizer6, 0, 0, 5 )

        bSizer5.Add( ( 0, 0), 1, wx.EXPAND, 5 )

        bSizer4.Add( bSizer5, 0, wx.EXPAND, 5 )

        sbSizer1.Add( bSizer4, 1, wx.EXPAND, 5 )

        bSizer1.Add( sbSizer1, 0, wx.EXPAND, 5 )

        # "Statistics" box: games played / won / lost and success percentage.
        sbSizer2 = wx.StaticBoxSizer( wx.StaticBox( self, wx.ID_ANY, u"Statistics" ), wx.VERTICAL )

        bSizer2 = wx.BoxSizer( wx.HORIZONTAL )

        self.m_staticText1 = wx.StaticText( sbSizer2.GetStaticBox(), wx.ID_ANY, u"Games played:", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText1.Wrap( -1 )
        bSizer2.Add( self.m_staticText1, 0, wx.ALL, 5 )

        self.m_gamesPlayed = wx.StaticText( sbSizer2.GetStaticBox(), wx.ID_ANY, u"XX", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_gamesPlayed.Wrap( -1 )
        bSizer2.Add( self.m_gamesPlayed, 0, wx.ALL, 5 )

        bSizer2.Add( ( 0, 0), 1, wx.EXPAND, 5 )

        self.m_staticText2 = wx.StaticText( sbSizer2.GetStaticBox(), wx.ID_ANY, u"Games won", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText2.Wrap( -1 )
        bSizer2.Add( self.m_staticText2, 0, wx.ALL, 5 )

        self.m_gamesWon = wx.StaticText( sbSizer2.GetStaticBox(), wx.ID_ANY, u"XX", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_gamesWon.Wrap( -1 )
        bSizer2.Add( self.m_gamesWon, 0, wx.ALL, 5 )

        bSizer2.Add( ( 0, 0), 1, wx.EXPAND, 5 )

        self.m_staticText5 = wx.StaticText( sbSizer2.GetStaticBox(), wx.ID_ANY, u"Games lost", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText5.Wrap( -1 )
        bSizer2.Add( self.m_staticText5, 0, wx.ALL, 5 )

        self.m_gamesLost = wx.StaticText( sbSizer2.GetStaticBox(), wx.ID_ANY, u"XX", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_gamesLost.Wrap( -1 )
        bSizer2.Add( self.m_gamesLost, 0, wx.ALL, 5 )

        bSizer2.Add( ( 0, 0), 1, wx.EXPAND, 5 )

        # Typo fix: "Succes" -> "Success" in the user-visible label.
        self.m_staticText7 = wx.StaticText( sbSizer2.GetStaticBox(), wx.ID_ANY, u"Success percentage", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText7.Wrap( -1 )
        bSizer2.Add( self.m_staticText7, 0, wx.ALL, 5 )

        self.m_successPercent = wx.StaticText( sbSizer2.GetStaticBox(), wx.ID_ANY, u"XX", wx.DefaultPosition, wx.Size( 25,-1 ), wx.ALIGN_RIGHT )
        self.m_successPercent.Wrap( -1 )
        bSizer2.Add( self.m_successPercent, 0, wx.TOP|wx.BOTTOM|wx.LEFT, 5 )

        self.m_staticText9 = wx.StaticText( sbSizer2.GetStaticBox(), wx.ID_ANY, u"%", wx.DefaultPosition, wx.DefaultSize, 0 )
        self.m_staticText9.Wrap( -1 )
        bSizer2.Add( self.m_staticText9, 0, wx.TOP|wx.BOTTOM|wx.RIGHT, 5 )

        sbSizer2.Add( bSizer2, 1, wx.EXPAND, 5 )

        bSizer1.Add( sbSizer2, 0, wx.EXPAND, 5 )

        # Bottom row: New Game / Load Words buttons.
        gSizer3 = wx.GridSizer( 1, 2, 0, 0 )

        self.m_btn_newGame = wx.Button( self, wx.ID_ANY, u"New Game", wx.DefaultPosition, wx.DefaultSize, 0 )
        gSizer3.Add( self.m_btn_newGame, 1, wx.ALL|wx.EXPAND, 5 )

        self.m_btn_Load = wx.Button( self, wx.ID_ANY, u"Load Words", wx.DefaultPosition, wx.DefaultSize, 0 )
        gSizer3.Add( self.m_btn_Load, 1, wx.ALL|wx.EXPAND, 5 )

        bSizer1.Add( gSizer3, 0, wx.EXPAND, 5 )

        self.SetSizer( bSizer1 )
        self.Layout()

        self.Centre( wx.BOTH )

        # Connect Events: all 26 letter buttons share one handler.
        for index in range( 26 ):
            getattr( self, "m_btn_Key%d" % index ).Bind( wx.EVT_BUTTON, self.LetterButtonClicked )
        self.m_spinBtn1.Bind( wx.EVT_SPIN_DOWN, self.OnLivesDown )
        self.m_spinBtn1.Bind( wx.EVT_SPIN_UP, self.OnLivesUp )
        self.m_btn_newGame.Bind( wx.EVT_BUTTON, self.NewGameButtonClicked )
        self.m_btn_Load.Bind( wx.EVT_BUTTON, self.LoadButtonClicked )

    def __del__( self ):
        pass

    # Virtual event handlers, override them in your derived class
    def LetterButtonClicked( self, event ):
        event.Skip()

    def OnLivesDown( self, event ):
        event.Skip()

    def OnLivesUp( self, event ):
        event.Skip()

    def NewGameButtonClicked( self, event ):
        event.Skip()

    def LoadButtonClicked( self, event ):
        event.Skip()
| |
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 3: Deep Learning and Neural Networks
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2015 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
__author__ = 'jheaton'
import sys
import numpy as np
class Train(object):
    """Base class for training algorithms.

    Supports either minimization or maximization of a score function,
    although individual subclasses may not support both.
    """

    def __init__(self, goal_minimize=True):
        """Set up common trainer state.

        @param goal_minimize: True to minimize the score, False to maximize.
        """
        self.max_iterations = 100000
        self.position = []
        self.best_score = 0
        self.goal_minimize = goal_minimize
        self.display_final = True
        self.display_iteration = False
        self.stop_score = None

    def better_than(self, is_this, than_that):
        """Compare two scores under the current minimize/maximize goal.

        @param is_this: The first score to compare.
        @param than_that: The second score to compare.
        @return: True, if the first score is better than the second.
        """
        if self.goal_minimize:
            return is_this < than_that
        return is_this > than_that

    def should_stop(self, iteration, best_score):
        """Decide whether training should halt.

        @param iteration: The current iteration.
        @param best_score: The current best score.
        @return: True, if we should stop.
        """
        out_of_iterations = iteration > self.max_iterations
        reached_target = (
            self.stop_score is not None
            and self.better_than(best_score, self.stop_score)
        )
        return out_of_iterations or reached_target
class TrainGreedRandom(Train):
    """
    The Greedy Random learning algorithm is a very primitive random-walk algorithm that only takes steps that serve
    to move the Machine Learning algorithm to a more optimal position. This learning algorithm essentially chooses
    random locations for the long term memory until a better set is found.
    http://en.wikipedia.org/wiki/Random_walk
    """

    def __init__(self, low, high, goal_minimize=True):
        """
        Construct a greedy random trainer.
        @param low: The low end of random numbers to generate.
        @param high: The high end of random numbers to generate.
        @param goal_minimize: Is the goal to minimize?
        """
        # BUG FIX: the assignments were previously swapped
        # (self.high = low; self.low = high), inverting the sampling range.
        self.low = low
        self.high = high
        Train.__init__(self, goal_minimize)

    def train(self, x0, funct):
        """
        Train with the specified score function.
        @param x0: The initial vector for long-term memory.
        @param funct: The score function. We attempt to minimize or maximize this.
        @return: The trained long-term memory vector.
        """
        iteration_number = 1
        self.position = list(x0)
        self.best_score = funct(self.position)
        while not self.should_stop(iteration_number, self.best_score):
            # Clone current position, create a new array of same size.
            trial_position = list(self.position)
            # Randomize trial position.
            self.perform_randomization(trial_position)
            # Obtain new trial score; only keep the trial if it improves.
            trial_score = funct(trial_position)
            if self.better_than(trial_score, self.best_score):
                self.best_score = trial_score
                self.position = trial_position
            # (Removed a redundant extra evaluation of funct(self.position)
            # whose result was never used.)
            if self.display_iteration:
                print("Iteration #" + str(iteration_number) + ", Score: " + str(self.best_score))
            iteration_number += 1
        if self.display_final:
            print("Finished after " + str(iteration_number) + " iterations, final score is " + str(self.best_score))
        return self.position

    def perform_randomization(self, vec):
        """Fill *vec* in place with uniform random values in [low, high)."""
        # BUG FIX: xrange does not exist on Python 3; the rest of this module
        # (e.g. TrainAnneal) already uses range.
        for i in range(len(vec)):
            vec[i] = np.random.uniform(self.low, self.high)
class TrainHillClimb(Train):
    """
    Train using hill climbing. Hill climbing can be used to optimize the long term memory of a Machine Learning
    Algorithm. This is done by moving the current long term memory values to a new location if that new location
    gives a better score from the scoring function.
    http://en.wikipedia.org/wiki/Hill_climbing
    """

    def __init__(self, goal_minimize=True):
        """
        Construct a hill-climbing trainer.
        @param goal_minimize: Is the goal to minimize?
        """
        Train.__init__(self, goal_minimize)

    def train(self, x0, funct, acceleration=1.2, step_size=1.0):
        """
        Train up to the specified maximum number of iterations using hill climbing.
        @param x0: The initial vector for long-term memory.
        @param funct: The score function. We attempt to minimize or maximize this.
        @param acceleration: The acceleration (default=1.2)
        @param step_size: The step size (default=1.0)
        @return: The trained long-term memory vector.
        """
        iteration_number = 1
        self.position = list(x0)
        self.best_score = funct(self.position)

        # Per-dimension step sizes, all starting at the requested step_size.
        step_size = [step_size] * len(x0)

        # Candidate step multipliers: accelerate or decelerate in either
        # direction, or stand still.
        candidate = [
            -acceleration,
            -1 / acceleration,
            0,
            1 / acceleration,
            acceleration,
        ]

        while not self.should_stop(iteration_number, self.best_score):
            # BUG FIX: use +/- infinity sentinels. The previous code used
            # sys.float_info.min for maximization, but that is the smallest
            # *positive* float, so all-negative scores could never beat it
            # and no step would ever be taken.
            if self.goal_minimize:
                best_step_score = float("inf")
            else:
                best_step_score = float("-inf")

            for dimension in range(len(self.position)):
                best = -1
                for i in range(len(candidate)):
                    # Take a trial step in this dimension only.
                    self.position[dimension] += candidate[i] * step_size[dimension]
                    # Obtain new trial score.
                    trial_score = funct(self.position)
                    # Step back, we only want to try movement in one dimension.
                    self.position[dimension] -= candidate[i] * step_size[dimension]
                    # Record best step taken
                    if self.better_than(trial_score, best_step_score):
                        best_step_score = trial_score
                        best = i

                if best != -1:
                    # Commit the best step and adapt this dimension's step size.
                    self.best_score = best_step_score
                    self.position[dimension] += candidate[best] * step_size[dimension]
                    step_size[dimension] += candidate[best]

            if self.display_iteration:
                print("Iteration #" + str(iteration_number) + ", Score: " + str(self.best_score))
            iteration_number += 1

        if self.display_final:
            print("Finished after " + str(iteration_number) + " iterations, final score is " + str(self.best_score))
        return self.position
class TrainAnneal(Train):
    """
    Train a Machine Learning Algorithm using Simulated Annealing.  Simulated Annealing is a Monte Carlo algorithm
    that is based on annealing in metallurgy, a technique involving heating and controlled cooling of a
    material to increase the size of its crystals and reduce their defects, both are attributes of the material
    that depend on its thermodynamic free energy.
    The Simulated Annealing algorithm works by randomly changing a vector of doubles.  This is the long term memory
    of the Machine Learning algorithm.  While this happens a temperature is slowly decreased.  When this
    temperature is higher, the Simulated Annealing algorithm is more likely to accept changes that have a higher
    error (or energy) than the current state.
    There are several important components to any Simulated Learning Algorithm:
    First, the randomization technique.  This is performed by the method performRandomize.  To randomize
    differently, override this method.
    Secondly, the cooling schedule.  This determines how quickly the current temperature will fall.  This is
    controlled by the coolingSchedule.  To define a different cooling schedule, override this method.
    Finally, the probability of accepting a higher-error (energy) solution.  This is defined by a Probability
    Distribution Function (PDF) contained in calcProbability.  To define a different PDF, override this method.
    http://en.wikipedia.org/wiki/Simulated_annealing
    """
    def __init__(self, max_iterations=100, starting_temperature=400, ending_temperature=0.0001):
        """
        Create a simulated annealing trainer.
        @param max_iterations: The maximum number of iterations.
        @param starting_temperature: The starting temperature.
        @param ending_temperature: The ending temperature.
        """
        # Annealing here always minimizes, so goal_minimize is forced True.
        Train.__init__(self, True)
        self.max_iterations = max_iterations
        self.starting_temperature = starting_temperature
        self.ending_temperature = ending_temperature
        # Number of perturbation attempts per temperature level.
        self.cycles = 100
        # Last acceptance probability computed, kept for display/debugging.
        self.last_probability = 0
    def train(self, x0, funct):
        """
        Train for the specified number of iterations using simulated annealing.  The temperature will be lowered
        between the specified range at each iteration.  You can also use the cycles property to set how many cycles
        are executed at each iteration.  Simulated annealing can only be used to minimize the score function.
        @param x0: The initial long-term memory.
        @param funct: The score function.
        @return: The trained long-term memory.
        """
        iteration_number = 1
        self.position = list(x0)
        self.best_score = funct(self.position)
        # Track the current (possibly worse) state separately from the best
        # state ever seen; only the best is returned.
        current_score = self.best_score
        current_position = list(x0)
        while not self.should_stop(iteration_number, self.best_score):
            # Lower the temperature according to the cooling schedule.
            current_temperature = self.cooling_schedule(iteration_number)
            for c in range(0, self.cycles):
                # Clone current position, create a new array of same size.
                trial_position = list(current_position)
                # Randomize trial position.
                self.perform_randomization(trial_position)
                # Obtain new trial score.
                trial_score = funct(trial_position)
                # Accept the trial if it improves, or probabilistically if it
                # is worse - acceptance is more likely at high temperature.
                keep = False
                if self.better_than(trial_score, current_score):
                    keep = True
                else:
                    self.last_probability = self.calc_probability(current_score, trial_score, current_temperature)
                    if self.last_probability > np.random.uniform():
                        keep = True
                if keep:
                    current_score = trial_score
                    current_position = list(trial_position)
                    # Remember the best position ever reached.
                    if self.better_than(current_score, self.best_score):
                        self.best_score = current_score
                        self.position = list(current_position)
            if self.display_iteration:
                print("Iteration #" + str(iteration_number) + ", Score: " + str(self.best_score)
                      + ",k=" + str(iteration_number)
                      + ",kMax=" + str(self.max_iterations)
                      + ",t=" + str(current_temperature) + ",prob=" + str(self.last_probability) + ","
                      + str(current_score))
            iteration_number += 1
        if self.display_final:
            print("Finished after " + str(iteration_number) + " iterations, final score is " + str(self.best_score))
        return self.position
    def calc_probability(self, error_current, error_new, t):
        """
        Calculate the probability of accepting a worse position.  This can be overridden to provide other
        implementations.
        @param error_current: The current error (score).
        @param error_new: The new error (score)
        @param t: The temperature.
        @return: The probability of accepting the worse score.
        """
        return np.exp(-(np.abs(error_new - error_current) / t))
    def cooling_schedule(self, current_iteration):
        """
        Determine the temperature for the specified iteration.  This method can be overridden to provide other cooling
        schedules.  The default decays exponentially from the starting to the ending temperature.
        @param current_iteration: The iteration number.
        @return: The temperature.
        """
        ex = float(current_iteration) / float(self.max_iterations)
        return self.starting_temperature * (self.ending_temperature / self.starting_temperature) ** ex
    def perform_randomization(self, vec):
        """
        Randomize the provided position to move to a neighbor position.  The provided method perterbs each vector
        element by one tenth of a normally distributed random number.  This works for many continuous problems,
        however, a different method must be used for discrete problems.
        @param vec: The vector to perturb in place.
        @return: None; *vec* is modified in place.
        """
        for i in range(0, len(vec)):
            d = np.random.randn() / 10
            vec[i] += d
| |
"""
G R A D I E N T - E N H A N C E D N E U R A L N E T W O R K S (G E N N)
Author: Steven H. Berguin <steven.berguin@gtri.gatech.edu>
This package is distributed under New BSD license.
"""
from smt.surrogate_models.surrogate_model import SurrogateModel
from smt.utils.neural_net.model import Model
import numpy as np
# ------------------------------------ S U P P O R T F U N C T I O N S -----------------------------------------------
def load_smt_data(model, xt, yt, dyt_dxt=None):
    """
    Utility function to load SMT training data with less boilerplate.
    :param model: SurrogateModel object for which to load training data
    :param xt: smt data points at which the response is evaluated
    :param yt: response at xt
    :param dyt_dxt: gradient at xt (optional)
    """
    # Infer the number of examples, m, and the input dimension, n_x.
    if xt.ndim == 1:
        m, n_x = xt.size, 1
    else:
        m, n_x = xt.shape
    # Infer the output dimension, n_y.
    n_y = yt.shape[1] if yt.ndim > 1 else 1
    # Force the 2-D column layout the SMT API expects.
    xt = xt.reshape((m, n_x))
    yt = yt.reshape((m, n_y))
    model.set_training_values(xt, yt)
    # Register one partial-derivative column per input variable, when provided.
    if dyt_dxt is not None:
        dyt_dxt = dyt_dxt.reshape((m, n_x))
        for kx in range(n_x):
            model.set_training_derivatives(xt, dyt_dxt[:, kx].reshape((m, 1)), kx)
def smt_to_genn(training_points):
    """
    Translate from SMT data structure to GENN data structure.

    Concretely, this neural net module works with numpy arrays in the form of (X, Y, J) as defined here-under. However,
    SMT uses a different format. Hence, we need a function that takes care of the translation.

    :param: training_points -- dict, training data in the format used by surrogate_model.py (see SMT API)

    Returns:
    :return X -- a numpy matrix of input features of shape (n_x, m) where n_x = no. of inputs, m = no. of train examples
    :return Y -- a numpy matrix of output labels of shape (n_y, m) where n_y = no. of outputs
    :return J -- a numpy array of size (n_y, n_x, m) representing the Jacobian of Y w.r.t. X:
                    dY1/dX1 = J[0][0][:]
                    dY1/dX2 = J[0][1][:]
                    ...
                    dY2/dX1 = J[1][0][:]
                    dY2/dX2 = J[1][1][:]
                    ...
        N.B. To retrieve the i^th example for dY2/dX1: J[1][0][i] for all i = 1,...,m

    :raises AssertionError: if a derivative entry's shape does not match the values
        entry, or if derivatives are provided at different points than the values.
    """
    # Retrieve training data from SMT training_points:
    # training_points[name][0] = [np.array(xt), np.array(yt)]
    xt, yt = training_points[None][0]
    # Deduce number of dimensions and training examples
    m, n_x = xt.shape
    _, n_y = yt.shape
    # Assign training data but transpose to match neural net implementation
    X = xt
    Y = yt
    # Loop to retrieve each partial derivative from SMT training_points
    J = np.zeros((m, n_x, n_y))
    for k in range(0, n_x):
        xt, dyt_dxt = training_points[None][k + 1]
        # assert that dimensions match
        assert xt.shape[0] == m
        assert xt.shape[1] == n_x
        assert dyt_dxt.shape[0] == m
        assert dyt_dxt.shape[1] == n_y
        # Assert that derivatives provided are for the same training points.
        # BUG FIX: the previous check `xt.all() == X.all()` compared two reduced
        # booleans (almost always True == True) and therefore verified nothing;
        # compare the arrays element-wise instead.
        assert np.array_equal(xt, X)
        # Assign training derivative but transpose to match neural net implementation
        J[:, k, :] = dyt_dxt
    return X.T, Y.T, J.T
# ------------------------------------ C L A S S -----------------------------------------------------------------------
class GENN(SurrogateModel):
    """Gradient-Enhanced Neural Network (GENN) surrogate model.

    Wraps the neural net in smt.utils.neural_net.model.Model behind the SMT
    SurrogateModel API. Training derivatives (gradient enhancement) are used
    when available; the "gamma" option weights their contribution.
    """

    def _initialize(self):
        """API function: set default values for user options"""
        declare = self.options.declare
        declare("alpha", 0.5, types=(int, float), desc="optimizer learning rate")
        declare(
            "beta1", 0.9, types=(int, float), desc="Adam optimizer tuning parameter"
        )
        declare(
            "beta2", 0.99, types=(int, float), desc="Adam optimizer tuning parameter"
        )
        declare("lambd", 0.1, types=(int, float), desc="regularization coefficient")
        declare(
            "gamma", 1.0, types=(int, float), desc="gradient-enhancement coefficient"
        )
        declare("deep", 2, types=int, desc="number of hidden layers")
        declare("wide", 2, types=int, desc="number of nodes per hidden layer")
        declare(
            "mini_batch_size",
            64,
            types=int,
            desc="split data into batches of specified size",
        )
        declare(
            "num_epochs", 10, types=int, desc="number of random passes through the data"
        )
        declare(
            "num_iterations",
            100,
            types=int,
            desc="number of optimizer iterations per mini-batch",
        )
        declare(
            "seed",
            None,
            types=int,
            desc="random seed to ensure repeatability of results when desired",
        )
        declare("is_print", True, types=bool, desc="print progress (or not)")
        self.supports["derivatives"] = True
        self.supports["training_derivatives"] = True
        self.name = "GENN"
        self.model = Model()
        self._is_trained = False

    def _train(self):
        """
        API function: train the neural net
        """
        # Convert training data to the (X, Y, J) layout expected by the neural
        # net module (features/labels transposed, Jacobian stacked).
        X, Y, J = smt_to_genn(self.training_points)
        # If there are no training derivatives, turn off gradient-enhancement.
        # (isinstance replaces the non-idiomatic `type(J) == np.ndarray` check.)
        if isinstance(J, np.ndarray) and J.size == 0:
            self.options["gamma"] = 0.0
        # Get hyperparameters from SMT API
        alpha = self.options["alpha"]
        beta1 = self.options["beta1"]
        beta2 = self.options["beta2"]
        lambd = self.options["lambd"]
        gamma = self.options["gamma"]
        deep = self.options["deep"]
        wide = self.options["wide"]
        mini_batch_size = self.options["mini_batch_size"]
        num_iterations = self.options["num_iterations"]
        num_epochs = self.options["num_epochs"]
        seed = self.options["seed"]
        is_print = self.options["is_print"]
        # number of inputs and outputs
        n_x = X.shape[0]
        n_y = Y.shape[0]
        # Train neural net (a fresh Model is built each time _train is called)
        self.model = Model.initialize(n_x, n_y, deep, wide)
        self.model.train(
            X=X,
            Y=Y,
            J=J,
            num_iterations=num_iterations,
            mini_batch_size=mini_batch_size,
            num_epochs=num_epochs,
            alpha=alpha,
            beta1=beta1,
            beta2=beta2,
            lambd=lambd,
            gamma=gamma,
            seed=seed,
            silent=not is_print,
        )
        self._is_trained = True

    def _predict_values(self, x):
        """
        API method: predict values using appropriate methods from the neural_network.py module

        :param x: np.ndarray[n, nx] -- Input values for the prediction points
        :return y: np.ndarray[n, ny] -- Output values at the prediction points
        """
        # Transpose in and out: the model works in column-major (n_x, m) layout.
        return self.model.evaluate(x.T).T

    def _predict_derivatives(self, x, kx):
        """
        API method: predict partials using appropriate methods from the neural_network.py module

        :param x: np.ndarray[n, nx] -- Input values for the prediction points
        :param kx: int -- The 0-based index of the input variable with respect to which derivatives are desired
        :return: dy_dx: np.ndarray[n, ny] -- partial derivatives
        """
        # model.gradient returns the full Jacobian; slice out the kx-th input.
        return self.model.gradient(x.T)[:, kx, :].T

    def plot_training_history(self):
        """Plot the training history (delegates to the model); no-op if untrained."""
        if self._is_trained:
            self.model.plot_training_history()

    def goodness_of_fit(self, xv, yv, dyv_dxv):
        """
        Compute metrics to evaluate goodness of fit and show actual by predicted plot

        :param xv: np.ndarray[n, nx], x validation points
        :param yv: np.ndarray[n, 1], y validation response
        :param dyv_dxv: np.ndarray[n, ny], dydx validation derivatives
        """
        # Store current training points
        training_points = self.training_points
        # Replace training points with test (validation) points.
        # NOTE(review): load_smt_data mutates self.training_points in place, so
        # restoring the saved reference below may not restore the original
        # contents if set_training_values writes into the same dict -- verify.
        load_smt_data(self, xv, yv, dyv_dxv)
        # Convert from SMT format to a more convenient format for GENN
        X, Y, J = smt_to_genn(self.training_points)
        # Generate goodness of fit plots
        self.model.goodness_of_fit(X, Y)
        # Restore training points
        self.training_points = training_points
def run_example(is_gradient_enhancement=True):  # pragma: no cover
    """Test and demonstrate GENN using a 1D example: y = x*sin(x)."""
    import matplotlib.pyplot as plt

    # Test function and its analytic derivative
    def f(x):
        return x * np.sin(x)

    def df_dx(x):
        return np.sin(x) + x * np.cos(x)

    # Domain bounds
    lb, ub = -np.pi, np.pi
    # Training data: m evenly spaced samples
    m = 4
    xt = np.linspace(lb, ub, m)
    yt = f(xt)
    dyt_dxt = df_dx(xt)
    # Validation data: random points in the domain
    xv = lb + np.random.rand(30, 1) * (ub - lb)
    yv = f(xv)
    dyv_dxv = df_dx(xv)
    # Initialize and configure the GENN model
    genn = GENN()
    settings = {
        "alpha": 0.1,
        "beta1": 0.9,
        "beta2": 0.99,
        "lambd": 0.1,
        "gamma": int(is_gradient_enhancement),
        "deep": 2,
        "wide": 6,
        "mini_batch_size": 64,
        "num_epochs": 20,
        "num_iterations": 100,
        "is_print": True,
    }
    for option_name, option_value in settings.items():
        genn.options[option_name] = option_value
    # Load data, train, and assess goodness of fit
    load_smt_data(genn, xt, yt, dyt_dxt)
    genn.train()
    genn.plot_training_history()
    genn.goodness_of_fit(xv, yv, dyv_dxv)
    # Plot predicted vs. true response
    title = (
        "with gradient enhancement"
        if genn.options["gamma"] == 1.0
        else "without gradient enhancement"
    )
    x = np.arange(lb, ub, 0.01)
    y = f(x)
    y_pred = genn.predict_values(x)
    fig, ax = plt.subplots()
    ax.plot(x, y_pred)
    ax.plot(x, y, "k--")
    ax.plot(xv, yv, "ro")
    ax.plot(xt, yt, "k+", mew=3, ms=10)
    ax.set(xlabel="x", ylabel="y", title=title)
    ax.legend(["Predicted", "True", "Test", "Train"])
    plt.show()
# Run the 1D demonstration when this module is executed as a script.
if __name__ == "__main__":  # pragma: no cover
    run_example(is_gradient_enhancement=True)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument, import-outside-toplevel, no-value-for-parameter
"""A set of passes to legalize some of operations for the NPU"""
from typing import List, Type, Callable
import math
import numpy as np # type: ignore
import tvm # type: ignore
from tvm import relay
from tvm import ir
from tvm.relay.dataflow_pattern import DFPatternCallback # type: ignore
from tvm.relay.dataflow_pattern import wildcard
from tvm.relay.dataflow_pattern import is_op
from tvm.relay.dataflow_pattern import rewrite
from tvm.relay.dataflow_pattern import CallPattern
from tvm.relay.backend.contrib.ethosu import op as ethosu_ops # type: ignore
from tvm.relay.backend.contrib.ethosu import vela_api
from tvm.relay.backend.contrib.ethosu import util
from tvm.relay.op.contrib import ethosu as ethosu_patterns # type: ignore
class SplitRewriter(DFPatternCallback):
    """Legalize split by lowering it to a sequence of strided_slice operations.

    Codegen is based on strided_slice, so each section produced by a split is
    expressed as the slice of the input tensor that feeds its consumer.
    """

    def __init__(self):
        super().__init__(require_type=True)
        self.split_in = wildcard()
        self.pattern = is_op("split")(self.split_in)

    @staticmethod
    def get_section_begin_coords(split: tvm.relay.Expr) -> List[int]:
        """Normalize split attributes into per-section begin coordinates.

        The split operator accepts either an array of indices or an integer
        number of sections; both forms are normalized here into the coordinate
        at which every section begins along the split axis.

        Parameters
        ----------
        split : tvm.relay.Expr
            The Relay Call expression for a split operator

        Returns
        -------
        section_begins : List[int]
            A list containing integers corresponding to section begins
        """
        indices_or_sections = split.attrs.indices_or_sections
        if isinstance(indices_or_sections, tvm.ir.container.Array):
            # The first section always begins at coordinate 0.
            return [0] + list(indices_or_sections)
        split_axis_len = split.args[0].checked_type.shape[split.attrs.axis].value
        section_length = split_axis_len // indices_or_sections.value
        return list(range(0, split_axis_len, section_length))

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        split_input = post.args[0]
        split_axis = post.attrs.axis
        input_shape = list(split_input.checked_type.shape)
        rank = len(input_shape)
        section_begins = self.get_section_begin_coords(post)
        strided_slices = []
        for index, section_begin in enumerate(section_begins):
            # Each slice starts at the section begin on the split axis and at
            # zero everywhere else.
            begin = [0] * rank
            begin[split_axis] = section_begin
            # Each slice ends where the next section begins; the final slice
            # ends at the full input shape.
            end = list(input_shape)
            if index + 1 < len(section_begins):
                end[split_axis] = section_begins[index + 1]
            strided_slices.append(relay.strided_slice(split_input, begin, end))
        return relay.Tuple(strided_slices)
class PartitionedSplitRewriter(DFPatternCallback):
    """This pass brings the split out of the partitioned function"""

    def __init__(self):
        super().__init__(require_type=True, rewrite_once=True)
        split_composite = wildcard().has_attr(
            {"Composite": ethosu_patterns.SplitParams.composite_name}
        )
        self.pattern = split_composite(wildcard())

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        # Recover the split attributes from the partitioned function body and
        # re-emit a plain relay split on the function's input.
        split_params = ethosu_patterns.SplitParams(post.op.body)
        return relay.op.split(
            post.args[0], split_params.indices_or_sections, axis=split_params.axis
        ).astuple()
@ir.transform.module_pass(opt_level=1)
class LegalizeSplit:
    """This is the pass that wraps SplitRewriter"""

    def transform_module(
        self, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
    ) -> tvm.ir.IRModule:
        for gvar, function in mod.functions.items():
            function = rewrite(PartitionedSplitRewriter(), function)
            mod.update_func(gvar, rewrite(SplitRewriter(), function))
        return mod

    def __call__(self, *args, **kwargs):
        # module_pass drives transform_module directly; nothing to do here.
        pass
def get_lut_from_func(
    ifm_scale: float, ifm_zp: int, ofm_scale: float, ofm_zp: int, func: Callable[[float], float]
) -> List[int]:
    """Build the lookup-table values for `func` over the full quantized input range."""
    # Only int8 is currently supported.
    qmin, qmax = np.iinfo(np.int8).min, np.iinfo(np.int8).max
    lut_values = []
    for quantized in range(qmin, qmax + 1):
        # Dequantize the input, apply the function, then requantize and clamp.
        real_out = func(ifm_scale * (quantized - ifm_zp))
        requantized = int(util.round_away_zero(ofm_zp + real_out / ofm_scale))
        lut_values.append(min(qmax, max(qmin, requantized)))
    return lut_values
class LutActivationRewriter(DFPatternCallback):
    """A class to create an identity operator with the LUT"""
    def __init__(
        self, params_class: Type, activation_type: str, calc_func: Callable[[float], float]
    ):
        # params_class: the ethosu_patterns *Params class naming the composite to match
        # activation_type: NPU activation name (e.g. "TANH") passed to the identity op
        # calc_func: scalar float -> float function used to fill the LUT
        super().__init__(require_type=True, rewrite_once=True)
        self.pattern = (wildcard().has_attr({"Composite": params_class.composite_name}))(wildcard())
        self.activation_type = activation_type
        self.calc_func = calc_func
    def callback(self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map):
        """Replace the matched composite with an ethosu_identity carrying a LUT."""
        id_input = post.args[0]
        # The composite body is assumed to end in a quantize call whose args hold
        # the output scale / zero-point constants -- structure fixed by the pattern.
        quantize_args = post.op.body.args
        output_scale = float(quantize_args[1].data.asnumpy())
        output_zp = int(quantize_args[2].data.asnumpy())
        # Walk back through the body to the dequantize call for the input
        # quantization parameters.
        dequantize_args = quantize_args[0].args[0].args
        input_scale = float(dequantize_args[1].data.asnumpy())
        input_zp = int(dequantize_args[2].data.asnumpy())
        # Tabulate calc_func over the whole quantized input range.
        lut_values = get_lut_from_func(
            input_scale, input_zp, output_scale, output_zp, self.calc_func
        )
        lut = relay.const(lut_values, dtype="uint8")
        # We baked the requantization into the LUT, so we don't requantize the identity operator
        identity = ethosu_ops.ethosu_identity(
            ifm=id_input,
            lut=lut,
            ifm_scale=input_scale,
            ifm_zero_point=input_zp,
            ofm_scale=input_scale,
            ofm_zero_point=input_zp,
            activation=self.activation_type,
        )
        return identity
class TanhRewriter(LutActivationRewriter):
    """This pass adds tanh as a LUT to the identity operator"""

    def __init__(self):
        # math.tanh fills the LUT; the NPU applies it via the identity op.
        super().__init__(ethosu_patterns.TanhParams, "TANH", math.tanh)
@ir.transform.module_pass(opt_level=1)
class LegalizeTanh:
    """This is the pass that wraps TanhRewriter"""

    def transform_module(
        self, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
    ) -> tvm.ir.IRModule:
        for gvar, function in mod.functions.items():
            mod.update_func(gvar, rewrite(TanhRewriter(), function))
        return mod

    def __call__(self, *args, **kwargs):
        # module_pass drives transform_module directly; nothing to do here.
        pass
def sigmoid_calc_func(x: float) -> float:
    """Sigmoid with saturation, used to fill the sigmoid LUT."""
    # These saturation limits are inherited from TFLite.
    lower_limit, upper_limit = -8.0, 8.0
    if x <= lower_limit:
        return 0.0
    if x >= upper_limit:
        return 1.0
    return 1 / (1 + math.exp(-x))
class SigmoidRewriter(LutActivationRewriter):
    """This pass adds sigmoid as a LUT for identity op"""

    def __init__(self):
        # sigmoid_calc_func fills the LUT; the NPU applies it via the identity op.
        super().__init__(ethosu_patterns.SigmoidParams, "SIGMOID", sigmoid_calc_func)
@ir.transform.module_pass(opt_level=1)
class LegalizeSigmoid:
    """This is the pass that wraps SigmoidRewriter"""

    def transform_module(
        self, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
    ) -> tvm.ir.IRModule:
        for gvar, function in mod.functions.items():
            mod.update_func(gvar, rewrite(SigmoidRewriter(), function))
        return mod

    def __call__(self, *args, **kwargs):
        # module_pass drives transform_module directly; nothing to do here.
        pass
class Conv2DRewriter(DFPatternCallback):
    """Convert conv2d related composite functions into ethosu_conv2d operators"""

    def __init__(self):
        super().__init__(require_type=True)
        self.pattern = (wildcard().has_attr({"Composite": "ethos-u.qnn_conv2d"}))(wildcard())

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        """Parse the matched qnn_conv2d composite and emit one ethosu_conv2d call."""
        params = ethosu_patterns.QnnConv2DParams(post.op.body)
        params.ifm.tensor = post.args[0]
        # Maps a layout string to the index of its channels dimension.
        channels_map = {
            "NHWC": 3,
        }
        # BUG FIX: validate the OFM layout before it is used to index
        # channels_map below (mirrors DepthwiseConv2DRewriter). Previously an
        # unsupported layout surfaced as a raw KeyError instead of
        # UnsupportedLayout.
        if str(params.ofm.layout) not in channels_map.keys():
            raise UnsupportedLayout(str(params.ofm.layout))
        kernel_size_map = {
            "HWIO": params.weights.shape[0:2],
            "OHWI": params.weights.shape[1:3],
            "HWOI": params.weights.shape[0:2],
        }
        if str(params.weights.layout) not in kernel_size_map.keys():
            raise UnsupportedLayout(str(params.weights.layout))
        activation_map = {"clip": "CLIP"}
        # NOTE(review): only HWIO weights have a transform entry; OHWI/HWOI
        # layouts accepted above would KeyError here -- confirm intent.
        weight_to_ohwi_transform_map = {"HWIO": [3, 0, 1, 2]}
        weights_values = params.weights.values
        weights_values_ohwi = np.transpose(
            weights_values, weight_to_ohwi_transform_map[str(params.weights.layout)]
        )
        if params.activation:
            activation = activation_map[params.activation.op.name]
            clip_min = int(params.activation.attrs.a_min)
            clip_max = int(params.activation.attrs.a_max)
        else:
            activation = "NONE"
            clip_min = 0
            clip_max = 0
        # Pack biases and scales into the scale_bias buffer the NPU expects.
        scale_bias = vela_api.pack_biases(
            biases=params.biases.tensor.data.asnumpy(),
            ifm_scale=params.ifm.q_params.scale_f32,
            ifm_dtype=np.dtype(params.ifm.dtype),
            weight_scales=params.weights.q_params.scale_f32,
            ofm_scale=params.ofm.q_params.scale_f32,
            is_activation_tanh_or_sigmoid=activation in ["TANH", "SIGMOID"],
        )
        ethosu_conv2d = ethosu_ops.ethosu_conv2d(
            ifm=post.args[0],
            weight=relay.const(weights_values_ohwi, params.weights.values.dtype),
            scale_bias=relay.const(scale_bias, "uint8"),
            lut=relay.const([], dtype="int8"),
            ifm_scale=float(params.ifm.q_params.scale_f32),
            ifm_zero_point=int(params.ifm.q_params.zero_point),
            weight_zero_point=int(params.weights.q_params.zero_point),
            ofm_scale=float(params.ofm.q_params.scale_f32),
            ofm_zero_point=int(params.ofm.q_params.zero_point),
            kernel_shape=kernel_size_map[str(params.weights.layout)],
            ofm_channels=params.ofm.shape[channels_map[str(params.ofm.layout)]],
            strides=params.strides,
            padding=params.padding,
            dilation=params.dilation,
            activation=activation,
            clip_min=clip_min,
            clip_max=clip_max,
            upscale="NONE",
            ifm_layout=str(params.ifm.layout),
            ofm_layout=str(params.ofm.layout),
        )
        return ethosu_conv2d
@ir.transform.module_pass(opt_level=1)
class LegalizeConv2D:
    """This is the pass that wraps the Conv2DRewriter"""

    def transform_module(
        self, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
    ) -> tvm.ir.IRModule:
        for gvar, function in mod.functions.items():
            mod.update_func(gvar, rewrite(Conv2DRewriter(), function))
        return mod

    def __call__(self, *args, **kwargs):
        # module_pass drives transform_module directly; nothing to do here.
        pass
class DepthwiseConv2DRewriter(DFPatternCallback):
    """Convert ethosu.qnn_depthwise_conv2d composite functions to ethosu_depthwise_conv2d
    operators"""
    def __init__(self):
        super().__init__(require_type=True)
        self.pattern = (
            wildcard().has_attr(
                {"Composite": ethosu_patterns.QnnDepthwiseConv2DParams.composite_name}
            )
        )(wildcard())
    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        """Parse the matched composite and emit one ethosu_depthwise_conv2d call."""
        params = ethosu_patterns.QnnDepthwiseConv2DParams(post.op.body)
        params.ifm.tensor = post.args[0]
        # Maps a layout string to the index of its channels dimension.
        channels_map = {
            "NHWC": 3,
        }
        if str(params.ofm.layout) not in channels_map.keys():
            raise UnsupportedLayout(str(params.ofm.layout))
        kernel_shape_map = {
            "HWOI": params.weights.shape[0:2],
        }
        if str(params.weights.layout) not in kernel_shape_map.keys():
            raise UnsupportedLayout(str(params.weights.layout))
        weights_values = params.weights.values
        # Reorder weight axes HWOI -> OHWI (H:0->1, W:1->2, O:2->0, I:3->3).
        weights_values_ohwi = np.moveaxis(weights_values, [0, 1, 2, 3], [1, 2, 0, 3])
        activation = "NONE"
        # Activations requiring LUT is currently not supported, so setting it to an empty list
        lut = relay.const([], "int8")
        clip_min = 0
        clip_max = 0
        if params.activation:
            activation = ethosu_patterns.QnnDepthwiseConv2DParams.activation_map[
                params.activation.op.name
            ]
            # Only CLIP carries min/max attributes to forward.
            if activation == "CLIP":
                clip_min = int(params.activation.attrs.a_min)
                clip_max = int(params.activation.attrs.a_max)
        # Pack biases and scales into the scale_bias buffer the NPU expects.
        scale_bias = vela_api.pack_biases(
            biases=params.biases.tensor.data.asnumpy(),
            ifm_scale=params.ifm.q_params.scale_f32,
            ifm_dtype=np.dtype(params.ifm.dtype),
            weight_scales=params.weights.q_params.scale_f32,
            ofm_scale=params.ofm.q_params.scale_f32,
            is_activation_tanh_or_sigmoid=activation in ["TANH", "SIGMOID"],
        )
        ethosu_depthwise_conv2d = ethosu_ops.ethosu_depthwise_conv2d(
            post.args[0],  # IFM
            relay.const(weights_values_ohwi, params.weights.values.dtype),
            relay.const(scale_bias, "uint8"),
            lut,
            float(params.ifm.q_params.scale_f32),
            int(params.ifm.q_params.zero_point),
            int(params.weights.q_params.zero_point),
            float(params.ofm.q_params.scale_f32),
            int(params.ofm.q_params.zero_point),
            kernel_shape_map[str(params.weights.layout)],
            params.ofm.shape[channels_map[str(params.ofm.layout)]],
            strides=params.strides,
            padding=params.padding,
            dilation=params.dilation,
            activation=activation,
            clip_min=clip_min,
            clip_max=clip_max,
            upscale="NONE",
            ifm_layout=str(params.ifm.layout),
            ofm_layout=str(params.ofm.layout),
            ofm_dtype=str(params.ofm.dtype),
        )
        return ethosu_depthwise_conv2d
@ir.transform.module_pass(opt_level=1)
class LegalizeDepthwiseConv2D:
    """This is the pass that wraps the DepthwiseConv2DRewriter"""

    def transform_module(
        self, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
    ) -> tvm.ir.IRModule:
        for gvar, function in mod.functions.items():
            mod.update_func(gvar, rewrite(DepthwiseConv2DRewriter(), function))
        return mod

    def __call__(self, *args, **kwargs):
        # module_pass drives transform_module directly; nothing to do here.
        pass
class PoolingRewriter(DFPatternCallback):
    """Convert ethosu.avgpool2d and ethosu.maxpool2d composite functions to
    ethosu_pooling operators"""
    def __init__(
        self,
        params_class: Type,
        pattern: CallPattern,
    ):
        # params_class: the ethosu_patterns *Params class used to parse the composite
        # pattern: the composite-function call pattern this rewriter matches
        super().__init__(require_type=True)
        self.params_class = params_class
        self.pattern = pattern
    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        """Parse the matched pooling composite and emit one ethosu_pooling call."""
        params = self.params_class(post.op.body)
        params.ifm.tensor = post.args[0]
        # Maps a layout string to the index of its channels dimension.
        channels_map = {
            "NHWC": 3,
        }
        if str(params.ofm.layout) not in channels_map.keys():
            raise UnsupportedLayout(str(params.ofm.layout))
        activation_map = {"clip": "CLIP"}
        if params.activation:
            activation = activation_map[params.activation.op.name]
            clip_min = int(params.activation.attrs.a_min)
            clip_max = int(params.activation.attrs.a_max)
        else:
            activation = "NONE"
            clip_min = 0
            clip_max = 0
        # Activations requiring LUT is currently not supported, so setting it to an empty list
        lut = relay.const([], dtype="int8")
        return ethosu_ops.ethosu_pooling(
            ifm=post.args[0],
            lut=lut,
            pooling_type=params.pooling_type,
            ifm_scale=params.ifm.q_params.scale_f32,
            ifm_zero_point=params.ifm.q_params.zero_point,
            ofm_scale=params.ofm.q_params.scale_f32,
            ofm_zero_point=params.ofm.q_params.zero_point,
            pool_shape=params.pool_shape,
            ofm_channels=params.ofm.shape[channels_map[str(params.ofm.layout)]],
            strides=params.strides,
            padding=params.padding,
            activation=activation,
            clip_min=clip_min,
            clip_max=clip_max,
            upscale="NONE",
            ifm_layout=str(params.ifm.layout),
            ofm_layout=str(params.ofm.layout),
        )
class MaxPoolingRewriter(PoolingRewriter):
    """Legalizes the max pooling composite function."""

    def __init__(self):
        max_pool_pattern = wildcard().has_attr(
            {"Composite": ethosu_patterns.MaxPool2DParams.composite_name}
        )(wildcard())
        super().__init__(
            params_class=ethosu_patterns.MaxPool2DParams, pattern=max_pool_pattern
        )
@ir.transform.module_pass(opt_level=1)
class LegalizeMaxPooling:
    """This is the pass that wraps the MaxPoolingRewriter"""

    def transform_module(
        self, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
    ) -> tvm.ir.IRModule:
        for gvar, function in mod.functions.items():
            mod.update_func(gvar, rewrite(MaxPoolingRewriter(), function))
        return mod

    def __call__(self, *args, **kwargs):
        # module_pass drives transform_module directly; nothing to do here.
        pass
class AvgPoolingRewriter(PoolingRewriter):
    """Legalizes the average pooling composite function."""

    def __init__(self):
        avg_pool_pattern = wildcard().has_attr(
            {"Composite": ethosu_patterns.AvgPool2DParams.composite_name}
        )(wildcard())
        super().__init__(
            params_class=ethosu_patterns.AvgPool2DParams, pattern=avg_pool_pattern
        )
@ir.transform.module_pass(opt_level=1)
class LegalizeAvgPooling:
    """This is the pass that wraps the AvgPoolingRewriter"""

    def transform_module(
        self, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
    ) -> tvm.ir.IRModule:
        for gvar, function in mod.functions.items():
            mod.update_func(gvar, rewrite(AvgPoolingRewriter(), function))
        return mod

    def __call__(self, *args, **kwargs):
        # module_pass drives transform_module directly; nothing to do here.
        pass
class BinaryElementwiseRewriter(DFPatternCallback):
    """Convert ethosu binary elementwise composite functions to
    ethosu_binary_elementwise operators"""
    def __init__(
        self,
        params_class: Type,
        pattern: CallPattern,
    ):
        # params_class: the ethosu_patterns *Params class used to parse the composite
        # pattern: the composite-function call pattern this rewriter matches
        super().__init__(require_type=True)
        self.params_class = params_class
        self.pattern = pattern
    @staticmethod
    def reshape_input(
        inputs: List["TensorParams"],
    ) -> List[tvm.relay.Expr]:
        """Reshape the inputs so that the following binary elementwise
        operator receives 4-dimensional inputs.

        Parameters
        ----------
        inputs: List[TensorParams]
            The inputs to reshape.

        Returns
        -------
        reshaped_inputs: List[tvm.relay.Expr]
            The new reshaped inputs.
        """
        reshaped_inputs = []
        for i in inputs:
            in_shape = i.shape
            if len(in_shape) < 4:
                # Pad the shape with leading 1s up to rank 4.
                pad_size = 4 - len(in_shape)
                new_shape = ([1] * pad_size) + in_shape
                new_call = relay.reshape(i.tensor, new_shape)
                reshaped_inputs.append(new_call)
            else:
                reshaped_inputs.append(i.tensor)
        return reshaped_inputs
    @staticmethod
    def reshape_output(output: tvm.relay.Expr, ifm_input_shape: List[int]) -> tvm.relay.Expr:
        """Reshape the output back to the original dimensionality.
        Since the NPU must have the brodcastable tensor as the
        second operand, the original shape of the first ifm must
        be the output shape.

        Parameters
        ----------
        output: tvm.relay.Expr
            The output to reshape.
        ifm_input_shape: List[int]
            The shape of the non-reshaped ifm tensor.

        Returns
        -------
        reshaped_output: tvm.relay.Expr
            The reshaped output expression.
        """
        # Already rank 4: no padding was added, so nothing to undo.
        if len(ifm_input_shape) == 4:
            return output
        reshaped_output = relay.reshape(output, ifm_input_shape)
        return reshaped_output
    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        """Parse the matched composite and emit one ethosu_binary_elementwise call."""
        params = self.params_class(post.op.body)
        # If the pattern matched with operands reversed, swap them back so the
        # first ifm is always the non-broadcast operand.
        params.ifm.tensor = post.args[1] if params.reversed_operands else post.args[0]
        params.ifm2.tensor = post.args[0] if params.reversed_operands else post.args[1]
        # Maps a layout string to the index of its channels dimension.
        channels_map = {
            "NHWC": 3,
        }
        if str(params.ofm.layout) not in channels_map.keys():
            raise UnsupportedLayout(str(params.ofm.layout))
        activation_map = {"clip": "CLIP"}
        if params.activation:
            activation = activation_map[params.activation.op.name]
            clip_min = int(params.activation.attrs.a_min)
            clip_max = int(params.activation.attrs.a_max)
        else:
            activation = "NONE"
            clip_min = 0
            clip_max = 0
        # We don't yet support activation functions that need to get legalized to LUTs.
        lut = relay.const([], dtype="int8")
        inputs = [params.ifm, params.ifm2]
        inputs = self.reshape_input(inputs)
        ethosu_binary_elementwise = ethosu_ops.ethosu_binary_elementwise(
            ifm=inputs[0],
            ifm2=inputs[1],
            lut=lut,
            operator_type=params.operator_type,
            ifm_scale=float(params.ifm.q_params.scale_f32),
            ifm_zero_point=int(params.ifm.q_params.zero_point),
            ifm2_scale=float(params.ifm2.q_params.scale_f32),
            ifm2_zero_point=int(params.ifm2.q_params.zero_point),
            ofm_scale=float(params.ofm.q_params.scale_f32),
            ofm_zero_point=int(params.ofm.q_params.zero_point),
            ifm_channels=params.ifm.shape[-1],
            ifm2_channels=params.ifm2.shape[-1],
            reversed_operands=params.reversed_operands,
            ofm_dtype=params.ofm.dtype,
            activation=activation,
            clip_min=clip_min,
            clip_max=clip_max,
            ifm_layout=str(params.ifm.layout),
            ifm2_layout=str(params.ifm2.layout),
            ofm_layout=str(params.ofm.layout),
        )
        # Undo any rank-4 padding applied to the first ifm.
        output = self.reshape_output(ethosu_binary_elementwise, params.ifm.shape)
        return output
class AddRewriter(BinaryElementwiseRewriter):
    """Legalizes the qnn add composite function."""

    def __init__(self):
        add_pattern = wildcard().has_attr(
            {"Composite": ethosu_patterns.AddParams.composite_name}
        )(wildcard(), wildcard())
        super().__init__(params_class=ethosu_patterns.AddParams, pattern=add_pattern)
@ir.transform.module_pass(opt_level=1)
class LegalizeAdd:
    """This is the pass that wraps the AddRewriter"""

    def transform_module(
        self, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
    ) -> tvm.ir.IRModule:
        for gvar, function in mod.functions.items():
            mod.update_func(gvar, rewrite(AddRewriter(), function))
        return mod

    def __call__(self, *args, **kwargs):
        # module_pass drives transform_module directly; nothing to do here.
        pass
class SubRewriter(BinaryElementwiseRewriter):
    """Legalizes the qnn subtract composite function."""

    def __init__(self):
        sub_pattern = wildcard().has_attr(
            {"Composite": ethosu_patterns.SubParams.composite_name}
        )(wildcard(), wildcard())
        super().__init__(params_class=ethosu_patterns.SubParams, pattern=sub_pattern)
@ir.transform.module_pass(opt_level=1)
class LegalizeSub:
    """This is the pass that wraps the SubRewriter"""

    def transform_module(
        self, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
    ) -> tvm.ir.IRModule:
        for gvar, function in mod.functions.items():
            mod.update_func(gvar, rewrite(SubRewriter(), function))
        return mod

    def __call__(self, *args, **kwargs):
        # module_pass drives transform_module directly; nothing to do here.
        pass
class MulRewriter(BinaryElementwiseRewriter):
    """Legalizes the qnn multiply composite function."""

    def __init__(self):
        mul_pattern = wildcard().has_attr(
            {"Composite": ethosu_patterns.MulParams.composite_name}
        )(wildcard(), wildcard())
        super().__init__(params_class=ethosu_patterns.MulParams, pattern=mul_pattern)
@ir.transform.module_pass(opt_level=1)
class LegalizeMul:
    """This is the pass that wraps the MulRewriter"""

    def transform_module(
        self, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
    ) -> tvm.ir.IRModule:
        for gvar, function in mod.functions.items():
            mod.update_func(gvar, rewrite(MulRewriter(), function))
        return mod

    def __call__(self, *args, **kwargs):
        # module_pass drives transform_module directly; nothing to do here.
        pass
class MinRewriter(BinaryElementwiseRewriter):
    """Legalizes the minimum composite function."""

    def __init__(self):
        min_pattern = wildcard().has_attr(
            {"Composite": ethosu_patterns.MinParams.composite_name}
        )(wildcard(), wildcard())
        super().__init__(params_class=ethosu_patterns.MinParams, pattern=min_pattern)
@ir.transform.module_pass(opt_level=1)
class LegalizeMin:
    """Module pass that applies MinRewriter to every function in the module."""

    def transform_module(
        self, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
    ) -> tvm.ir.IRModule:
        for gvar, function in mod.functions.items():
            mod.update_func(gvar, rewrite(MinRewriter(), function))
        return mod

    def __call__(self, *args, **kwargs):
        # The module_pass decorator makes the class callable; pylint cannot
        # see that, so this stub suppresses its warning.
        pass
class MaxRewriter(BinaryElementwiseRewriter):
    """Rewrites ethos-u MAX composite functions to binary elementwise ops."""

    def __init__(self):
        composite = wildcard().has_attr(
            {"Composite": ethosu_patterns.MaxParams.composite_name}
        )
        super().__init__(
            params_class=ethosu_patterns.MaxParams,
            pattern=composite(wildcard(), wildcard()),
        )
@ir.transform.module_pass(opt_level=1)
class LegalizeMax:
    """Module pass that applies MaxRewriter to every function in the module."""

    def transform_module(
        self, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
    ) -> tvm.ir.IRModule:
        for gvar, function in mod.functions.items():
            mod.update_func(gvar, rewrite(MaxRewriter(), function))
        return mod

    def __call__(self, *args, **kwargs):
        # The module_pass decorator makes the class callable; pylint cannot
        # see that, so this stub suppresses its warning.
        pass
class ShlRewriter(BinaryElementwiseRewriter):
    """Rewrites ethos-u SHL composite functions to binary elementwise ops."""

    def __init__(self):
        composite = wildcard().has_attr(
            {"Composite": ethosu_patterns.ShlParams.composite_name}
        )
        super().__init__(
            params_class=ethosu_patterns.ShlParams,
            pattern=composite(wildcard(), wildcard()),
        )
@ir.transform.module_pass(opt_level=1)
class LegalizeShl:
    """Module pass that applies ShlRewriter to every function in the module."""

    def transform_module(
        self, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
    ) -> tvm.ir.IRModule:
        for gvar, function in mod.functions.items():
            mod.update_func(gvar, rewrite(ShlRewriter(), function))
        return mod

    def __call__(self, *args, **kwargs):
        # The module_pass decorator makes the class callable; pylint cannot
        # see that, so this stub suppresses its warning.
        pass
class StridedSliceRewriter(DFPatternCallback):
    """Pulls the strided_slice operation out of its partitioned composite
    function, re-emitting it as a plain relay strided_slice."""

    def __init__(self):
        super().__init__(require_type=True, rewrite_once=True)
        composite = wildcard().has_attr(
            {"Composite": ethosu_patterns.StridedSliceParams.composite_name}
        )
        self.pattern = composite(wildcard())

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        # Decode slice parameters from the composite function body, then
        # rebuild the op on the composite's input tensor.
        params = ethosu_patterns.StridedSliceParams(post.op.body)
        return relay.op.strided_slice(
            post.args[0],
            params.begin,
            params.end,
            strides=params.strides,
            axes=params.axes,
            slice_mode=params.slice_mode,
        )
@ir.transform.module_pass(opt_level=1)
class LegalizeStridedSlice:
    """Module pass that applies StridedSliceRewriter to every function."""

    def transform_module(
        self, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
    ) -> tvm.ir.IRModule:
        for gvar, function in mod.functions.items():
            mod.update_func(gvar, rewrite(StridedSliceRewriter(), function))
        return mod

    def __call__(self, *args, **kwargs):
        # The module_pass decorator makes the class callable; pylint cannot
        # see that, so this stub suppresses its warning.
        pass
class ReshapeRewriter(DFPatternCallback):
    """Pulls the reshape operation out of its partitioned composite function."""

    def __init__(self):
        super().__init__(require_type=True, rewrite_once=True)
        composite = wildcard().has_attr(
            {"Composite": ethosu_patterns.ReshapeParams.composite_name}
        )
        self.pattern = composite(wildcard())

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        params = ethosu_patterns.ReshapeParams(post.op.body)
        return relay.op.reshape(post.args[0], newshape=params.new_shape)
@ir.transform.module_pass(opt_level=1)
class LegalizeReshape:
    """Module pass that applies ReshapeRewriter to every function."""

    def transform_module(
        self, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
    ) -> tvm.ir.IRModule:
        for gvar, function in mod.functions.items():
            mod.update_func(gvar, rewrite(ReshapeRewriter(), function))
        return mod

    def __call__(self, *args, **kwargs):
        # The module_pass decorator makes the class callable; pylint cannot
        # see that, so this stub suppresses its warning.
        pass
class NoOpRewriter(DFPatternCallback):
    """Appends an identity operator to reshape and strided_slice so that a
    no-op is never left without a consumer."""

    def __init__(self):
        super().__init__(require_type=True, rewrite_once=True)
        self.reshape = is_op("reshape")(wildcard())
        self.strided_slice = is_op("strided_slice")(wildcard())
        self.pattern = self.reshape | self.strided_slice

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        # int32 results are left untouched; everything else gets an identity.
        if pre.checked_type.dtype == "int32":
            return post
        return ethosu_ops.ethosu_identity(ifm=post, lut=relay.const([], dtype="int8"))
@ir.transform.module_pass(opt_level=1)
class LegalizeNoOps:
    """Module pass that applies NoOpRewriter to every function."""

    def transform_module(
        self, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
    ) -> tvm.ir.IRModule:
        for gvar, function in mod.functions.items():
            mod.update_func(gvar, rewrite(NoOpRewriter(), function))
        return mod

    def __call__(self, *args, **kwargs):
        # The module_pass decorator makes the class callable; pylint cannot
        # see that, so this stub suppresses its warning.
        pass
class UnaryElementwiseRewriter(DFPatternCallback):
    """
    Convert ethosu unary elementwise composite function to
    ethosu_unary_elementwise operators
    """

    def __init__(self, params_class: Type, pattern: CallPattern):
        # params_class: the ethosu_patterns.*Params type used to decode the
        # matched composite body; pattern: the dataflow pattern to match.
        super().__init__(require_type=True)
        self.params_class = params_class
        self.pattern = pattern

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        params = self.params_class(post.op.body)
        params.ifm.tensor = post.args[0]
        # Only NHWC output layouts are supported by this lowering.
        if str(params.ofm.layout) != "NHWC":
            raise UnsupportedLayout(str(params.ofm.layout))
        activation_map = {"clip": "CLIP"}
        if params.activation:
            activation = activation_map[params.activation.op.name]
            clip_min = int(params.activation.attrs.a_min)
            clip_max = int(params.activation.attrs.a_max)
        else:
            activation = "NONE"
            clip_min = 0
            clip_max = 0
        # We don't yet support activation functions that use LUT.
        lut = relay.const([], dtype="int8")
        unary_input_shape = params.ifm.shape
        # If the input tensor is not 4D, enter reshapes before and after the unary operator
        if len(params.ifm.shape) == 4:
            unary_input = params.ifm.tensor
        else:
            # Pad the shape with leading 1s up to rank 4 for the hardware op.
            pad_size = 4 - len(unary_input_shape)
            unary_input_shape = ([1] * pad_size) + unary_input_shape
            unary_input = relay.op.reshape(params.ifm.tensor, newshape=unary_input_shape)
        ethosu_unary_elementwise = ethosu_ops.ethosu_unary_elementwise(
            ifm=unary_input,
            lut=lut,
            operator_type=params.operator_type,
            ifm_scale=float(params.ifm.q_params.scale_f32),
            ifm_zero_point=int(params.ifm.q_params.zero_point),
            ofm_scale=float(params.ofm.q_params.scale_f32),
            ofm_zero_point=int(params.ofm.q_params.zero_point),
            # Channels taken from the (possibly padded) NHWC shape.
            ofm_channels=unary_input_shape[3],
            activation=activation,
            clip_min=clip_min,
            clip_max=clip_max,
            ifm_layout=str(params.ifm.layout),
            ofm_layout=str(params.ofm.layout),
        )
        # Restore the original (non-4D) shape on the way out.
        if len(params.ifm.shape) == 4:
            op = ethosu_unary_elementwise
        else:
            op = relay.op.reshape(ethosu_unary_elementwise, newshape=params.ifm.shape)
        return op
class AbsRewriter(UnaryElementwiseRewriter):
    """Rewrites ethos-u ABS composite functions to unary elementwise ops."""

    def __init__(self):
        composite = wildcard().has_attr(
            {"Composite": ethosu_patterns.AbsParams.composite_name}
        )
        super().__init__(
            params_class=ethosu_patterns.AbsParams,
            pattern=composite(wildcard()),
        )
@ir.transform.module_pass(opt_level=1)
class LegalizeAbs:
    """Module pass that applies AbsRewriter to every function."""

    def transform_module(
        self, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
    ) -> tvm.ir.IRModule:
        for gvar, function in mod.functions.items():
            mod.update_func(gvar, rewrite(AbsRewriter(), function))
        return mod

    def __call__(self, *args, **kwargs):
        # The module_pass decorator makes the class callable; pylint cannot
        # see that, so this stub suppresses its warning.
        pass
class MeanRewriter(DFPatternCallback):
    """Convert ethosu.mean composite functions to an equivalent legalization:
    - Case 1 (axis == [1, 2] and keepsdims == True):
      ethosu_depthwise_conv2d + ethosu_binary_elementwise
    - Case 2 (ifm qparams == ofm qparams): ethosu_pooling
    - Case 3 (else): ethosu_depthwise_conv2d
    """

    def __init__(self):
        super().__init__(require_type=True)
        self.pattern = (
            wildcard().has_attr({"Composite": ethosu_patterns.MeanParams.composite_name})
        )(wildcard())

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        params = ethosu_patterns.MeanParams(post.op.body)
        params.ifm.tensor = post.args[0]

        ifm_shape = params.ifm.shape
        ofm_shape = params.ofm.shape
        lut = relay.const([], "int8")
        axis = params.axis
        reduced_op = params.ifm.tensor

        # Enforce 4d input
        if len(ifm_shape) < 4:
            axis = [x + 1 for x in axis]
            if len(ifm_shape) == 3:
                ifm_shape = [1, params.height, params.width, ifm_shape[2]]
            else:
                ifm_shape = [1, params.height, params.width, 1]
            reduced_op = relay.reshape(reduced_op, ifm_shape)

        filter_height = ifm_shape[1] if 1 in axis else 1
        filter_width = ifm_shape[2] if 2 in axis else 1
        in_channels = out_channels = ifm_shape[-1]

        # If the height is greater than max kernel height, reshape the input
        # from [filter_height, filter_width] to [1, (filter_height*filter_width)]
        # only in the case the axis is [1, 2].
        if axis == [1, 2] and filter_height > 64:
            ifm_shape = (ifm_shape[0], 1, filter_height * filter_width, in_channels)
            filter_width = filter_height * filter_width
            filter_height = 1
            reduced_op = relay.reshape(reduced_op, ifm_shape)

        if axis == [1, 2] and params.keepdims:
            weight_scale = 1
            weight_values = np.ones([out_channels, filter_height, filter_width, in_channels])
            scale_bias = vela_api.pack_biases(
                biases=np.zeros(ifm_shape[-1]),
                ifm_scale=params.ifm.q_params.scale_f32,
                ifm_dtype=np.dtype(params.ifm.dtype),
                # BUG FIX: was dtype=np.float, a deprecated alias of the
                # builtin float removed in NumPy 1.24; builtin float keeps
                # the same (float64) semantics.
                weight_scales=np.array([weight_scale], dtype=float),
                ofm_scale=params.ofm.q_params.scale_f32,
                is_activation_tanh_or_sigmoid=False,
            )
            # Sum over H x W via a depthwise conv with all-ones weights,
            # accumulating into int16.
            reduced_op = ethosu_ops.ethosu_depthwise_conv2d(
                ifm=reduced_op,
                weight=relay.const(weight_values, params.ifm.dtype),
                scale_bias=relay.const(scale_bias, "uint8"),
                lut=lut,
                ifm_scale=float(params.ifm.q_params.scale_f32),
                ifm_zero_point=int(params.ifm.q_params.zero_point),
                weight_zero_point=0,
                ofm_scale=float(params.ofm.q_params.scale_f32),
                ofm_zero_point=int(params.ofm.q_params.zero_point),
                kernel_shape=(filter_height, filter_width),
                ofm_channels=out_channels,
                ofm_dtype="int16",
            )
            # Divide by the element count (1/n folded into ifm2 scale).
            n = int(filter_height * filter_width)
            eps = 1 / (256 * (n + 1)) if n % 2 == 0 else 0
            scalar_tensor = relay.const(np.ones([1, 1, 1, 1], dtype="int16"), dtype="int16")
            reduced_op = ethosu_ops.ethosu_binary_elementwise(
                ifm=reduced_op,
                ifm2=scalar_tensor,
                lut=lut,
                operator_type="MUL",
                ifm_scale=float(params.ofm.q_params.scale_f32),
                ifm_zero_point=int(params.ofm.q_params.zero_point),
                ifm2_scale=1 / (n - eps),
                ifm2_zero_point=0,
                ofm_scale=float(params.ofm.q_params.scale_f32),
                ofm_zero_point=int(params.ofm.q_params.zero_point),
                ifm_channels=out_channels,
                ifm2_channels=out_channels,
                reversed_operands=False,
                ofm_dtype="int8",
                rounding_mode="NATURAL",
            )
        elif (
            params.ifm.q_params.scale_f32 == params.ofm.q_params.scale_f32
            and params.ifm.q_params.zero_point == params.ofm.q_params.zero_point
        ):
            # Identical quantization in and out: a plain average pool suffices.
            reduced_op = ethosu_ops.ethosu_pooling(
                ifm=reduced_op,
                lut=lut,
                pooling_type="AVG",
                ifm_scale=float(params.ifm.q_params.scale_f32),
                ifm_zero_point=0,
                ofm_scale=float(params.ofm.q_params.scale_f32),
                ofm_zero_point=0,
                pool_shape=(filter_height, filter_width),
                ofm_channels=out_channels,
                rounding_mode="TRUNCATE",
            )
        else:
            # General case: depthwise conv with 1/n weight scale and a bias
            # that cancels the input zero point.
            weight_scale = 1 / (filter_height * filter_width)
            weight_values = np.ones([out_channels, filter_height, filter_width, in_channels])
            bias = -1 * int(params.ifm.q_params.zero_point) * filter_height * filter_width
            scale_bias = vela_api.pack_biases(
                biases=np.ones([ifm_shape[-1]]) * bias,
                ifm_scale=params.ifm.q_params.scale_f32,
                ifm_dtype=np.dtype(params.ifm.dtype),
                # BUG FIX: was dtype=np.float (removed NumPy alias); see above.
                weight_scales=np.array([weight_scale], dtype=float),
                ofm_scale=params.ofm.q_params.scale_f32,
                is_activation_tanh_or_sigmoid=False,
            )
            reduced_op = ethosu_ops.ethosu_depthwise_conv2d(
                ifm=reduced_op,
                weight=relay.const(weight_values, params.ifm.dtype),
                scale_bias=relay.const(scale_bias, "uint8"),
                lut=lut,
                ifm_scale=float(params.ifm.q_params.scale_f32),
                ifm_zero_point=0,
                weight_zero_point=0,
                ofm_scale=float(params.ofm.q_params.scale_f32),
                ofm_zero_point=int(params.ofm.q_params.zero_point),
                kernel_shape=(filter_height, filter_width),
                ofm_channels=out_channels,
                rounding_mode="NATURAL",
            )

        # Reshape to original ofm shape
        if len(ofm_shape) < 4:
            reduced_op = relay.reshape(reduced_op, ofm_shape)
        return reduced_op
@ir.transform.module_pass(opt_level=1)
class LegalizeMean:
    """Module pass that applies MeanRewriter to every function."""

    def transform_module(
        self, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
    ) -> tvm.ir.IRModule:
        for gvar, function in mod.functions.items():
            mod.update_func(gvar, rewrite(MeanRewriter(), function))
        return mod

    def __call__(self, *args, **kwargs):
        # The module_pass decorator makes the class callable; pylint cannot
        # see that, so this stub suppresses its warning.
        pass
class ConcatRewriter(DFPatternCallback):
    """Replaces the QNN concatenate emitted by newer TFLite converters (all
    inputs already share the same QNN params — differing inputs get a
    requantize from the converter) with a plain Relay concatenate."""

    def __init__(self):
        super().__init__(require_type=True, rewrite_once=True)
        self.pattern = (
            wildcard().has_attr({"Composite": ethosu_patterns.ConcatParams.composite_name})
        )(None)

    def callback(
        self, pre: tvm.relay.Expr, post: tvm.relay.Expr, node_map: tvm.ir.container.Map
    ) -> tvm.relay.Expr:
        # Only Call nodes among the arguments are real tensor inputs.
        tensor_inputs = [arg for arg in post.args if isinstance(arg, tvm.relay.expr.Call)]
        return relay.op.concatenate(
            relay.Tuple(tensor_inputs), axis=post.op.body.attrs.axis
        )
@ir.transform.module_pass(opt_level=1)
class LegalizeConcat:
    """Module pass that applies ConcatRewriter to every function."""

    def transform_module(
        self, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
    ) -> tvm.ir.IRModule:
        for gvar, function in mod.functions.items():
            mod.update_func(gvar, rewrite(ConcatRewriter(), function))
        return mod

    def __call__(self, *args, **kwargs):
        # The module_pass decorator makes the class callable; pylint cannot
        # see that, so this stub suppresses its warning.
        pass
@ir.transform.module_pass(opt_level=1)
class LegalizeEthosU:
    """This is the pass to call graph-rewrites to perform graph transformation
    in a way such that the operations are replaced with hardware/codegen
    supported operations.
    """

    def transform_module(
        self, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
    ) -> tvm.ir.IRModule:
        """Replace the composite operations with hardware/codegen supported
        operations by running each legalization pass in sequence.
        """
        # NOTE: the order below is preserved deliberately — reshape and
        # strided_slice extraction run after the operator rewrites, and the
        # no-op cleanup runs last.
        legalize_passes = (
            LegalizeSplit,
            LegalizeConv2D,
            LegalizeDepthwiseConv2D,
            LegalizeMaxPooling,
            LegalizeAvgPooling,
            LegalizeAdd,
            LegalizeSub,
            LegalizeMul,
            LegalizeMin,
            LegalizeMax,
            LegalizeShl,
            LegalizeAbs,
            LegalizeTanh,
            LegalizeMean,
            LegalizeConcat,
            LegalizeSigmoid,
            LegalizeReshape,
            LegalizeStridedSlice,
            LegalizeNoOps,
        )
        for legalize_pass in legalize_passes:
            mod = legalize_pass()(mod)
        return mod

    def __call__(self, *args, **kwargs):
        # pylint is unable to figure out the decorated
        # class is callable, thus adding this to
        # suppress the warning.
        pass
| |
import hashlib
import ipaddress
import struct
import time
import urllib.parse
from base64 import urlsafe_b64encode, urlsafe_b64decode

import nacl.secret
# BUG FIX: generate() calls nacl.utils.random(); previously it worked only
# because nacl.secret happens to import nacl.utils internally. Import it
# explicitly so the dependency is guaranteed.
import nacl.utils
from bitstring import BitArray

from .utils import pad, depad
class Nut(object):
    """A class encompassing SQRL nuts.

    The server should not need to use this class directly, but of course
    it may. It is designed to work as follows:

    - Construct the object with the 32-byte key.
    - If generating a new nut, use :py:meth:`.generate` followed by
      :py:meth:`.toString`.
    - If validating an existing nut, use :py:meth:`.load`, then :py:meth:`.validate`,
      then look at the various attributes to determine if any errors were found.

    The 128-bit plaintext nut layout (see ``generate``) is:
    32-bit IP digest | 32-bit timestamp | 32-bit counter | 32 random bits,
    with the very last bit overwritten as a QR(0)/link(1) flag.

    Attributes:
        key (bytes) : 32 bytes used to encrypt the nut.
        ipmatch (bool) : Whether the last validation found matching IPs.
        fresh (bool) : Whether the last validation found the nut to be fresh.
        countersane (bool) : Whether the last validation found the
            counter to be within limits. Default is False, even if counter
            checking was disabled.
        isqr (bool) : Set when loading a nut. States whether it's a QR nut.
        islink (bool) : Set when loading a nut. States whether it's a link nut.
    """

    def __init__(self, key):
        """Constructor

        Args:
            key (bytes) : 32-byte key used to encrypt/decrypt the nut
        """
        # SecretBox requires exactly 32 bytes of key material.
        assert len(key) == 32
        # 'raw' holds the 128-bit plaintext BitArray; 'qr' and 'link' hold
        # the encrypted variants produced by generate().
        self.nuts = {'raw': None, 'qr': None, 'link': None}
        self.key = key
        # Validation results default to False until validate() is called.
        self.ipmatch = False
        self.fresh = False
        self.countersane = False
        self.isqr = False
        self.islink = False

    def generate(self, ipaddr, counter, timestamp=None):
        """Generates a unique nut using the technique described in the spec (LINK)

        Args:
            ipaddr (string) : The string representation of a valid
                IPv4 or IPv6 address.
            counter (uint) :
                An incremental counter. Used for sanity checking.

        Keyword Args:
            timestamp (uint) : Unix timestamp (seconds only). If None,
                current time is used.

        Returns:
            Nut : The populated Nut object.
        """
        self.ip = ipaddress.ip_address(ipaddr)
        baip = BitArray(self.ip.packed)
        # Shorten to 32 bits if IPv6: keyed SHA-256 of the packed address,
        # keeping the last 32 bits (one-way, so load() cannot recover the IP).
        if (len(baip) == 128):
            m = hashlib.sha256()
            m.update(self.key)
            m.update(baip.bytes)
            baip = BitArray(m.digest())[-32:]
        self.timestamp = timestamp
        if self.timestamp is None:
            self.timestamp = time.time()
        # 32-bit unsigned seconds; struct code 'I' uses native byte order, so
        # nuts must be loaded by the same host architecture that made them.
        batime = BitArray(struct.pack('I', int(self.timestamp)))
        self.counter = counter
        bacounter = BitArray(struct.pack('I', counter))
        barand = BitArray(nacl.utils.random(4))
        # Compose the 128-bit array: ip | time | counter | random (32b each).
        self.nuts['raw'] = baip + batime + bacounter + barand
        assert len(self.nuts['raw']) == 128
        # The final bit flags the nut variant: 0 = QR, 1 = link.
        self.nuts['qr'] = BitArray(self.nuts['raw'])
        self.nuts['qr'][-1] = 0
        self.nuts['link'] = BitArray(self.nuts['raw'])
        self.nuts['link'][-1] = 1
        # Encrypt both variants with the symmetric key.
        box = nacl.secret.SecretBox(self.key)
        self.nuts['qr'] = box.encrypt(self.nuts['qr'].bytes)
        self.nuts['link'] = box.encrypt(self.nuts['link'].bytes)
        return self

    def load(self, nut):
        """Decrypts the given nut and extracts its parts.

        Args:
            nut (string) : A previously generated nut string

        Returns
            Nut
        """
        # Decrypt the nut (raises on tampering — SecretBox is authenticated).
        box = nacl.secret.SecretBox(self.key)
        msg = urlsafe_b64decode(pad(nut).encode('utf-8'))
        out = box.decrypt(msg)
        self.nuts['raw'] = BitArray(out)
        assert len(self.nuts['raw']) == 128
        # Extract ipaddress (not possible, one way only)
        self.ip = None
        # Extract timestamp (bits 32-63) for later validation.
        self.timestamp = struct.unpack('I', self.nuts['raw'][32:64].bytes)[0]
        # Extract counter (bits 64-95) for later validation.
        self.counter = struct.unpack('I', self.nuts['raw'][64:96].bytes)[0]
        # Set the QR/link flag from the final bit.
        if self.nuts['raw'][-1] == 0:
            self.isqr = True
            self.islink = False
        else:
            self.isqr = False
            self.islink = True
        return self

    def validate(self, ipaddr, ttl, maxcounter=None, mincounter=0):
        """Validates the currently loaded nut.

        The nut must be generated or loaded first. It is the user's
        responsiblity to keep a list of valid nuts and reject repeats,
        to avoid replay attacks. This routine only validates the data
        encoded into the nut.

        Args:
            ipaddr (string) : The string representation of a valid
                IPv4 or IPv6 address.
            ttl (uint) : Number of seconds old the nut is allowed to be.

        Keyword Args:
            maxcounter (uint) : Current counter. If None, then no
                upper-bound checking will occur.
            mincounter (uint) : Smallest counter value you're willing
                to accept. If None, then no lower-bound checking will
                occur

        Returns:
            Nut : The user has to inspect the attributes ``ipmatch``,
            ``fresh``, and ``countersane`` to determine if the nut fully
            validated.
        """
        # Verify ipaddress by recomputing the same 32-bit digest generate()
        # produced and comparing against the first 32 bits of the nut.
        ip = ipaddress.ip_address(ipaddr)
        baip = BitArray(ip.packed)
        # Shorten to 32 bits if IPv6 (same keyed-hash reduction as generate()).
        if (len(baip) == 128):
            m = hashlib.sha256()
            m.update(self.key)
            m.update(baip.bytes)
            baip = BitArray(m.digest())[-32:]
        if baip == self.nuts['raw'][:32]:
            self.ipmatch = True
        else:
            self.ipmatch = False
        # Verify timestamp: must not be in the future and must be within ttl.
        now = int(time.time())
        nuttime = self.timestamp
        if ( (nuttime <= now) and ((now - nuttime) < ttl) ):
            self.fresh = True
        else:
            self.fresh = False
        # Verify counter against the optional lower/upper bounds.
        if ( ( (mincounter is None) or (self.counter >= mincounter) ) and ( (maxcounter is None) or (self.counter <= maxcounter) ) ):
            self.countersane = True
        else:
            self.countersane = False
        return self

    def toString(self, flag):
        """Converts the given nut to a base64url-encoded string

        Args:
            flag (string) : One of ``qr``, ``link``, or ``raw``.

        Warning:
            While it is possible to do this to the "raw" nut, don't! It has
            not been encrypted.

        Returns:
            string : b64u-encoded nut
        """
        if flag not in self.nuts:
            return None
        return depad(urlsafe_b64encode(self.nuts[flag]).decode('utf-8'))
| |
import os.path
from datetime import datetime, timedelta
import json
import time
from osgeo import ogr, osr
# EPSG code of the coordinate system the OTP router expects (WGS 84 lon/lat).
OTP_ROUTER_EPSG = 4326
# Shapefile attribute field holding the location name (read by
# load_locations_from_shpfile).
LOCATION_NAME_FIELD = "Name"
# NOTE(review): ID_FIELD and TIME_FIELD are not referenced in this module —
# presumably consumed by other modules importing these constants; verify.
ID_FIELD = "id"
TIME_FIELD = "time"
def rasterName(loc_name, date, time, base_path=None, suffix=None):
    """Build the GeoTIFF file name for a location/date/time isochrone raster.

    Spaces, dashes and colons in the inputs become underscores; an optional
    suffix is appended, and the name is joined under base_path if given.
    """
    parts = [loc_name.replace(' ', '_'),
             date.replace('-', '_'),
             time.replace(':', '_')]
    if suffix:
        parts.append(suffix)
    fname = '-'.join(parts) + '.tiff'
    if base_path is None:
        return fname
    return os.path.join(os.path.expanduser(base_path), fname)
def vectorName(loc_name, date, time, iso, vec_type,
        base_path=None, suffix=None):
    """Build the GeoJSON file name for an isochrone vector layer.

    Same convention as rasterName, plus the isochrone level ("<iso>min")
    and the lower-cased vector type.
    """
    parts = [loc_name.replace(' ', '_'),
             date.replace('-', '_'),
             time.replace(':', '_'),
             str(iso) + "min",
             vec_type.lower()]
    if suffix:
        parts.append(suffix)
    fname = '-'.join(parts) + '.geojson'
    if base_path is None:
        return fname
    return os.path.join(os.path.expanduser(base_path), fname)
def avgRasterName(loc_name, datestr, timestr, save_path, save_suffix,
        num_each_side):
    """Name of the raster averaged over 2*num_each_side+1 nearby times."""
    numavg = 2 * num_each_side + 1
    base = rasterName(loc_name, datestr, timestr, save_path, save_suffix)
    root, _ = os.path.splitext(base)
    return root + "-avg%d.tiff" % numavg
def isoContoursName(loc_name, datestr, timestr, save_path, save_suffix,
        num_each_side):
    """Shapefile name for isocontours derived from the averaged raster."""
    avg_fname = avgRasterName(loc_name, datestr, timestr, save_path,
        save_suffix, num_each_side)
    root, _ = os.path.splitext(avg_fname)
    return root + "-isocontours.shp"
def isoBandsName(loc_name, datestr, timestr, save_path, save_suffix,
        num_each_side):
    """Shapefile name for isobands derived from the averaged raster."""
    avg_fname = avgRasterName(loc_name, datestr, timestr, save_path,
        save_suffix, num_each_side)
    root, _ = os.path.splitext(avg_fname)
    return root + "-isobands.shp"
def isoBandsAllName(loc_name, datestr, timestr, save_path, save_suffix,
        num_each_side):
    """Shapefile name for the combined ("all") isobands layer."""
    isob_fname = isoBandsName(loc_name, datestr, timestr, save_path,
        save_suffix, num_each_side)
    root, _ = os.path.splitext(isob_fname)
    return root + "-all.shp"
def polysIsoBandsName(loc_name, datestr, timestr, save_path, save_suffix,
        num_each_side, iso_level):
    """Shapefile name for the polygonised isobands at one isochrone level."""
    isob_fname = isoBandsName(loc_name, datestr, timestr, save_path,
        save_suffix, num_each_side)
    root, _ = os.path.splitext(isob_fname)
    return root + "-%d-polys.shp" % iso_level
def smoothedIsoBandsName(loc_name, datestr, timestr, save_path,
        save_suffix, num_each_side, iso_level):
    """Shapefile name for the smoothed isobands at one isochrone level."""
    isob_fname = isoBandsName(loc_name, datestr, timestr, save_path,
        save_suffix, num_each_side)
    root, _ = os.path.splitext(isob_fname)
    return root + "-%d-smoothed.shp" % (iso_level)
def smoothedIsoBandsNameCombined(loc_name, datestr, timestr, save_path,
        save_suffix, num_each_side):
    """Shapefile name for the smoothed isobands combined over all levels."""
    isob_fname = isoBandsName(loc_name, datestr, timestr, save_path,
        save_suffix, num_each_side)
    root, _ = os.path.splitext(isob_fname)
    return root + "-smoothed.shp"
def get_nearby_min_diffs(nearby_minutes, num_each_side):
    """Return minute offsets symmetric about 0: num_each_side offsets before,
    0 itself, and num_each_side offsets after, evenly spaced up to
    +/- nearby_minutes."""
    diffs = []
    for ii in range(num_each_side, 0, -1):
        diffs.append(-nearby_minutes * ii / float(num_each_side))
    diffs.append(0)
    for ii in range(1, num_each_side + 1):
        diffs.append(nearby_minutes * ii / float(num_each_side))
    return diffs
# strftime/strptime formats shared by the date/time string handling below.
DATE_FORMAT_STR = "%Y-%m-%d"
TIME_FORMAT_STR = "%H:%M:%S"
def get_date_time_string_set(base_date, base_time, mins_diffs):
    """Return [(date_str, time_str), ...] for the base date/time shifted by
    each minutes offset in mins_diffs."""
    origin = datetime.strptime(base_date + base_time,
        DATE_FORMAT_STR + TIME_FORMAT_STR)
    shifted = [origin + timedelta(minutes=md) for md in mins_diffs]
    return [(dt.strftime(DATE_FORMAT_STR), dt.strftime(TIME_FORMAT_STR))
        for dt in shifted]
def get_raster_filenames(loc_name, date_time_str_set, base_path, suffix):
    """Return the raster file name for each (date, time) pair."""
    return [rasterName(loc_name, date_mod, time_mod, base_path, suffix)
        for date_mod, time_mod in date_time_str_set]
def gen_multi_graph_iso_spec(base_path, server_url, graph_infos,
        iso_set_specifications):
    """Build the list of per-graph isochrone run specifications.

    Each element is a tuple
    (server_url, otp_router_id, output_path, save_suffix, iso_set_specifications).
    If graph_infos is None or empty, a single spec against the server's
    default router (router id None) is returned.
    """
    # BUG FIX: the original tested "len(graph_infos) is 0" — an identity
    # comparison on an int, which is CPython-implementation-dependent and a
    # SyntaxWarning on Python 3.8+. Truthiness covers both None and empty.
    if not graph_infos:
        return [(server_url, None, base_path, None,
            iso_set_specifications)]
    iso_spec_list = []
    for otp_router_id, graph_subdir, save_suffix in graph_infos:
        out_path = os.path.join(base_path, graph_subdir)
        iso_spec_list.append((server_url, otp_router_id, out_path,
            save_suffix, iso_set_specifications))
    return iso_spec_list
def save_metadata(multi_graph_iso_set):
    """Write one timestamped JSON metadata file per isochrone-set run and
    return the list of file names written.

    Each entry of multi_graph_iso_set is a tuple
    (server_url, otp_router_id, save_path, save_suffix, isos_spec).
    """
    print "Saving metadata for each run in JSON format..."
    fnames = []
    for server_url, otp_router_id, save_path, save_suffix, isos_spec in \
            multi_graph_iso_set:
        now = datetime.now()
        # File name carries the wall-clock time down to seconds.
        d_t_str = now.strftime("%Y-%m-%d_%Hh_%Mm_%Ss")
        meta_fname = "-".join(["isos-metadata", d_t_str]) + ".json"
        meta_fname = os.path.join(os.path.abspath(save_path),
            meta_fname)
        fnames.append(meta_fname)
        print "...saving metadata of an isochrone set into %s" % \
            (meta_fname)
        meta_dict = {}
        meta_dict['run_time'] = now.isoformat()
        meta_dict['server_url'] = server_url
        meta_dict['otp_router_id'] = otp_router_id
        meta_dict['save_suffix'] = save_suffix
        meta_dict['iso_set_specification'] = isos_spec
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        meta_file = open(meta_fname, "w")
        json.dump(meta_dict, meta_file, indent=2)
        meta_file.close()
        # This is to ensure we don't overwrite files.
        # (Names have one-second resolution, so pause just over a second.)
        time.sleep(1.01)
    print "Done."
    return fnames
def load_iso_set_from_files(fnames):
    """Read back the isochrone-set specs written by save_metadata().

    Returns a list of tuples in the same shape gen_multi_graph_iso_spec
    produces; the save path is taken from each metadata file's directory.
    """
    iso_spec_list = []
    for fname in fnames:
        # BUG FIX: the original opened the file without ever closing it,
        # leaking a file handle per metadata file; use a context manager.
        with open(fname, "r") as meta_file:
            meta_dict = json.load(meta_file)
        iso_spec_list.append(
            (meta_dict['server_url'],
             meta_dict['otp_router_id'],
             os.path.dirname(fname),
             meta_dict['save_suffix'],
             meta_dict['iso_set_specification']
             ))
    return iso_spec_list
def load_locations_from_shpfile(shpfile_name):
"""Desired output format is a list of tuples containing a location name,
and a lon, lat pair, e.g.:
("MONASH UNI CLAYTON", (145.13163, -37.91432))"""
locations = []
output_srs = osr.SpatialReference()
output_srs.ImportFromEPSG(OTP_ROUTER_EPSG)
locations_shp = ogr.Open(shpfile_name, 0)
if locations_shp is None:
print "Error, input locations shape file given, %s , failed to open." \
% (shpfile_name)
sys.exit(1)
locations_lyr = locations_shp.GetLayer(0)
locations_srs = locations_lyr.GetSpatialRef()
transform = None
if not locations_srs.IsSame(output_srs):
transform = osr.CoordinateTransformation(locations_srs, output_srs)
locations = []
for loc_feat in locations_lyr:
loc_name = loc_feat.GetField(LOCATION_NAME_FIELD)
loc_geom = loc_feat.GetGeometryRef()
if transform:
loc_geom.Transform(transform)
locations.append((loc_name, loc_geom.GetPoint_2D(0)))
return locations
| |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
# On Python 3, expose the builtins module under the Python 2 name
# __builtin__ (the generated code below references __builtin__.property)
# and alias the removed `long` type to int.
if six.PY3:
    import builtins as __builtin__

    long = int
elif six.PY2:
    import __builtin__
# NOTE: auto-generated pyangbind container class — do not hand-edit logic;
# regenerate from the YANG model instead.
class state(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis/afi-safi/use-multiple-paths/state. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: State parameters relating to multipath
    """

    __slots__ = ("_path_helper", "_extmethods", "__enabled")

    _yang_name = "state"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        self.__enabled = YANGDynClass(
            base=YANGBool,
            default=YANGBool("false"),
            is_leaf=True,
            yang_name="enabled",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="boolean",
            is_config=False,
        )

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # Copy-construct: the single positional argument must expose all
            # of this container's elements; copy over only changed values.
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Path is derived from the parent when attached to a tree; otherwise
        # fall back to the static schema path of this container.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "bgp",
                "global",
                "afi-safis",
                "afi-safi",
                "use-multiple-paths",
                "state",
            ]

    def _get_enabled(self):
        """
        Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/use_multiple_paths/state/enabled (boolean)

        YANG Description: Whether the use of multiple paths for the same NLRI is
        enabled for the neighbor. This value is overridden by
        any more specific configuration value.
        """
        return self.__enabled

    def _set_enabled(self, v, load=False):
        """
        Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/use_multiple_paths/state/enabled (boolean)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_enabled is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_enabled() directly.

        YANG Description: Whether the use of multiple paths for the same NLRI is
        enabled for the neighbor. This value is overridden by
        any more specific configuration value.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=YANGBool,
                default=YANGBool("false"),
                is_leaf=True,
                yang_name="enabled",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="boolean",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """enabled must be of a type compatible with boolean""",
                    "defined-type": "boolean",
                    "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
                }
            )

        self.__enabled = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_enabled(self):
        self.__enabled = YANGDynClass(
            base=YANGBool,
            default=YANGBool("false"),
            is_leaf=True,
            yang_name="enabled",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="boolean",
            is_config=False,
        )

    # Read-only property (config: false leaf — no public setter).
    enabled = __builtin__.property(_get_enabled)

    _pyangbind_elements = OrderedDict([("enabled", enabled)])
class state(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis/afi-safi/use-multiple-paths/state. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: State parameters relating to multipath
    """

    # __slots__ keeps the many generated container instances small.
    # NOTE: "__enabled" is name-mangled to "_state__enabled" inside the class.
    __slots__ = ("_path_helper", "_extmethods", "__enabled")

    _yang_name = "state"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        # Defaults: no path helper / extmethods until a parent binding
        # attaches this container to a tree.
        self._path_helper = False
        self._extmethods = False
        self.__enabled = YANGDynClass(
            base=YANGBool,
            default=YANGBool("false"),
            is_leaf=True,
            yang_name="enabled",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="boolean",
            is_config=False,
        )

        load = kwargs.pop("load", None)
        if args:
            # Copy-constructor path: a single source object with the same
            # element attributes may be supplied.
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Only copy elements the source actually changed from default.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Prefer the registered parent's path when attached to a tree;
        # otherwise fall back to the static schema path.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "bgp",
                "global",
                "afi-safis",
                "afi-safi",
                "use-multiple-paths",
                "state",
            ]

    def _get_enabled(self):
        """
        Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/use_multiple_paths/state/enabled (boolean)

        YANG Description: Whether the use of multiple paths for the same NLRI is
        enabled for the neighbor. This value is overridden by
        any more specific configuration value.
        """
        return self.__enabled

    def _set_enabled(self, v, load=False):
        """
        Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/use_multiple_paths/state/enabled (boolean)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_enabled is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_enabled() directly.

        YANG Description: Whether the use of multiple paths for the same NLRI is
        enabled for the neighbor. This value is overridden by
        any more specific configuration value.
        """
        # Unwrap a previously-wrapped value so it can be re-validated.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=YANGBool,
                default=YANGBool("false"),
                is_leaf=True,
                yang_name="enabled",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="boolean",
                is_config=False,
            )
        except (TypeError, ValueError):
            # Re-raise as ValueError carrying the schema context, per the
            # pyangbind-generated error convention.
            raise ValueError(
                {
                    "error-string": """enabled must be of a type compatible with boolean""",
                    "defined-type": "boolean",
                    "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
                }
            )

        self.__enabled = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_enabled(self):
        """Reset ``enabled`` to its YANG default (false)."""
        self.__enabled = YANGDynClass(
            base=YANGBool,
            default=YANGBool("false"),
            is_leaf=True,
            yang_name="enabled",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="boolean",
            is_config=False,
        )

    # Read-only property: config false in the YANG model, so no setter.
    enabled = __builtin__.property(_get_enabled)

    _pyangbind_elements = OrderedDict([("enabled", enabled)])
| |
# -*- encoding: utf-8 -*-
'''
HubbleStack Nova plugin for verifying attributes associated with a mounted partition.
Supports both blacklisting and whitelisting patterns. Blacklisted patterns must
not be found in the specified file. Whitelisted patterns must be found in the
specified file.
:maintainer: HubbleStack / basepi
:maturity: 2017.8.29
:platform: All
:requires: SaltStack
This audit module requires yaml data to execute. It will search the local
directory for any .yaml files, and if it finds a top-level 'mount' key, it will
use that data.
Sample YAML data, with inline comments:
mount:
whitelist: # or blacklist
ensure_nodev_option_on_/tmp: # unique ID
data:
CentOS Linux-6: # osfinger grain
- '/tmp': # path of partition
tag: 'CIS-1.1.1' # audit tag
attribute: nodev # attribute which must exist for the mounted partition
check_type: soft # if 'hard', the check fails if the path doesn't exist or
# if it is not a mounted partition. If 'soft', the test passes
# for such cases (default: hard)
labels:
- critical
'''
from __future__ import absolute_import
import logging
import fnmatch
import os
import copy
import salt.utils
import salt.utils.platform
from distutils.version import LooseVersion
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Only make this audit module available on non-Windows platforms.
    '''
    if not salt.utils.platform.is_windows():
        return True
    return False, 'This audit module only runs on linux'
def apply_labels(__data__, labels):
    '''
    Filters out the tests whose label doesn't match the labels given when running audit and returns a new data structure with only labelled tests.
    '''
    if not labels:
        # No label filter requested: hand back the data untouched.
        return __data__

    wanted = set(labels)
    filtered = {'mount': {}}
    mount_data = __data__.get('mount', {})
    for topkey in ('blacklist', 'whitelist'):
        if topkey not in mount_data:
            continue
        kept = []
        for test_case in mount_data.get(topkey, []):
            # Each test case is a one-entry dict: {test name: test body}.
            # Keep it only when the body's labels cover every requested label.
            if isinstance(test_case, dict) and test_case:
                body = test_case.get(next(iter(test_case)))
                if wanted.issubset(set(body.get('labels', []))):
                    kept.append(test_case)
        filtered['mount'][topkey] = kept
    return filtered
def audit(data_list, tags, labels, debug=False, **kwargs):
    '''
    Run the mount audits contained in the YAML files processed by __virtual__

    data_list
        list of (profile, yaml_data) tuples to merge and audit
    tags
        glob pattern of audit tags to run
    labels
        optional list of labels; only tests carrying all of them are run
    debug
        if True, log the merged data and tag map

    Returns a dict with 'Success', 'Failure' and 'Controlled' result lists.
    '''
    __data__ = {}
    for profile, data in data_list:
        _merge_yaml(__data__, data, profile)
    __data__ = apply_labels(__data__, labels)
    __tags__ = _get_tags(__data__)

    if debug:
        log.debug('mount audit __data__:')
        log.debug(__data__)
        log.debug('mount audit __tags__:')
        log.debug(__tags__)

    ret = {'Success': [], 'Failure': [], 'Controlled': []}

    for tag in __tags__:
        if not fnmatch.fnmatch(tag, tags):
            continue
        for tag_data in __tags__[tag]:
            if 'control' in tag_data:
                ret['Controlled'].append(tag_data)
                continue

            name = tag_data.get('name')
            audittype = tag_data.get('type')

            if 'attribute' not in tag_data:
                log.error('No attribute found for mount audit {0}, file {1}'
                          .format(tag, name))
                tag_data = copy.deepcopy(tag_data)
                # BUG FIX: this previously was 'No pattern found'.format(mod),
                # which raised NameError because ``mod`` is undefined here.
                tag_data['error'] = 'No attribute found'
                ret['Failure'].append(tag_data)
                continue
            attribute = tag_data.get('attribute')

            # check_type defaults to 'hard': a missing or unmounted path
            # counts as a failure unless the test opts into 'soft'.
            check_type = 'hard'
            if 'check_type' in tag_data:
                check_type = tag_data.get('check_type')
            if check_type not in ['hard', 'soft']:
                log.error('Unrecognized option: ' + check_type)
                tag_data = copy.deepcopy(tag_data)
                tag_data['error'] = 'check_type can only be hard or soft'
                ret['Failure'].append(tag_data)
                continue

            found = _check_mount_attribute(name, attribute, check_type)

            if audittype == 'blacklist':
                if found:
                    ret['Failure'].append(tag_data)
                else:
                    ret['Success'].append(tag_data)
            elif audittype == 'whitelist':
                if found:
                    ret['Success'].append(tag_data)
                else:
                    ret['Failure'].append(tag_data)

    return ret
def _merge_yaml(ret, data, profile=None):
'''
Merge two yaml dicts together at the mount:blacklist and mount:whitelist level
'''
if 'mount' not in ret:
ret['mount'] = {}
for topkey in ('blacklist', 'whitelist'):
if topkey in data.get('mount', {}):
if topkey not in ret['mount']:
ret['mount'][topkey] = []
for key, val in data['mount'][topkey].iteritems():
if profile and isinstance(val, dict):
val['nova_profile'] = profile
ret['mount'][topkey].append({key: val})
return ret
def _get_tags(data):
    '''
    Retrieve all the tags for this distro from the yaml

    Walks mount:blacklist / mount:whitelist, selects the per-osfinger rules
    matching the local ``osfinger`` grain (falling back to '*'), and returns
    a dict mapping audit tag -> list of flattened test descriptors.
    '''
    ret = {}
    distro = __grains__.get('osfinger')
    # .items() instead of .iteritems() throughout for Python 3 compatibility.
    for toplist, toplevel in data.get('mount', {}).items():
        # mount:blacklist
        for audit_dict in toplevel:
            # mount:blacklist:0
            for audit_id, audit_data in audit_dict.items():
                # mount:blacklist:0:telnet
                tags_dict = audit_data.get('data', {})
                # mount:blacklist:0:telnet:data
                tags = None
                for osfinger in tags_dict:
                    if osfinger == '*':
                        continue
                    osfinger_list = [finger.strip() for finger in osfinger.split(',')]
                    for osfinger_glob in osfinger_list:
                        if fnmatch.fnmatch(distro, osfinger_glob):
                            tags = tags_dict.get(osfinger)
                            break
                    if tags is not None:
                        break
                # If we didn't find a match, check for a '*'
                if tags is None:
                    tags = tags_dict.get('*', [])
                # mount:blacklist:0:telnet:data:Debian-8
                if isinstance(tags, dict):
                    # malformed yaml, convert to list of dicts
                    tmp = []
                    for name, tag in tags.items():
                        tmp.append({name: tag})
                    tags = tmp
                for item in tags:
                    for name, tag in item.items():
                        tag_data = {}
                        # Whitelist could have a dictionary, not a string
                        if isinstance(tag, dict):
                            tag_data = copy.deepcopy(tag)
                            tag = tag_data.pop('tag')
                        if tag not in ret:
                            ret[tag] = []
                        formatted_data = {'name': name,
                                          'tag': tag,
                                          'module': 'mount',
                                          'type': toplist}
                        formatted_data.update(tag_data)
                        formatted_data.update(audit_data)
                        formatted_data.pop('data')
                        ret[tag].append(formatted_data)
    return ret
def _check_mount_attribute(path, attribute, check_type):
'''
This function checks if the partition at a given path is mounted with a particular attribute or not.
If 'check_type' is 'hard', the function returns False if he specified path does not exist, or if it
is not a mounted partition. If 'check_type' is 'soft', the functions returns True in such cases.
'''
if not os.path.exists(path):
if check_type == 'hard':
return False
else:
return True
mount_object = __salt__['mount.active']()
if path in mount_object:
attributes = mount_object.get(path)
opts = attributes.get('opts')
if attribute in opts:
return True
else:
return False
else:
if check_type == 'hard':
return False
else:
return True
| |
"""
Comsystem command module.
Comm commands are OOC commands and intended to be made available to
the Player at all times (they go into the PlayerCmdSet). So we
make sure to homogenize self.caller to always be the player object
for easy handling.
"""
from django.conf import settings
from src.comms.models import ChannelDB, Msg, PlayerChannelConnection, ExternalChannelConnection
from src.comms import irc, imc2, rss
from src.comms.channelhandler import CHANNELHANDLER
from src.utils import create, utils, prettytable
from src.utils.utils import make_iter
from src.commands.default.muxcommand import MuxCommand, MuxPlayerCommand
# limit symbol import for API
__all__ = ("CmdAddCom", "CmdDelCom", "CmdAllCom",
"CmdChannels", "CmdCdestroy", "CmdCBoot", "CmdCemit",
"CmdCWho", "CmdChannelCreate", "CmdClock", "CmdCdesc",
"CmdPage", "CmdIRC2Chan", "CmdIMC2Chan", "CmdIMCInfo",
"CmdIMCTell", "CmdRSS2Chan")
def find_channel(caller, channelname, silent=False, noaliases=False):
    """
    Helper function for searching for a single channel with
    some error handling.
    """
    matches = ChannelDB.objects.channel_search(channelname)
    if not matches:
        # No direct key match; optionally fall back to alias lookup.
        if not noaliases:
            matches = [chan for chan in ChannelDB.objects.get_all_channels()
                       if channelname in chan.aliases.all()]
        if matches:
            return matches[0]
        if not silent:
            caller.msg("Channel '%s' not found." % channelname)
        return None
    if len(matches) > 1:
        # Ambiguous name: report the candidates instead of guessing.
        listing = ", ".join(["%s(%s)" % (chan.key, chan.id) for chan in matches])
        if not silent:
            caller.msg("Multiple channels match (be more specific): \n%s" % listing)
        return None
    return matches[0]
class CmdAddCom(MuxPlayerCommand):
    """
    addcom - subscribe to a channel with optional alias

    Usage:
       addcom [alias=] <channel>

    Joins a given channel. If alias is given, this will allow you to
    refer to the channel by this alias rather than the full channel
    name. Subsequent calls of this command can be used to add multiple
    aliases to an already joined channel.
    """

    key = "addcom"
    aliases = ["aliaschan", "chanalias"]
    help_category = "Comms"
    locks = "cmd:not pperm(channel_banned)"

    def func(self):
        "Implement the command"

        caller = self.caller
        args = self.args
        player = caller

        if not args:
            self.msg("Usage: addcom [alias =] channelname.")
            return

        if self.rhs:
            # rhs holds the channelname
            channelname = self.rhs
            alias = self.lhs
        else:
            channelname = self.args
            alias = None

        channel = find_channel(caller, channelname)
        if not channel:
            # we use the custom search method to handle errors.
            return

        # check permissions
        if not channel.access(player, 'listen'):
            self.msg("%s: You are not allowed to listen to this channel." % channel.key)
            return

        string = ""
        if not channel.has_connection(player):
            # we want to connect as well.
            if not channel.connect_to(player):
                # if this would have returned True, the player is connected
                self.msg("%s: You are not allowed to join this channel." % channel.key)
                return
            else:
                string += "You now listen to the channel %s. " % channel.key
        else:
            string += "You are already connected to channel %s." % channel.key

        if alias:
            # create a nick and add it to the caller.
            caller.nicks.add(alias, channel.key, category="channel")
            # NOTE(review): the sentence appended below is %-formatted a
            # second time together with the already-formatted text above;
            # if channel.key ever contained a '%' the final format would
            # raise -- confirm channel keys cannot contain format chars.
            string += " You can now refer to the channel %s with the alias '%s'."
            self.msg(string % (channel.key, alias))
        else:
            string += " No alias added."
            self.msg(string)
class CmdDelCom(MuxPlayerCommand):
    """
    delcom - unsubscribe from channel or remove channel alias

    Usage:
       delcom <alias or channel>

    If the full channel name is given, unsubscribe from the
    channel. If an alias is given, remove the alias but don't
    unsubscribe.
    """

    key = "delcom"
    # BUG FIX: this was previously the single string
    # ["delaliaschan, delchanalias"], which registered one bogus alias
    # containing a comma instead of the two intended aliases.
    aliases = ["delaliaschan", "delchanalias"]
    help_category = "Comms"
    locks = "cmd:not perm(channel_banned)"

    def func(self):
        "Implementing the command. "

        caller = self.caller
        player = caller

        if not self.args:
            self.msg("Usage: delcom <alias or channel>")
            return
        ostring = self.args.lower()

        channel = find_channel(caller, ostring, silent=True, noaliases=True)
        if channel:
            # we have given a channel name - unsubscribe
            if not channel.has_connection(player):
                self.msg("You are not listening to that channel.")
                return
            chkey = channel.key.lower()
            # find all nicks linked to this channel and delete them
            for nick in [nick for nick in caller.nicks.get(category="channel")
                         if nick.strvalue.lower() == chkey]:
                nick.delete()
            disconnect = channel.disconnect_from(player)
            if disconnect:
                self.msg("You stop listening to channel '%s'. Eventual aliases were removed." % channel.key)
            return
        else:
            # we are removing a channel nick
            channame = caller.nicks.get(key=ostring, category="channel")
            channel = find_channel(caller, channame, silent=True)
            if not channel:
                self.msg("No channel with alias '%s' was found." % ostring)
            else:
                if caller.nicks.get(ostring, category="channel"):
                    caller.nicks.remove(ostring, category="channel")
                    self.msg("Your alias '%s' for channel %s was cleared." % (ostring, channel.key))
                else:
                    self.msg("You had no such alias defined for this channel.")
class CmdAllCom(MuxPlayerCommand):
    """
    allcom - operate on all channels

    Usage:
      allcom [on | off | who | destroy]

    Allows the user to universally turn off or on all channels they are on,
    as well as perform a 'who' for all channels they are on. Destroy deletes
    all channels that you control.

    Without argument, works like comlist.
    """

    key = "allcom"
    locks = "cmd: not pperm(channel_banned)"
    help_category = "Comms"

    def func(self):
        "Runs the function"

        caller = self.caller
        args = self.args
        if not args:
            # No argument: show the channel list plus a usage hint.
            caller.execute_cmd("@channels")
            self.msg("(Usage: allcom on | off | who | destroy)")
            return

        if args == "on":
            # get names of all channels available to listen to
            # and activate them all
            channels = [chan for chan in ChannelDB.objects.get_all_channels()
                        if chan.access(caller, 'listen')]
            for channel in channels:
                caller.execute_cmd("addcom %s" % channel.key)
        elif args == "off":
            # get names of all subscribed channels and disconnect from them all
            channels = [conn.channel for conn in PlayerChannelConnection.objects.get_all_player_connections(caller)]
            for channel in channels:
                caller.execute_cmd("delcom %s" % channel.key)
        elif args == "destroy":
            # destroy all channels you control
            channels = [chan for chan in ChannelDB.objects.get_all_channels()
                        if chan.access(caller, 'control')]
            for channel in channels:
                caller.execute_cmd("@cdestroy %s" % channel.key)
        elif args == "who":
            # run a who, listing the subscribers on visible channels.
            string = "\n{CChannel subscriptions{n"
            channels = [chan for chan in ChannelDB.objects.get_all_channels()
                        if chan.access(caller, 'listen')]
            if not channels:
                string += "No channels."
            for channel in channels:
                string += "\n{w%s:{n\n" % channel.key
                conns = PlayerChannelConnection.objects.get_all_connections(channel)
                if conns:
                    string += " " + ", ".join([conn.player.key for conn in conns])
                else:
                    string += " <None>"
            self.msg(string.strip())
        else:
            # wrong input
            # BUG FIX: this hint previously listed 'clear', which is not a
            # recognized argument; 'destroy' is what the code implements
            # (and what the help text above advertises).
            self.msg("Usage: allcom on | off | who | destroy")
class CmdChannels(MuxPlayerCommand):
    """
    @clist

    Usage:
      @channels
      @clist
      comlist

    Lists all channels available to you, whether you listen to them or not.
    Use 'comlist' to only view your current channel subscriptions.
    Use addcom/delcom to join and leave channels
    """

    key = "@channels"
    aliases = ["@clist", "channels", "comlist", "chanlist", "channellist", "all channels"]
    help_category = "Comms"
    locks = "cmd: not pperm(channel_banned)"

    def func(self):
        "Implement function"

        caller = self.caller

        # all channels we have available to listen to
        channels = [chan for chan in ChannelDB.objects.get_all_channels()
                    if chan.access(caller, 'listen')]
        if not channels:
            self.msg("No channels available.")
            return
        # all channels we are already subscribed to
        subs = [conn.channel for conn in PlayerChannelConnection.objects.get_all_player_connections(caller)]

        if self.cmdstring == "comlist":
            # just display the subscribed channels with no extra info
            comtable = prettytable.PrettyTable(["{wchannel",
                                                "{wmy aliases",
                                                "{wdescription"])
            for chan in subs:
                clower = chan.key.lower()
                nicks = caller.nicks.get(category="channel")
                # BUG FIX: the alias column was previously built with
                # "%s".join(...), which interleaves the literal text '%s'
                # between entries; use a comma separator instead.
                comtable.add_row(["%s%s" % (chan.key, chan.aliases.all() and
                                  "(%s)" % ",".join(chan.aliases.all()) or ""),
                                  ",".join(nick for nick in make_iter(nicks)
                                           if nick and nick.lower() == clower),
                                  chan.db.desc])
            caller.msg("\n{wChannel subscriptions{n (use {w@channels{n to list all, {waddcom{n/{wdelcom{n to sub/unsub):{n\n%s" % comtable)
        else:
            # full listing (of channels caller is able to listen to)
            comtable = prettytable.PrettyTable(["{wsub",
                                                "{wchannel",
                                                "{wmy aliases",
                                                "{wlocks",
                                                "{wdescription"])
            for chan in channels:
                clower = chan.key.lower()
                nicks = caller.nicks.get(category="channel")
                nicks = nicks or []
                # Same BUG FIX as above: comma-join, not "%s"-join.
                comtable.add_row([chan in subs and "{gYes{n" or "{rNo{n",
                                  "%s%s" % (chan.key, chan.aliases.all() and
                                  "(%s)" % ",".join(chan.aliases.all()) or ""),
                                  ",".join(nick for nick in make_iter(nicks)
                                           if nick.lower() == clower),
                                  str(chan.locks),
                                  chan.db.desc])
            caller.msg("\n{wAvailable channels{n (use {wcomlist{n,{waddcom{n and {wdelcom{n to manage subscriptions):\n%s" % comtable)
class CmdCdestroy(MuxPlayerCommand):
    """
    @cdestroy

    Usage:
      @cdestroy <channel>

    Destroys a channel that you control.
    """

    key = "@cdestroy"
    help_category = "Comms"
    locks = "cmd: not pperm(channel_banned)"

    def func(self):
        "Destroy objects cleanly."
        args = self.args
        if not args:
            self.msg("Usage: @cdestroy <channelname>")
            return
        chan = find_channel(self.caller, args)
        if not chan:
            self.msg("Could not find channel %s." % args)
            return
        if not chan.access(self.caller, 'control'):
            self.msg("You are not allowed to do that.")
            return
        # Announce the destruction on the channel itself before deleting,
        # then refresh the channel handler so the command set updates.
        message = "%s is being destroyed. Make sure to change your aliases." % chan
        msgobj = create.create_message(self.caller, message, chan)
        chan.msg(msgobj)
        chan.delete()
        CHANNELHANDLER.update()
        self.msg("Channel '%s' was destroyed." % chan)
class CmdCBoot(MuxPlayerCommand):
    """
    @cboot

    Usage:
       @cboot[/quiet] <channel> = <player> [:reason]

    Switches:
       quiet - don't notify the channel

    Kicks a player or object from a channel you control.
    """

    key = "@cboot"
    locks = "cmd: not pperm(channel_banned)"
    help_category = "Comms"

    def func(self):
        "implement the function"

        if not self.args or not self.rhs:
            string = "Usage: @cboot[/quiet] <channel> = <player> [:reason]"
            self.msg(string)
            return

        channel = find_channel(self.caller, self.lhs)
        if not channel:
            return
        reason = ""
        if ":" in self.rhs:
            # NOTE(review): rsplit on the LAST ':' means a reason that itself
            # contains ':' gets split at its final colon -- confirm intended.
            playername, reason = self.rhs.rsplit(":", 1)
            searchstring = playername.lstrip('*')
        else:
            searchstring = self.rhs.lstrip('*')
        player = self.caller.search(searchstring, player=True)
        if not player:
            return
        if reason:
            reason = " (reason: %s)" % reason
        if not channel.access(self.caller, "control"):
            string = "You don't control this channel."
            self.msg(string)
            return
        if not PlayerChannelConnection.objects.has_player_connection(player, channel):
            string = "Player %s is not connected to channel %s." % (player.key, channel.key)
            self.msg(string)
            return
        if not "quiet" in self.switches:
            string = "%s boots %s from channel.%s" % (self.caller, player.key, reason)
            channel.msg(string)
        # find all player's nicks linked to this channel and delete them
        # NOTE(review): the lowercased nick value is compared against the
        # non-lowercased channel.key -- mixed-case channel keys would never
        # match here; confirm channel keys are stored lowercase.
        for nick in [nick for nick in
                     player.character.nicks.get(category="channel") or []
                     if nick.db_real.lower() == channel.key]:
            nick.delete()
        # disconnect player
        channel.disconnect_from(player)
        CHANNELHANDLER.update()
class CmdCemit(MuxPlayerCommand):
    """
    @cemit - send a message to channel

    Usage:
      @cemit[/switches] <channel> = <message>

    Switches:
      noheader - don't show the [channel] header before the message
      sendername - attach the sender's name before the message
      quiet - don't echo the message back to sender

    Allows the user to broadcast a message over a channel as long as
    they control it. It does not show the user's name unless they
    provide the /sendername switch.
    """

    key = "@cemit"
    aliases = ["@cmsg"]
    locks = "cmd: not pperm(channel_banned)"
    help_category = "Comms"

    def func(self):
        """Broadcast self.rhs over the channel named by self.lhs."""
        if not self.args or not self.rhs:
            self.msg("Usage: @cemit[/switches] <channel> = <message>")
            return
        channel = find_channel(self.caller, self.lhs)
        if not channel:
            return
        if not channel.access(self.caller, "control"):
            self.msg("You don't control this channel.")
            return
        # Build the outgoing text, honoring switches in order: sender
        # name first, then the [channel] header.
        outgoing = self.rhs
        if "sendername" in self.switches:
            outgoing = "%s: %s" % (self.key, outgoing)
        if "noheader" not in self.switches:
            outgoing = "[%s] %s" % (channel.key, outgoing)
        channel.msg(outgoing)
        if "quiet" not in self.switches:
            self.msg("Sent to channel %s: %s" % (channel.key, outgoing))
class CmdCWho(MuxPlayerCommand):
    """
    @cwho

    Usage:
      @cwho <channel>

    List who is connected to a given channel you have access to.
    """

    key = "@cwho"
    locks = "cmd: not pperm(channel_banned)"
    help_category = "Comms"

    def func(self):
        """List the subscribers of the channel named by self.lhs."""
        if not self.args:
            self.msg("Usage: @cwho <channel>")
            return
        channel = find_channel(self.caller, self.lhs)
        if not channel:
            return
        if not channel.access(self.caller, "listen"):
            self.msg("You can't access this channel.")
            return
        # Assemble the subscriber report.
        text = "\n{CChannel subscriptions{n"
        text += "\n{w%s:{n\n" % channel.key
        subscribers = PlayerChannelConnection.objects.get_all_connections(channel)
        if subscribers:
            text += " " + ", ".join([conn.player.key for conn in subscribers])
        else:
            text += " <None>"
        self.msg(text.strip())
class CmdChannelCreate(MuxPlayerCommand):
    """
    @ccreate
    channelcreate
    Usage:
     @ccreate <new channel>[;alias;alias...] = description

    Creates a new channel owned by you.
    """

    key = "@ccreate"
    aliases = "channelcreate"
    locks = "cmd:not pperm(channel_banned)"
    help_category = "Comms"

    def func(self):
        "Implement the command"

        caller = self.caller

        if not self.args:
            self.msg("Usage @ccreate <channelname>[;alias;alias..] = description")
            return

        description = ""
        if self.rhs:
            description = self.rhs
        lhs = self.lhs
        channame = lhs
        aliases = None
        if ';' in lhs:
            # BUG FIX: the old two-way unpack
            #   channame, aliases = [... for part in lhs.split(';', 1) ...]
            # raised ValueError for input like "chan;" (a trailing separator
            # with no aliases). Split on every ';' and take name + rest.
            parts = [part.strip().lower()
                     for part in lhs.split(';') if part.strip()]
            channame = parts[0]
            aliases = parts[1:] or None
        channel = ChannelDB.objects.channel_search(channame)
        if channel:
            self.msg("A channel with that name already exists.")
            return
        # Create and set the channel up: anyone may use it, only the
        # creator controls it.
        lockstring = "send:all();listen:all();control:id(%s)" % caller.id
        new_chan = create.create_channel(channame,
                                         aliases,
                                         description,
                                         locks=lockstring)
        new_chan.connect_to(caller)
        self.msg("Created channel %s and connected to it." % new_chan.key)
class CmdClock(MuxPlayerCommand):
    """
    @clock - changes channel access restrictions

    Usage:
      @clock <channel> [= <lockstring>]

    Changes the lock access restrictions of a channel. If no
    lockstring was given, view the current lock definitions.
    """

    key = "@clock"
    locks = "cmd:not pperm(channel_banned)"
    # NOTE(review): this alias duplicates the key; harmless but redundant.
    aliases = ["@clock"]
    help_category = "Comms"

    def func(self):
        """View or modify the locks of the channel named by self.lhs."""
        if not self.args:
            self.msg("Usage: @clock channel [= lockstring]")
            return
        channel = find_channel(self.caller, self.lhs)
        if not channel:
            return
        if not self.rhs:
            # No '=' given, so only display the current lock definitions.
            self.msg("Current locks on %s:\n %s" % (channel.key, channel.locks))
            return
        # Changing locks requires control access.
        if not channel.access(self.caller, "control"):
            self.msg("You don't control this channel.")
            return
        channel.locks.add(self.rhs)
        self.msg("Lock(s) applied. Current locks on %s:\n %s" % (channel.key, channel.locks))
class CmdCdesc(MuxPlayerCommand):
    """
    @cdesc - set channel description

    Usage:
      @cdesc <channel> = <description>

    Changes the description of the channel as shown in
    channel lists.
    """

    key = "@cdesc"
    locks = "cmd:not pperm(channel_banned)"
    help_category = "Comms"

    def func(self):
        """Store self.rhs as the description of the channel in self.lhs."""
        if not self.rhs:
            self.msg("Usage: @cdesc <channel> = <description>")
            return
        channel = find_channel(self.caller, self.lhs)
        if not channel:
            self.msg("Channel '%s' not found." % self.lhs)
            return
        # Only channel controllers may change the description.
        if not channel.access(self.caller, 'control'):
            self.msg("You cannot admin this channel.")
            return
        channel.db.desc = self.rhs
        channel.save()
        self.msg("Description of channel '%s' set to '%s'." % (channel.key,
                                                               self.rhs))
class CmdPage(MuxPlayerCommand):
    """
    page - send private message

    Usage:
      page[/switches] [<player>,<player>,... = <message>]
      tell        ''
      page <number>

    Switch:
      last - shows who you last messaged
      list - show your last <number> of tells/pages (default)

    Send a message to target user (if online). If no
    argument is given, you will get a list of your latest messages.
    """

    key = "page"
    aliases = ['tell']
    locks = "cmd:not pperm(page_banned)"
    help_category = "Comms"

    def func(self):
        "Implement function using the Msg methods"

        # this is a MuxPlayerCommand, which means caller will be a Player.
        caller = self.caller

        # get the messages we've sent (not to channels)
        pages_we_sent = Msg.objects.get_messages_by_sender(caller,
                                                           exclude_channel_messages=True)
        # get last messages we've got
        pages_we_got = Msg.objects.get_messages_by_receiver(caller)

        if 'last' in self.switches:
            if pages_we_sent:
                recv = ",".join(obj.key for obj in pages_we_sent[-1].receivers)
                self.msg("You last paged {c%s{n:%s" % (recv,
                                                       pages_we_sent[-1].message))
                return
            else:
                self.msg("You haven't paged anyone yet.")
                return

        if not self.args or not self.rhs:
            # No '=': listing mode. Show the last <number> pages.
            pages = pages_we_sent + pages_we_got
            # NOTE(review): cmp/comparator sort is Python 2 only, as is
            # ``basestring`` further down -- this module targets Python 2.
            pages.sort(lambda x, y: cmp(x.date_sent, y.date_sent))

            number = 5
            if self.args:
                try:
                    number = int(self.args)
                except ValueError:
                    self.msg("Usage: tell [<player> = msg]")
                    return

            if len(pages) > number:
                lastpages = pages[-number:]
            else:
                lastpages = pages
            template = "{w%s{n {c%s{n to {c%s{n: %s"
            # NOTE(review): senders use obj.key while receivers use obj.name
            # -- confirm this asymmetry is intended.
            lastpages = "\n ".join(template %
                                   (utils.datetime_format(page.date_sent),
                                    ",".join(obj.key for obj in page.senders),
                                    "{n,{c ".join([obj.name for obj in page.receivers]),
                                    page.message) for page in lastpages)

            if lastpages:
                string = "Your latest pages:\n %s" % lastpages
            else:
                string = "You haven't paged anyone yet."
            self.msg(string)
            return

        # We are sending. Build a list of targets
        if not self.lhs:
            # If there are no targets, then set the targets
            # to the last person we paged.
            if pages_we_sent:
                receivers = pages_we_sent[-1].receivers
            else:
                self.msg("Who do you want to page?")
                return
        else:
            receivers = self.lhslist

        # Resolve each target to a character object where possible.
        recobjs = []
        for receiver in set(receivers):
            if isinstance(receiver, basestring):
                pobj = caller.search(receiver)
            elif hasattr(receiver, 'character'):
                pobj = receiver.character
            else:
                self.msg("Who do you want to page?")
                return
            if pobj:
                recobjs.append(pobj)
        if not recobjs:
            # NOTE(review): "Noone" is a typo for "No one" in this
            # user-facing string; left untouched to preserve behavior.
            self.msg("Noone found to page.")
            return

        header = "{wPlayer{n {c%s{n {wpages:{n" % caller.key
        message = self.rhs

        # if message begins with a :, we assume it is a 'page-pose'
        if message.startswith(":"):
            message = "%s %s" % (caller.key, message.strip(':').strip())

        # create the persistent message object
        create.create_message(caller, message,
                              receivers=recobjs)

        # tell the players they got a message.
        received = []
        rstrings = []
        for pobj in recobjs:
            if not pobj.access(caller, 'msg'):
                rstrings.append("You are not allowed to page %s." % pobj)
                continue
            pobj.msg("%s %s" % (header, message))
            if hasattr(pobj, 'has_player') and not pobj.has_player:
                received.append("{C%s{n" % pobj.name)
                rstrings.append("%s is offline. They will see your message if they list their pages later." % received[-1])
            else:
                received.append("{c%s{n" % pobj.name)
        if rstrings:
            self.msg(rstrings="\n".join(rstrings))
        self.msg("You paged %s with: '%s'." % (", ".join(received), message))
class CmdIRC2Chan(MuxCommand):
    """
    @irc2chan - link evennia channel to an IRC channel

    Usage:
      @irc2chan[/switches] <evennia_channel> = <ircnetwork> <port> <#irchannel> <botname>

    Switches:
      /disconnect - this will delete the bot and remove the irc connection
                    to the channel.
      /remove     - "
      /list       - show all irc<->evennia mappings

    Example:
      @irc2chan myircchan = irc.dalnet.net 6667 myevennia-channel evennia-bot

    This creates an IRC bot that connects to a given IRC network and channel.
    It will relay everything said in the evennia channel to the IRC channel and
    vice versa. The bot will automatically connect at server start, so this
    command need only be given once. The /disconnect switch will permanently
    delete the bot. To only temporarily deactivate it, use the {w@services{n
    command instead.
    """

    key = "@irc2chan"
    locks = "cmd:serversetting(IRC_ENABLED) and pperm(Immortals)"
    help_category = "Comms"

    def func(self):
        "Setup the irc-channel mapping"

        if not settings.IRC_ENABLED:
            string = """IRC is not enabled. You need to activate it in game/settings.py."""
            self.msg(string)
            return

        if 'list' in self.switches:
            # show all connections
            connections = ExternalChannelConnection.objects.filter(db_external_key__startswith='irc_')
            if connections:
                table = prettytable.PrettyTable(["Evennia channel", "IRC channel"])
                for conn in connections:
                    # external_config stores the IRC parameters '|'-separated.
                    table.add_row([conn.channel.key, " ".join(conn.external_config.split('|'))])
                string = "{wIRC connections:{n\n%s" % table
                self.msg(string)
            else:
                self.msg("No connections found.")
            return

        if not self.args or not self.rhs:
            string = "Usage: @irc2chan[/switches] <evennia_channel> = <ircnetwork> <port> <#irchannel> <botname>"
            self.msg(string)
            return
        channel = self.lhs
        # Strip '#' from user input (to avoid Python comment issues); the
        # IRC channel name gets its '#' prefix re-added below.
        self.rhs = self.rhs.replace('#', ' ')
        try:
            irc_network, irc_port, irc_channel, irc_botname = \
                [part.strip() for part in self.rhs.split(None, 3)]
            irc_channel = "#%s" % irc_channel
        except Exception:
            string = "IRC bot definition '%s' is not valid." % self.rhs
            self.msg(string)
            return

        if('disconnect' in self.switches or 'remove' in self.switches or
           'delete' in self.switches):
            # Resolve to the canonical channel key if the channel still
            # exists; otherwise keep the raw name so stale links can be
            # removed too.
            chanmatch = find_channel(self.caller, channel, silent=True)
            if chanmatch:
                channel = chanmatch.key

            ok = irc.delete_connection(channel,
                                       irc_network,
                                       irc_port,
                                       irc_channel,
                                       irc_botname)
            if not ok:
                self.msg("IRC connection/bot could not be removed, does it exist?")
            else:
                self.msg("IRC connection destroyed.")
            return

        channel = find_channel(self.caller, channel)
        if not channel:
            return
        ok = irc.create_connection(channel,
                                   irc_network,
                                   irc_port,
                                   irc_channel,
                                   irc_botname)
        if not ok:
            self.msg("This IRC connection already exists.")
            return
        self.msg("Connection created. Starting IRC bot.")
class CmdIMC2Chan(MuxCommand):
    """
    imc2chan - link an evennia channel to imc2
    Usage:
      @imc2chan[/switches] <evennia_channel> = <imc2_channel>
    Switches:
      /disconnect - this clear the imc2 connection to the channel.
      /remove - "
      /list - show all imc2<->evennia mappings
    Example:
      @imc2chan myimcchan = ievennia
    Connect an existing evennia channel to a channel on an IMC2
    network. The network contact information is defined in settings and
    should already be accessed at this point. Use @imcchanlist to see
    available IMC channels.
    """
    # NOTE: the class docstring above doubles as the in-game help entry.
    key = "@imc2chan"
    locks = "cmd:serversetting(IMC2_ENABLED) and pperm(Immortals)"
    help_category = "Comms"

    def func(self):
        "Setup the imc-channel mapping"
        # Abort early when IMC2 support is switched off in settings.
        if not settings.IMC2_ENABLED:
            string = """IMC is not enabled. You need to activate it in game/settings.py."""
            self.msg(string)
            return
        if 'list' in self.switches:
            # show all connections
            connections = ExternalChannelConnection.objects.filter(db_external_key__startswith='imc2_')
            if connections:
                table = prettytable.PrettyTable(["Evennia channel", "IMC channel"])
                for conn in connections:
                    table.add_row([conn.channel.key, conn.external_config])
                string = "{wIMC connections:{n\n%s" % table
                self.msg(string)
            else:
                self.msg("No connections found.")
            return
        if not self.args or not self.rhs:
            string = "Usage: @imc2chan[/switches] <evennia_channel> = <imc2_channel>"
            self.msg(string)
            return
        channel = self.lhs
        imc2_channel = self.rhs
        if('disconnect' in self.switches or 'remove' in self.switches or
           'delete' in self.switches):
            # we don't search for channels before this since we want
            # to clear the link also if the channel no longer exists.
            ok = imc2.delete_connection(channel, imc2_channel)
            if not ok:
                self.msg("IMC2 connection could not be removed, does it exist?")
            else:
                self.msg("IMC2 connection destroyed.")
            return
        # actually get the channel object
        channel = find_channel(self.caller, channel)
        if not channel:
            # find_channel presumably reports the failure itself -- confirm.
            return
        ok = imc2.create_connection(channel, imc2_channel)
        if not ok:
            self.msg("The connection %s <-> %s already exists." % (channel.key, imc2_channel))
            return
        self.msg("Created connection channel %s <-> IMC channel %s." % (channel.key, imc2_channel))
class CmdIMCInfo(MuxCommand):
    """
    imcinfo - package of imc info commands
    Usage:
      @imcinfo[/switches]
      @imcchanlist - list imc2 channels
      @imclist - list connected muds
      @imcwhois <playername> - whois info about a remote player
    Switches for @imcinfo:
      channels - as @imcchanlist (default)
      games or muds - as @imclist
      whois - as @imcwhois (requires an additional argument)
      update - force an update of all lists
    Shows lists of games or channels on the IMC2 network.
    """
    # NOTE: the class docstring above doubles as the in-game help entry.
    key = "@imcinfo"
    aliases = ["@imcchanlist", "@imclist", "@imcwhois"]
    locks = "cmd: serversetting(IMC2_ENABLED) and pperm(Wizards)"
    help_category = "Comms"

    def func(self):
        "Run the command"
        # Abort early when IMC2 support is switched off in settings.
        if not settings.IMC2_ENABLED:
            string = """IMC is not enabled. You need to activate it in game/settings.py."""
            self.msg(string)
            return
        if "update" in self.switches:
            # update the lists
            import time
            from src.comms.imc2lib import imc2_packets as pck
            from src.comms.imc2 import IMC2_MUDLIST, IMC2_CHANLIST, IMC2_CLIENT
            # update connected muds
            IMC2_CLIENT.send_packet(pck.IMC2PacketKeepAliveRequest())
            # prune inactive muds
            for name, mudinfo in IMC2_MUDLIST.mud_list.items():
                # entries not refreshed within ~an hour are dropped
                if time.time() - mudinfo.last_updated > 3599:
                    del IMC2_MUDLIST.mud_list[name]
            # update channel list
            IMC2_CLIENT.send_packet(pck.IMC2PacketIceRefresh())
            self.msg("IMC2 lists were re-synced.")
        elif("games" in self.switches or "muds" in self.switches
             or self.cmdstring == "@imclist"):
            # list muds
            from src.comms.imc2 import IMC2_MUDLIST
            muds = IMC2_MUDLIST.get_mud_list()
            networks = set(mud.networkname for mud in muds)
            string = ""
            nmuds = 0
            # one table per network the registered muds belong to
            for network in networks:
                table = prettytable.PrettyTable(["Name", "Url", "Host", "Port"])
                for mud in (mud for mud in muds if mud.networkname == network):
                    nmuds += 1
                    table.add_row([mud.name, mud.url, mud.host, mud.port])
                string += "\n{wMuds registered on %s:{n\n%s" % (network, table)
            string += "\n %i Muds found." % nmuds
            self.msg(string)
        elif "whois" in self.switches or self.cmdstring == "@imcwhois":
            # find out about a player
            if not self.args:
                self.msg("Usage: @imcwhois <playername>")
                return
            from src.comms.imc2 import IMC2_CLIENT
            # fire-and-forget: any reply arrives asynchronously over IMC2
            self.msg("Sending IMC whois request. If you receive no response, no matches were found.")
            IMC2_CLIENT.msg_imc2(None,
                                 from_obj=self.caller,
                                 packet_type="imcwhois",
                                 target=self.args)
        elif(not self.switches or "channels" in self.switches or
             self.cmdstring == "@imcchanlist"):
            # show channels
            from src.comms.imc2 import IMC2_CHANLIST, IMC2_CLIENT
            channels = IMC2_CHANLIST.get_channel_list()
            string = ""
            nchans = 0
            table = prettytable.PrettyTable(["Full name", "Name", "Owner", "Perm", "Policy"])
            for chan in channels:
                nchans += 1
                table.add_row([chan.name, chan.localname, chan.owner,
                               chan.level, chan.policy])
            string += "\n{wChannels on %s:{n\n%s" % (IMC2_CLIENT.factory.network, table)
            string += "\n%i Channels found." % nchans
            self.msg(string)
        else:
            # no valid inputs
            string = "Usage: imcinfo|imcchanlist|imclist"
            self.msg(string)
# NOTE: CmdIMCTell below is untested -- verify it against a live IMC2 network before relying on it.
class CmdIMCTell(MuxCommand):
    """
    imctell - send a page to a remote IMC player
    Usage:
      imctell User@MUD = <msg>
      imcpage "
    Sends a page to a user on a remote MUD, connected
    over IMC2.
    """
    # NOTE: the class docstring above doubles as the in-game help entry.
    key = "imctell"
    aliases = ["imcpage", "imc2tell", "imc2page"]
    locks = "cmd: serversetting(IMC2_ENABLED)"
    help_category = "Comms"

    def func(self):
        "Send tell across IMC"
        # Abort early when IMC2 support is switched off in settings.
        if not settings.IMC2_ENABLED:
            string = """IMC is not enabled. You need to activate it in game/settings.py."""
            self.msg(string)
            return
        from src.comms.imc2 import IMC2_CLIENT
        # require the full "user@mud = message" form
        if not self.args or not '@' in self.lhs or not self.rhs:
            string = "Usage: imctell User@Mud = <msg>"
            self.msg(string)
            return
        # split only on the first '@' so mud names containing '@' survive
        target, destination = self.lhs.split("@", 1)
        message = self.rhs.strip()
        data = {"target":target, "destination":destination}
        # send to imc2
        IMC2_CLIENT.msg_imc2(message, from_obj=self.caller, packet_type="imctell", **data)
        self.msg("You paged {c%s@%s{n (over IMC): '%s'." % (target, destination, message))
# RSS connection
class CmdRSS2Chan(MuxCommand):
    """
    @rss2chan - link evennia channel to an RSS feed
    Usage:
      @rss2chan[/switches] <evennia_channel> = <rss_url>
    Switches:
      /disconnect - this will stop the feed and remove the connection to the
                    channel.
      /remove - "
      /list - show all rss->evennia mappings
    Example:
      @rss2chan rsschan = http://code.google.com/feeds/p/evennia/updates/basic
    This creates an RSS reader that connects to a given RSS feed url. Updates
    will be echoed as a title and news link to the given channel. The rate of
    updating is set with the RSS_UPDATE_INTERVAL variable in settings (default
    is every 10 minutes).
    When disconnecting you need to supply both the channel and url again so as
    to identify the connection uniquely.
    """
    # NOTE: the class docstring above doubles as the in-game help entry.
    key = "@rss2chan"
    locks = "cmd:serversetting(RSS_ENABLED) and pperm(Immortals)"
    help_category = "Comms"

    def func(self):
        "Setup the rss-channel mapping"
        # Abort early when RSS support is switched off in settings.
        if not settings.RSS_ENABLED:
            string = """RSS is not enabled. You need to activate it in game/settings.py."""
            self.msg(string)
            return
        if 'list' in self.switches:
            # show all connections
            connections = ExternalChannelConnection.objects.filter(db_external_key__startswith='rss_')
            if connections:
                table = prettytable.PrettyTable(["Evennia channel", "RSS url"])
                for conn in connections:
                    # external_config is '|'-separated; first field is the url
                    table.add_row([conn.channel.key, conn.external_config.split('|')[0]])
                string = "{wConnections to RSS:{n\n%s" % table
                self.msg(string)
            else:
                self.msg("No connections found.")
            return
        if not self.args or not self.rhs:
            string = "Usage: @rss2chan[/switches] <evennia_channel> = <rss url>"
            self.msg(string)
            return
        channel = self.lhs
        url = self.rhs
        if('disconnect' in self.switches or 'remove' in self.switches or
           'delete' in self.switches):
            # silent=True: deletion should proceed even if the evennia
            # channel itself no longer exists.
            chanmatch = find_channel(self.caller, channel, silent=True)
            if chanmatch:
                channel = chanmatch.key
            ok = rss.delete_connection(channel, url)
            if not ok:
                self.msg("RSS connection/reader could not be removed, does it exist?")
            else:
                self.msg("RSS connection destroyed.")
            return
        # creating a new reader: the evennia channel must exist.
        channel = find_channel(self.caller, channel)
        if not channel:
            return
        interval = settings.RSS_UPDATE_INTERVAL
        if not interval:
            # fall back to the documented default of 10 minutes
            interval = 10*60
        ok = rss.create_connection(channel, url, interval)
        if not ok:
            self.msg("This RSS connection already exists.")
            return
        self.msg("Connection created. Starting RSS reader.")
| |
import numpy
from astLib import astCoords, astWCS
from astropy.io import fits
from itertools import count, izip
from scipy import optimize
from scipy.ndimage import zoom
def contour_levels(x, y=[], bins=10, levels=(0.68,0.95)):
    """
    Get the contour levels corresponding to a set of percentiles (given as
    fraction of 1) for a 2d histogram.

    Parameters
    ----------
    x : array of floats
        if y is given then x must be a 1d array. If y is not given then
        x should be a 2d array
    y : array of floats (optional)
        1d array with the same number of elements as x
        (the mutable [] default is safe: y is never modified)
    bins : argument of numpy.histogram2d
    levels : list of floats between 0 and 1
        the fractional percentiles of the data that should be above the
        returned values

    Returns
    -------
    level_values : list of floats, same length as *levels*
        The values of the histogram above which the fractional percentiles
        of the data given by *levels* are

    Raises
    ------
    ValueError
        if neither one 2d array nor two equal-length 1d arrays are given
    """
    # validate input: either two 1d arrays of equal length, or one 2d array
    if len(y) > 0:
        if len(x) != len(y):
            msg = 'Invalid input for arrays; must be either 1 2d array'
            msg += ' or 2 1d arrays'
            raise ValueError(msg)
    else:
        if len(numpy.array(x).shape) != 2:
            msg = 'Invalid input for arrays; must be either 1 2d array'
            msg += ' or 2 1d arrays'
            raise ValueError(msg)
    def findlevel(lo, hist, level):
        # fraction of the histogram mass at or above *lo*, minus the target;
        # the root of this function is the requested contour level
        return 1.0 * hist[hist >= lo].sum()/hist.sum() - level
    if len(x) == len(y):
        hist, xedges, yedges = numpy.histogram2d(x, y, bins=bins)
        hist = numpy.transpose(hist)
    elif len(y) == 0:
        hist = numpy.array(x)
    # bisect the enclosed-fraction function for each requested level.
    # BUG FIX: this used to `return level`, a name that does not exist
    # (NameError on every call); return the computed list instead.
    level_values = [optimize.bisect(findlevel, hist.min(), hist.max(),
                                    args=(hist,l)) for l in levels]
    return level_values
def contours_external(ax, imgwcs, contourfile, levels, colors, lw=1):
    """
    Overlay contours taken from *contourfile* onto *ax*, whose pixel frame
    is described by *imgwcs*.
    """
    overlay_wcs = astWCS.WCS(contourfile)
    data = fits.getdata(contourfile)
    # strip leading degenerate axes until the image is 2d
    while len(data.shape) > 2:
        data = data[0]
    ny, nx = data.shape
    # corner pixels of the contour image -> sky coords -> target-image pixels
    xlo, ylo = overlay_wcs.pix2wcs(-1, -1)
    xhi, yhi = overlay_wcs.pix2wcs(nx, ny)
    xlo, ylo = imgwcs.wcs2pix(xlo, ylo)
    xhi, yhi = imgwcs.wcs2pix(xhi, yhi)
    # oversample 3x with a cubic spline for smoother contour lines
    data = zoom(data, 3, order=3)
    ax.contour(data, levels, colors=colors, linewidths=lw,
               extent=(xlo,xhi,ylo,yhi))
    return
def corner(X, config=None, names='', labels=None, bins=20, bins1d=20,
           clevels=(0.68,0.95), contour_reference='samples',
           truths=None, truths_in_1d=False, truth_color='r',
           smooth=False, likelihood=None, likesmooth=1, colors='k', cmap=None,
           ls1d='-', ls2d='solid', style1d='curve', medians1d=True,
           percentiles1d=True, background=None, bweight=None, bcolor='r',
           alpha=0.5, limits=None,
           ticks=None, show_contour=True, top_labels=False, output='',
           verbose=False, **kwargs):
    """
    Do a corner plot (e.g., with the posterior parameters of an MCMC chain).
    Note that there may still be some issues with the tick labels.

    Parameters
    ----------
    X : array-like
        all posterior parameters. Can also be the outputs of
        more than one chain, given as an array of arrays of models
        (e.g., X = [[A1, B1, C1], [A2, B2, C2]])
    config : str (optional - NOT YET IMPLEMENTED)
        name of file containing any parameters whose default values
        should be modified. Format of the file is two columns,
        where the first is the parameter name as listed here,
        and the second is the value for that parameter. If the
        parameter takes a list of values they should be comma-
        separated, and multiple entries semi-colon-separated.
        For example, a file containing
            bins  20
            bins1d  50
            colors  yellow
            ticks  2,3,4;10,11,12;3.2,3.3,3.4
        would only modify these parameters. Note that because of the
        content of the 'ticks' parameter, the chain must be a
        three-parameter model.
    names : list of strings (optional)
        Names for each of the chains. Will be used to show a legend
        in the (empty) upper corner
    labels : list of strings (optional)
        names of the parameters
    bins : int or array of ints (default 20)
        Number of bins for the contours in the off-diagonal panels.
        Should be one value per chain, one value per parameter,
        or have shape (nchains,nparams)
    bins1d : int or array of ints (default 20)
        Number of bins for the histograms or curves in the diagonal
        panels. Should be one value per chain, one value per
        parameter, or have shape (nchains,nparams)
    clevels : list of floats between 0 and 1 (default: (0.68,0.95))
        percentiles at which to show contours
    contour_reference : {'samples', 'likelihood'} (default 'samples')
        whether to draw contour on fractions of samples or
        on likelihood levels. In the former case, *clevels*
        must be floats between 0 and 1; in the latter, the
        levels of the log-likelihood.
    truths : {list of floats, 'medians', None} (default None)
        reference values for each parameter, to be shown in
        each panel
    smooth : float (optional)
        the width of the gaussian with which to smooth the
        contours in the off-diagonal panels. If no value is given,
        the contours are not smoothed.
    likelihood : array of floats (optional)
        the likelihood surface, to be shown as a histogram in the
        diagonals.
    likesmooth : int (default 1)
        the number of maxima to average over to show the
        likelihood surface
    colors : any argument taken by the *colors* argument of
        pylab.contour(), or a tuple of them if more than one
        model is to be plotted
    ls1d : {'solid','dashed','dashdot','dotted'} (default '-')
        linestyle for the diagonal plots, if style1d=='curve'.
        Can specify more than one value as a list if more than one
        model is being plotted.
    ls2d : {'solid','dashed','dashdot','dotted'} (default 'solid')
        linestyle for the contours. Can specify more than one value
        as a list if more than one model is being plotted.
    style1d : {'bar', 'step', 'stepfilled', 'curve'} (default 'curve')
        if 'curve', plot the 1d posterior as a curve; else this
        parameter is passed to the 'histtype' argument in
        pyplot.hist()
    medians1d : bool (default True)
        whether to show the medians in the diagonal panels as
        vertical lines
    percentiles1d : bool (default True)
        whether to show selected percentiles (see *clevels*) in the
        diagonal panels as vertical lines
    background : {None, 'points', 'density', 'filled'} (default None)
        If not None, then either points, a smoothed 2d histogram,
        or filled contours are plotted beneath contours.
    bweight : array-like, same length as e.g., A1
        values to color-code background points
    bcolor : color property, consistent with *background*
        color of the points or filled contours, or colormap of the
        2d density background. If truths are given they will be
        shown in red and it is therefore recommended that the
        colors be on a blue scale.
    alpha : float between 0 and 1 (default 0.5)
        transparency of the points if shown
    limits : list of length-2 lists (optional)
        a list of plot limits for each of the parameters.
    ticks : list of lists (optional)
        a list of tick positions for each parameter, to be printed
        both in the x and y axes as appropriate.
    top_labels : boolean (default False)
        whether to show axis and tick labels at the top of each
        diagonal plot
    output : string (optional)
        filename to save the plot.
    verbose : boolean
        whether to print the marginalized values per variable
    kwargs : keyword arguments to be passed to pylab.contour()

    Returns
    -------
    fig, axes_diagonal, axes_off : pylab figure and axes (diagonal and
        off-diagonal) instances
    """
    import pylab
    from numpy import append, array, digitize, exp, histogram, histogram2d
    from numpy import linspace, median, percentile, sort, transpose
    from scipy.ndimage.filters import gaussian_filter
    if style1d == 'curve':
        from scipy import interpolate
    # not yet implemented
    # NOTE(review): *options* is never used below, and neither is
    # *contour_reference* -- both look like unfinished features.
    options = _load_corner_config(config)
    # the depth of an array or list. Useful to assess the proper format of
    # arguments. Returns zero if scalar.
    depth = lambda L: (hasattr(L, '__iter__') and max(map(depth,L)) + 1) or 0
    # X is normalized below to a tuple of chains, each chain being a list
    # of per-parameter sample arrays.
    nchains = (len(X) if depth(X) > 1 else 1)
    if nchains > 1:
        ndim = len(X[0])
        nsamples = len(X[0][0])
        if background == 'points':
            # scatter backgrounds are ambiguous with multiple chains
            background = None
    else:
        ndim = len(X)
        nsamples = len(X[0])
        X = (X,)
    if nsamples == 0:
        msg = 'plottools.corner: received empty array.'
        msg += ' It is possible that you set the burn-in to be longer'
        msg += ' than the chain itself!'
        raise ValueError(msg)
    # check ticks
    if ticks is not None:
        if len(ticks) != ndim:
            print 'WARNING: number of tick lists does not match',
            print 'number of parameters'
            ticks = None
    # check limits
    if limits is not None:
        if len(limits) != ndim:
            print 'WARNING: number of limit lists does not match',
            print 'number of parameters'
            limits = None
    # check clevels - they should be fractions between 0 and 1.
    if 1 < max(clevels) <= 100:
        # percentages were given; convert to fractions
        clevels = [cl/100. for cl in clevels]
    elif max(clevels) > 100:
        msg = 'ERROR: contour levels must be between 0 and 1 or between'
        msg += ' 0 and 100'
        print msg
        exit()
    # check truths
    if truths is not None:
        if len(truths) != ndim:
            # silently ignored when the count does not match
            truths = None
    # check likelihood
    if likelihood is not None:
        msg = 'WARNING: likelihood format not right - ignoring'
        lshape = likelihood.shape
        if len(lshape) == 1:
            likelihood = [likelihood]
        # NOTE(review): for a 1d likelihood this still compares the OLD
        # lshape: lshape[0] is then nsamples, not nchains, so a valid
        # single-chain likelihood is discarded unless nsamples == nchains.
        # Also, lshape[1] would IndexError if evaluated for 1d input (it
        # is only saved by `or` short-circuiting). Confirm intent.
        if lshape[0] != nchains or lshape[1] != nsamples \
                or len(lshape) != 2:
            print msg
            likelihood = None
    try:
        # smooth was given per-parameter: lengths must agree
        if len(smooth) != len(X[0]):
            print 'WARNING: number smoothing widths must be equal to',
            print 'number of parameters'
            smooth = [0 for i in X[0]]
    except TypeError:
        # scalar smooth: broadcast to all parameters
        if smooth not in (False, None):
            smooth = [smooth for i in X[0]]
    # check the binning scheme.
    meta_bins = [bins, bins1d]
    for i, bname in enumerate(('bins','bins1d')):
        bi = meta_bins[i]
        # will fail if bi is a scalar
        try:
            bidepth = depth(bi)
        except TypeError:
            bidepth = 0
        # will be the same message in all cases below
        msg = 'ERROR: number of {0} must equal either number'.format(bname)
        msg += ' of chains or number of parameters, or have shape'
        msg += ' (nchains,nparams)'
        # this means binning will be the same for all chains
        ones = numpy.ones((nchains,ndim))
        # is it a scalar?
        if bidepth == 0:
            meta_bins[i] = bi * ones
        # or a 1d list?
        elif bidepth == 1:
            bi = numpy.array(bi)
            if len(bi) == ndim:
                # one binning per parameter, shared by all chains
                meta_bins[i] = ones * bi
            elif len(bi) == nchains:
                # one binning per chain, shared by all parameters
                meta_bins[i] = ones * bi[:,numpy.newaxis]
            else:
                print msg
                exit()
        elif (bidepth == 2 and nchains > 1 and \
                numpy.array(bi).shape != ones.shape) or \
                bidepth > 2:
            print msg
            exit()
    # both now have shape (nchains, ndim)
    bins, bins1d = meta_bins
    # figure size
    if ndim > 3:
        figsize = 2 * ndim
    else:
        figsize= 3 * ndim
    axsize = 0.85 / ndim
    if len(X) == 1:
        if isinstance(colors, basestring):
            color1d = colors
        else:
            color1d = 'k'
    else:
        if len(colors) == len(X):
            color1d = colors
        # supports up to 12 names (plot would be way overcrowded!)
        else:
            color1d = ('g', 'orange', 'c', 'm', 'b', 'y',
                       'g', 'orange', 'c', 'm', 'b', 'y')
    if isinstance(ls1d, basestring):
        ls1d = [ls1d for i in X]
    if isinstance(ls2d, basestring):
        ls2d = [ls2d for i in X]
    # all set!
    axvls = ('--', ':', '-.')
    fig = pylab.figure(figsize=(figsize,figsize))
    # diagonals first
    plot_ranges = []
    axes_diagonal = []
    # for backward compatibility
    histtype = style1d.replace('hist', 'step')
    for i in xrange(ndim):
        # panel (i,i), laid out manually from the top-left corner
        ax = pylab.axes([0.1+axsize*i, 0.95-axsize*(i+1),
                         0.95*axsize, 0.95*axsize],
                        yticks=[])
        axes_diagonal.append(ax)
        if i < ndim-1:
            ax.set_xticklabels([])
        peak = 0
        edges = []
        for m, Xm in enumerate(X):
            edges.append([])
            if style1d == 'curve':
                # NOTE(review): normed= and interpolate.spline were removed
                # from modern numpy/scipy -- flags for a future port.
                ho, e = histogram(Xm[i], bins=bins1d[m][i], normed=True)
                xo = 0.5 * (e[1:] + e[:-1])
                xn = linspace(xo.min(), xo.max(), 500)
                n = interpolate.spline(xo, ho, xn)
                ax.plot(xn, n, ls=ls1d[m], color=color1d[m])
            else:
                n, e, patches = ax.hist(Xm[i], bins=bins1d[m][i],
                                        histtype=histtype,
                                        color=color1d[m], normed=True)
            edges[-1].append(e)
            # track the tallest histogram to set the y limit below
            if n.max() > peak:
                peak = n.max()
            area = n.sum()
            if medians1d:
                ax.axvline(median(Xm[i]), ls='-', color=color1d[m])
            if verbose:
                if len(names) == len(X):
                    print names[m]
                if labels is not None:
                    print ' %s' %(labels[i]),
                    if truths is None:
                        print ''
                    else:
                        print '({0})'.format(truths[i])
                print ' ', median(Xm[i])
            for p, ls in izip(clevels, axvls):
                # symmetric percentile interval enclosing fraction p
                v = [percentile(Xm[i], 100*(1-p)/2.),
                     percentile(Xm[i], 100*(1+p)/2.)]
                if percentiles1d:
                    ax.axvline(v[0], ls=ls, color=color1d[m])
                    ax.axvline(v[1], ls=ls, color=color1d[m])
                if verbose:
                    print ' p%.1f %.2f %.2f' %(100*p, v[0], v[1])
        if likelihood is not None:
            for m, Xm, Lm, e in izip(count(), X, likelihood, edges):
                #print Lm.min(), Lm.max()
                # NOTE(review): e is edges[m], a 1-element list, so e[m]
                # only works for m == 0 and IndexErrors for later chains.
                binning = digitize(Xm[i], e[m])
                xo = 0.5 * (e[m][1:] + e[m][:-1])
                # there can be nan's because some bins have no data
                valid = array([(len(Lm[binning == ii]) > 0)
                               for ii in xrange(1, len(e[m]))])
                # per-bin: median of the *likesmooth* largest likelihoods
                Lmbinned = [median(sort(Lm[binning == ii+1])[-likesmooth:])
                            for ii, L in enumerate(valid) if L]
                #Lmbinned = array(Lmbinned) + 100
                # normalized to the histogram area
                Lmbinned = exp(Lmbinned)
                Lmbinned -= Lmbinned.min()
                Lmbinned /= Lmbinned.sum() / area
                ax.plot(xo[valid], Lmbinned, '-',
                        color=truth_color, lw=3, zorder=-10)
        if truths_in_1d and truths is not None:
            ax.axvline(truths[i], ls='-', color=truth_color,
                       zorder=10)
        if i == ndim-1 and labels is not None:
            if len(labels) >= ndim:
                ax.set_xlabel(labels[i])
        # to avoid overcrowding tick labels
        if ticks is None:
            tickloc = pylab.MaxNLocator(4)
            ax.xaxis.set_major_locator(tickloc)
        else:
            ax.set_xticks(ticks[i])
        pylab.xticks(rotation=45)
        if limits is not None:
            ax.set_xlim(*limits[i])
        pylab.ylim(0, 1.1*peak)
        if i != ndim-1:
            ax.set_xticklabels([])
        if top_labels:
            # NOTE(review): tickloc/labels may be unset here when ticks
            # or labels were provided/None -- potential NameError path.
            topax = ax.twiny()
            topax.set_xlim(*ax.get_xlim())
            topax.xaxis.set_major_locator(tickloc)
            topax.set_xlabel(labels[i])
        plot_ranges.append(ax.get_xlim())
    # lower off-diagonals
    axes_off = []
    for i in xrange(1, ndim): # vertical axes
        for j in xrange(i): # horizontal axes
            ax = pylab.axes([0.1+axsize*j, 0.95-axsize*(i+1),
                             0.95*axsize, 0.95*axsize])
            axes_off.append(ax)
            # overwritten per-chain below with the histogram extent
            extent = append(plot_ranges[j], plot_ranges[i])
            for m, Xm in enumerate(X):
                h, xe, ye = histogram2d(Xm[j], Xm[i], bins=bins[m][i])
                h = h.T
                extent = (xe[0], xe[-1], ye[0], ye[-1])
                if smooth not in (False, None):
                    h = gaussian_filter(h, (smooth[i],smooth[j]))
                levels = contour_levels(Xm[j], Xm[i], bins=bins[m][i],
                                        levels=clevels)
                if background == 'points':
                    if not (cmap is None or bweight is None):
                        ax.scatter(Xm[j], Xm[i], c=bweight, marker='.',
                                   s=4, lw=0, cmap=cmap, zorder=-10)
                    else:
                        ax.plot(Xm[j], Xm[i], ',',
                                color=bcolor, alpha=alpha, zorder=-10)
                elif background == 'density':
                    # NOTE(review): `cm` is not imported anywhere in this
                    # module -- this branch raises NameError if reached.
                    ax.imshow([Xm[i], Xm[j]], cmap=cm.Reds,
                              extent=extent)
                elif background == 'filled':
                    clvs = append(clevels, 1)
                    lvs = contour_levels(Xm[j], Xm[i], bins=bins[m][i],
                                         levels=clvs)
                    try:
                        if hasattr(bcolor[0], '__iter__'):
                            bcolor = [bc for bc in bcolor]
                    except TypeError:
                        pass
                    # fill from the outermost to the innermost contour
                    for l in xrange(len(levels), 0, -1):
                        if len(bcolor[l-1]) == 3:
                            bcolor[l-1] = [bcolor[l-1]]
                        ax.contourf(h, (lvs[l-1],lvs[l]),
                                    extent=extent, colors=bcolor[l-1])
                if show_contour:
                    ax.contour(h, levels, colors=color1d[m],
                               linestyles=ls2d[m], extent=extent,
                               zorder=10, **kwargs)
                if truths is not None:
                    #pylab.axvline(truths[j], ls='-', color=(0,0.5,1))
                    #pylab.axhline(truths[i], ls='-', color=(0,0.5,1))
                    ax.plot(truths[j], truths[i], '+',
                            color=truth_color, mew=4, ms=12, zorder=10)
            if labels is not None:
                if len(labels) == ndim:
                    if j == 0:
                        ax.set_ylabel(labels[i])
                    if i == ndim - 1:
                        ax.set_xlabel(labels[j])
            if j > 0:
                ax.set_yticklabels([])
            if i < ndim - 1:
                ax.set_xticklabels([])
            ax.set_xlim(*plot_ranges[j])
            ax.set_ylim(*plot_ranges[i])
            if ticks is not None:
                ax.set_xticks(ticks[j])
                ax.set_yticks(ticks[i])
            else:
                # to avoid overcrowding tick labels
                xloc = pylab.MaxNLocator(4)
                ax.xaxis.set_major_locator(xloc)
                yloc = pylab.MaxNLocator(4)
                ax.yaxis.set_major_locator(yloc)
            pylab.xticks(rotation=45)
    # dummy legend axes
    if len(X) > 1 and len(names) == len(X):
        lax = pylab.axes([0.1+axsize*(ndim-1), 0.95,
                          0.95*axsize, 0.95*axsize],
                         xticks=[], yticks=[])
        lax.set_frame_on(False)
        for c, model in izip(color1d, names):
            # empty plots exist only to populate the legend
            pylab.plot([], [], ls='-', lw=2, color=c, label=model)
        lg = pylab.legend(loc='center', ncol=1)
        lg.get_frame().set_alpha(0)
    if output:
        # format inferred from the last three filename characters
        pylab.savefig(output, format=output[-3:])
        pylab.close()
    return fig, axes_diagonal, axes_off
def wcslabels(wcs, xlim, ylim, xsep='00:00:01', ysep='00:00:15'):
    """
    Get WCS ticklabels

    Parameters
    ----------
    wcs : astWCS.WCS instance
        the wcs of the image to be shown
    xlim : sequence of length 2
        the minimum and maximum values of the x axis
    ylim : sequence of length 2
        the minimum and maximum values of the y axis
    xsep : string
        separation of right ascension ticks in the x axis,
        in colon-separated hms format
    ysep : string
        separation of declination ticks in the y axis, in
        colon-separated dms format

    Returns
    -------
    [xticks, xticklabels] : lists containing the positions and labels
        for right ascension hms labels
    [yticks, yticklabels] : lists containing the positions and labels
        for declination dms labels
    """
    def roundout(label):
        # carry over sexagesimal overflows: 60 sec -> 1 min, 60 min -> 1 deg/hr
        if label[2] > 59:
            label[1] += 1
            label[2] -= 60
        if label[1] > 59:
            label[0] += 1
            label[1] -= 60
        return label
    left, right = xlim
    bottom, top = ylim
    # sky coordinates of the two opposite image corners
    wcslim = [wcs.pix2wcs(left, bottom), wcs.pix2wcs(right, top)]
    ralim, declim = numpy.transpose(wcslim)
    hmslim = [astCoords.decimal2hms(x, ':') for x in ralim]
    dmslim = [astCoords.decimal2dms(y, ':') for y in declim]
    # NOTE(review): sgn is computed but never used below.
    if dmslim[0][0] == '-':
        sgn = -1
    else:
        sgn = 1
    # assumes that East is left, as usual
    xsep = numpy.array(xsep.split(':'), dtype=int)
    ysep = numpy.array(ysep.split(':'), dtype=int)
    # NOTE(review): xseconds/yseconds are computed but never used.
    xseconds = [float(h.split(':')[2]) for h in hmslim[::-1]]
    yseconds = [float(d.split(':')[2]) for d in dmslim]
    # rebuild the limits as [h, m, s] / [d, m, s] numeric triples;
    # RA is reversed because East is to the left
    xlim = []
    ylim = []
    for i in xrange(2):
        xlim.append(hmslim[-i-1].split(':'))
        xlim[i] = [int(xlim[i][0]), int(xlim[i][1]), float(xlim[i][2])]
        ylim.append(dmslim[i].split(':'))
        ylim[i] = [int(ylim[i][0]), int(ylim[i][1]), float(ylim[i][2])]
    if dmslim[0][0] == '-':
        ylim = ylim[::-1]
    # first tick: the lower limit rounded up to a multiple of the separation
    xticklabels = [numpy.array([int(x) for x in xlim[0]])]
    yticklabels = [numpy.array([int(y) for y in ylim[0]])]
    for i in xrange(3):
        if xsep[i] != 0:
            while xticklabels[0][i] % xsep[i] != 0:
                xticklabels[0][i] += 1
        if ysep[i] != 0:
            while yticklabels[0][i] % ysep[i] != 0:
                yticklabels[0][i] += 1
    xticklabels[0] = roundout(xticklabels[0])
    yticklabels[0] = roundout(yticklabels[0])
    # then step by the separation until the upper limit is passed
    while numpy.any(xticklabels[-1] + xsep < xlim[1]):
        xticklabels.append(xticklabels[-1] + xsep)
        xticklabels[-1] = roundout(xticklabels[-1])
    while numpy.any(yticklabels[-1] + ysep < ylim[1]):
        yticklabels.append(yticklabels[-1] + ysep)
        yticklabels[-1] = roundout(yticklabels[-1])
    # zero-pad each field to two digits
    for i in xrange(len(xticklabels)):
        xticklabels[i] = [('%2d' %x).replace(' ', '0')
                          for x in xticklabels[i]]
    for i in xrange(len(yticklabels)):
        yticklabels[i] = [('%2d' %y).replace(' ', '0')
                          for y in yticklabels[i]]
    # NOTE(review): this sits outside the loop above and relies on the
    # leftover loop index i (i.e. it only patches the LAST label);
    # it was probably intended to be inside the loop -- confirm.
    if -10 < ylim[0][0] < 0:
        yticklabels[i][0] = '-0%d' %abs(int(yticklabels[i][0]))
    xticklabels = [':'.join(x) for x in xticklabels]
    yticklabels = [':'.join(y) for y in yticklabels]
    # southern declinations: make sure every label carries the minus sign
    if dmslim[0][0] == '-':
        yticklabels = ['-{0}'.format(y) if y[0] != '-' else y
                       for y in yticklabels]
    # convert the labels back to decimal degrees, then to pixel positions
    x = [astCoords.hms2decimal(xtl, ':') for xtl in xticklabels]
    y = [astCoords.dms2decimal(ytl, ':') for ytl in yticklabels]
    xticks = [wcs.wcs2pix(i, min(declim))[0] for i in x]
    yticks = [wcs.wcs2pix(max(ralim), i)[1] for i in y]
    return [xticks, xticklabels], [yticks, yticklabels]
def _load_corner_config(config):
"""
Not implemented!
"""
options = {}
# is there a configuration file at all!?
if config is None:
return options
data = numpy.loadtxt(config, dtype=str, unpack=True)
for key, value in izip(*data):
values = value.split(';')
ndim = len(values)
values = [val.split(',') for val in values]
for i in xrange(ndim):
for j in xrange(len(values)):
try:
values[i][j] = float(values[i][j])
except ValueError:
pass
try:
values[i][j] = int(values[i][j])
except ValueError:
pass
if ndim == 1:
values = values[0]
options[key] = values
return options
| |
# -*- coding: utf-8 -*-
"""
Sphinx test suite utilities
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
This is largely lifted directly from the Sphinx repository,
because the Sphinx test directory isn't included with the
installable package.
The ``with_sphinx`` function has been improved to use a kwarg
instead of the first positional argument for the Sphinx
Application, which allows it to work with TestCase methods.
"""
import sys
from six import StringIO
import tempfile
import shutil
import re
from codecs import open
try:
from functools import wraps
except ImportError:
# functools is new in 2.4
wraps = lambda f: (lambda w: w)
import docutils.frontend
import docutils.parsers.rst
import docutils.writers.html4css1
import docutils.utils
from sphinx import application
from sphinx.theming import Theme
from sphinx.ext.autodoc import AutoDirective
from classycode.tests.path import path
# from nose import tools, SkipTest
# Public API of this helper module, mirroring the utilities exported by the
# upstream Sphinx test suite.
# NOTE(review): __all__ lists 'with_app' but this module defines
# 'with_sphinx' -- confirm which name is intended.
__all__ = [
    'test_root', 'test_roots', 'raises', 'raises_msg',
    'skip_if', 'skip_unless', 'skip_unless_importable', 'Struct',
    'ListOutput', 'TestApp', 'with_app', 'gen_with_app',
    'path', 'with_tempdir', 'write_file',
    'sprint', 'remove_unicode_literals',
]
# Fixture project roots, resolved relative to this file.
test_root = path(__file__).parent.joinpath('root').abspath()
test_roots = path(__file__).parent.joinpath('roots').abspath()
def _excstr(exc):
if type(exc) is tuple:
return str(tuple(map(_excstr, exc)))
return exc.__name__
def raises(exc, func, *args, **kwds):
    """
    Assert that calling ``func(*args, **kwds)`` raises *exc*; raise
    :exc:`AssertionError` if it completes without doing so.
    """
    completed = False
    try:
        func(*args, **kwds)
        completed = True
    except exc:
        pass
    if completed:
        raise AssertionError('%s did not raise %s' %
                             (func.__name__, _excstr(exc)))
def raises_msg(exc, msg, func, *args, **kwds):
    """
    Assert that ``func(*args, **kwds)`` raises *exc* and that *msg* occurs
    in the exception's text; raise :exc:`AssertionError` otherwise.
    """
    completed = False
    try:
        func(*args, **kwds)
        completed = True
    except exc as err:
        assert msg in str(err), "\"%s\" not in \"%s\"" % (msg, err)
    if completed:
        raise AssertionError('%s did not raise %s' %
                             (func.__name__, _excstr(exc)))
def skip_if(condition, msg=None):
    """Decorator to skip test if condition is true."""
    # NOTE(review): relies on ``tools.make_decorator`` and ``SkipTest``
    # from nose, but the ``from nose import tools, SkipTest`` import above
    # is commented out -- invoking the decorated test will NameError until
    # it is restored. Confirm whether nose is still a dependency.
    def deco(test):
        @tools.make_decorator(test)
        def skipper(*args, **kwds):
            # the condition is captured at decoration time, checked per call
            if condition:
                raise SkipTest(msg or 'conditional skip')
            return test(*args, **kwds)
        return skipper
    return deco
def skip_unless(condition, msg=None):
    """Decorator that runs the test only when *condition* holds, skipping
    it otherwise (the inverse of :func:`skip_if`)."""
    return skip_if(not condition, msg)
def skip_unless_importable(module, msg=None):
    """Decorator that skips the test when *module* cannot be imported."""
    try:
        __import__(module)
    except ImportError:
        importable = False
    else:
        importable = True
    return skip_if(not importable, msg)
class Struct(object):
    """Plain attribute bag: every keyword argument becomes an attribute."""

    def __init__(self, **kwds):
        for attr, value in kwds.items():
            setattr(self, attr, value)
class ListOutput(object):
    """
    Minimal file-like sink: every ``write`` appends its text to a list so
    tests can inspect exactly what was emitted.
    """

    def __init__(self, name):
        self.name = name
        self.content = []

    def write(self, text):
        self.content.append(text)

    def reset(self):
        # clear in place so outside references to the list observe the reset
        del self.content[:]
class TestApp(application.Sphinx):
    """
    A subclass of :class:`Sphinx` that runs on the test root, with some
    better default values for the initialization parameters.
    """

    def __init__(self, srcdir=None, confdir=None, outdir=None, doctreedir=None,
                 buildername='html', confoverrides=None,
                 status=None, warning=None, freshenv=None,
                 warningiserror=True, tags=None,
                 confname='conf.py', cleanenv=False):
        # Sphinx reads this module-level constant to locate the config file.
        application.CONFIG_FILENAME = confname
        # Directories removed by cleanup(); more are appended below.
        self.cleanup_trees = [test_root / 'generated']
        if srcdir is None:
            srcdir = test_root
        if srcdir == '(temp)':
            # build against a throwaway copy of the fixture root
            tempdir = path(tempfile.mkdtemp())
            self.cleanup_trees.append(tempdir)
            temproot = tempdir / 'root'
            test_root.copytree(temproot)
            srcdir = temproot
        elif srcdir == '(empty)':
            # build against an empty project with a blank conf.py
            tempdir = path(tempfile.mkdtemp())
            self.cleanup_trees.append(tempdir)
            temproot = tempdir / 'root'
            temproot.makedirs()
            (temproot / 'conf.py').write_text('')
            srcdir = temproot
        else:
            srcdir = path(srcdir)
        self.builddir = srcdir.joinpath('_build')
        self.cleanup_trees.append(self.builddir)
        if confdir is None:
            confdir = srcdir
        if outdir is None:
            outdir = srcdir.joinpath(self.builddir, buildername)
            if not outdir.isdir():
                outdir.makedirs()
            # insert at the front so it is removed before its parents
            self.cleanup_trees.insert(0, outdir)
        if doctreedir is None:
            doctreedir = srcdir.joinpath(srcdir, self.builddir, 'doctrees')
            if not doctreedir.isdir():
                doctreedir.makedirs()
            if cleanenv:
                # only discard the pickled environment when explicitly asked
                self.cleanup_trees.insert(0, doctreedir)
        if confoverrides is None:
            confoverrides = {}
        if status is None:
            status = StringIO()
        if warning is None:
            warning = ListOutput('stderr')
        if freshenv is None:
            freshenv = False
        if warningiserror is None:
            warningiserror = False
        application.Sphinx.__init__(self, srcdir, confdir, outdir, doctreedir,
                                    buildername, confoverrides, status, warning,
                                    freshenv, warningiserror, tags)

    def cleanup(self, doctrees=False):
        # NOTE(review): the *doctrees* parameter is never used -- confirm
        # whether it was meant to control doctree removal.
        # Reset global registries polluted by the build, then drop all
        # directories recorded during __init__ (ignoring errors).
        Theme.themes.clear()
        AutoDirective._registry.clear()
        for tree in self.cleanup_trees:
            shutil.rmtree(tree, True)
def with_sphinx(*args, **kwargs):
"""
Make a TestApp with args and kwargs, pass it to the test and clean up
properly.
"""
def generator(target):
if isinstance(target, type):
# decorating a class, look for test methods to decorate
for name, prop in target.__dict__.items():
if name.startswith('test'):
setattr(target, name, generator(prop))
return target
@wraps(target)
def deco(*args2, **kwargs2):
app = TestApp(*args, **kwargs)
try:
target(*args2, sphinx_app=app, **kwargs2)
finally:
app.cleanup()
return deco
return generator
def gen_with_app(*args, **kwargs):
    """Decorator factory for *generator* tests.

    The wrapped generator receives a fresh ``TestApp`` as its first
    positional argument and every yielded item is passed through.
    Cleanup runs only after the generator is exhausted, so a failing test
    leaves its build directories behind for inspection.
    """
    def generator(func):
        @wraps(func)
        def deco(*inner_args, **inner_kwargs):
            app = TestApp(*args, **kwargs)
            produced = func(app, *inner_args, **inner_kwargs)
            for item in produced:
                yield item
            # Deliberately not in a finally block: skip cleanup on failure.
            app.cleanup()
        return deco
    return generator
def with_tempdir(func):
    """Decorator: call *func* with a fresh temporary directory prepended
    to its arguments and remove the directory afterwards.

    The directory is only removed when *func* returns normally, so a
    failing test keeps its files around for debugging (matching the
    behaviour of :func:`gen_with_app`).
    """
    # Fix: the original copied only ``__name__`` by hand, dropping
    # ``__doc__``/``__module__`` etc.; ``wraps`` preserves all metadata,
    # consistent with the other decorators in this module.
    @wraps(func)
    def new_func(*args, **kwds):
        tempdir = path(tempfile.mkdtemp())
        func(tempdir, *args, **kwds)
        tempdir.rmtree()
    return new_func
def write_file(name, contents, encoding=None):
    """Write *contents* to the file *name*.

    With no *encoding* the file is opened in binary mode and unicode
    contents are ASCII-encoded first; otherwise it is opened in text
    mode with the given encoding.

    NOTE(review): this mixes Python-2 ``unicode`` with an
    ``open(..., encoding=...)`` signature (``io.open`` / Python 3).
    Confirm which ``open`` is in scope at module level; under plain
    Python 3 the ``unicode`` name would raise ``NameError``.
    """
    if encoding is None:
        mode = 'wb'
        if isinstance(contents, unicode):
            contents = contents.encode('ascii')
    else:
        mode = 'w'
    f = open(str(name), mode, encoding=encoding)
    f.write(contents)
    f.close()
def sprint(*args):
    """Write *args* to stderr, space-separated and newline-terminated,
    like ``print`` but bypassing stdout capture."""
    message = ' '.join(str(arg) for arg in args)
    sys.stderr.write(message + '\n')
_unicode_literals_re = re.compile(r'u(".*?")|u(\'.*?\')')


def remove_unicode_literals(s):
    """Strip the ``u`` prefix from unicode string literals in *s*."""
    def _strip_prefix(match):
        # Exactly one of the two alternation groups matched.
        return match.group(1) or match.group(2)
    return _unicode_literals_re.sub(_strip_prefix, s)
def make_document(source_name, contents, **kwargs):
    """Parse ``contents`` into a docutils document.

    Extra keyword arguments are applied as attributes on
    ``document.settings`` before parsing.
    """
    option_parser = docutils.frontend.OptionParser(
        components=(
            docutils.parsers.rst.Parser,
            docutils.writers.html4css1.Writer,
        ),
    )
    document = docutils.utils.new_document(
        source_name,
        option_parser.get_default_values(),
    )
    for name, value in kwargs.items():
        setattr(document.settings, name, value)
    parser = docutils.parsers.rst.Parser()
    parser.parse(contents, document)
    return document
| |
""" The text editor module for pySUMO. The TextEditor widget is the main pySUMO
widget. It contains the textual representation of the currently loaded
Ontologies allowing conventional kif editing with features such as syntax
highlighting and autocompletion.
"""
from PySide.QtCore import Qt, QRegExp, Slot, QRect, QPoint, QSize, QTimer
from PySide.QtGui import QApplication, QCompleter, QTextCursor, QWidget, QPainter
from PySide.QtGui import QFont, QSyntaxHighlighter, QShortcut, QKeySequence, QPrintDialog, QColor
from PySide.QtGui import QTextCharFormat, QDialog, QPrinter, QPrinterInfo, QPrintPreviewDialog
from collections import OrderedDict
import re
import string
from pySUMOQt.Designer.TextEditor import Ui_Form
from pySUMOQt.Widget.Widget import RWWidget
from pysumo.syntaxcontroller import Ontology
from pysumo.parser import ParseError
from pySUMOQt.Dialog import str_to_bool
class TextEditor(RWWidget, Ui_Form):
    """ Contains many features of popular text editors adapted for use with
    Ontologies such as syntax highlighting, and autocompletion. One column on
    the left of the text editor contains line numbers and another contains
    other contextual information such as whether a block of code has been
    hidden/collapsed and can be displayed/expanded later. It also contains an
    incremental search and an interface to pySUMO's settings so font size and
    family can be changed at will.

    Variables:

    - syntax_highlighter: The syntax highlighter object for the text editor.

    Methods:

    - __init__: Initializes the Object and the QPlainTextEdit
    - commit: Notifies other Widgets of changes.
    - show_autocomplete: Returns autocompletion choices.
    - getWidget: returns the QPlainTextEdit
    - numberbarPaint: Paints the numberbar
    - searchCompletion: Asks QCompleter if a whole word exists starting with user input
    - hideFrom: Starts hides all lines from the ()-block started by line
    - insertCompletion: Puts the selected Completion into the TextEditor
    """

    def __init__(self, mainwindow, settings=None):
        """ Initializes the text editor widget. """
        super(TextEditor, self).__init__(mainwindow)
        self.setupUi(self.mw)
        self.plainTextEdit.clear()
        self.plainTextEdit.setEnabled(False)
        self.plainTextEdit.show()
        self.highlighter = SyntaxHighlighter(self.plainTextEdit.document(), settings)
        self.initAutocomplete()
        self._initNumberBar()
        # Maps a 1-based line number to the list of line numbers it hides
        # (collapsed parenthesis blocks).
        self.hidden = {}
        self.printer = QPrinter(QPrinterInfo.defaultPrinter())
        self.plainTextEdit.setTextCursor(
            self.plainTextEdit.cursorForPosition(QPoint(0, 0)))
        self.canUndo = False
        self.canRedo = False
        self.ontologySelector.setCurrentIndex(-1)
        # Single-shot timer debouncing the expensive reparse while typing.
        self.timer = QTimer(self)
        self.timer.setSingleShot(True)
        self.timer.timeout.connect(self.commit)
        #Connects
        self.getWidget().textChanged.connect(self.searchCompletion)
        self.plainTextEdit.undoAvailable.connect(self.setCanUndo)
        self.plainTextEdit.redoAvailable.connect(self.setCanRedo)
        self.ontologySelector.currentIndexChanged[int].connect(
            self.showOtherOntology)
        self.plainTextEdit.textChanged.connect(self.expandIfBracketRemoved)
        self.plainTextEdit.textChanged.connect(self.setTextChanged)
        self._updateOntologySelector() #must be after connects

    @Slot()
    def setTextChanged(self):
        """Is called if the text changed signal is thrown and
        sets a timer of 3 seconds to reparse the ontology. """
        self.timer.stop()
        self.timer.start(3000)

    def setCanUndo(self, b):
        # Tracks the QPlainTextEdit's undoAvailable signal.
        self.canUndo = b

    def setCanRedo(self, b):
        # Tracks the QPlainTextEdit's redoAvailable signal.
        self.canRedo = b

    def _print_(self):
        """ Creates a print dialog with the latest text"""
        dialog = QPrintDialog()
        if dialog.exec_() == QDialog.Accepted :
            doc = self.plainTextEdit.document()
            doc.print_(dialog.printer())

    def _quickPrint_(self):
        """ No dialog, just print"""
        if self.printer is None :
            return
        doc = self.plainTextEdit.document()
        doc.print_(self.printer)

    def _printPreview_(self):
        """ Create a print preview"""
        dialog = QPrintPreviewDialog()
        dialog.paintRequested.connect(self.plainTextEdit.print_)
        dialog.exec_()

    def saveOntology(self):
        """ Save the ontology to disk"""
        idx = self.ontologySelector.currentIndex()
        ontology = self.ontologySelector.itemData(idx)
        if ontology is None :
            return
        if type(ontology) is Ontology :
            ontology.save()

    def getActiveOntology(self):
        """Return the Ontology currently selected in the combo box, or
        None when nothing is selected."""
        idx = self.ontologySelector.currentIndex()
        return self.ontologySelector.itemData(idx)

    def undo(self):
        """Undo the last local edit and reparse; fall back to the
        application-level undo when the editor has nothing to undo."""
        if self.canUndo:
            self.plainTextEdit.undo()
            try:
                self.SyntaxController.add_ontology(self.getActiveOntology(), self.plainTextEdit.toPlainText())
            except ParseError:
                # Undone text does not parse: keep it local, don't commit.
                return
            self.commit()
        else:
            super(TextEditor, self).undo()

    def redo(self):
        """Redo the last undone local edit and reparse; fall back to the
        application-level redo when the editor has nothing to redo."""
        if self.canRedo:
            self.plainTextEdit.redo()
            try:
                self.SyntaxController.add_ontology(self.getActiveOntology(), self.plainTextEdit.toPlainText())
            except ParseError:
                return
            self.commit()
        else:
            super(TextEditor, self).redo()

    def _initNumberBar(self):
        """ Init the number bar"""
        self.number_bar = NumberBar(self)
        self.number_bar.setMinimumSize(QSize(30, 0))
        self.number_bar.setObjectName("number_bar")
        self.gridLayout.addWidget(self.number_bar, 1, 0, 1, 1)
        self.plainTextEdit.blockCountChanged.connect(
            self.number_bar.adjustWidth)
        self.plainTextEdit.updateRequest.connect(
            self.number_bar.updateContents)

    @Slot(int)
    def jumpToLocation(self, location, ontology):
        """Move the cursor to text block *location* and center the view,
        but only when *ontology* names the currently active ontology."""
        if ontology == str(self.getActiveOntology()):
            textBlock = self.plainTextEdit.document().findBlockByNumber(location)
            pos = textBlock.position()
            textCursor = self.plainTextEdit.textCursor()
            textCursor.setPosition(pos)
            self.plainTextEdit.setTextCursor(textCursor)
            self.plainTextEdit.centerCursor()

    def _updateOntologySelector(self):
        """ Update the ontology selector where you can select which Ontology to show in the editor"""
        current = self.ontologySelector.currentText()
        # Disconnect while repopulating so the rebuild does not fire
        # showOtherOntology for every intermediate state.
        self.ontologySelector.currentIndexChanged[int].disconnect(self.showOtherOntology)
        self.ontologySelector.clear()
        index = -1
        count = 0
        for i in self.getIndexAbstractor().ontologies :
            if current == i.name :
                index = count
            self.ontologySelector.addItem(i.name, i)
            count = count + 1
        self.ontologySelector.setCurrentIndex(index)
        # if index == -1 :
        # the ontology was removed.
        # self.showOtherOntology(index)
        if index == -1 :
            # Previously shown ontology no longer exists: blank the editor.
            self.plainTextEdit.setEnabled(False)
            self.plainTextEdit.clear()
        self.ontologySelector.currentIndexChanged[int].connect(self.showOtherOntology)

    def setActiveOntology(self, ontology):
        """Select *ontology* (matched by name) in the ontology selector."""
        index = -1
        count = 0
        for i in self.getIndexAbstractor().ontologies :
            if ontology.name == i.name :
                index = count
                break
            count = count + 1
        self.ontologySelector.setCurrentIndex(index)

    @Slot(int)
    def showOtherOntology(self, idx):
        """ Show other ontology in the plaintextedit

        Arguments:

        - idx: The id of the current Ontologyselector
        """
        dced = False
        try:
            self.plainTextEdit.textChanged.disconnect(self.setTextChanged)
        except RuntimeError:
            # Signal was not connected; remember so we do not reconnect.
            dced = True
        idx = self.ontologySelector.currentIndex()
        if idx == -1 :
            self.plainTextEdit.setEnabled(False)
            self.plainTextEdit.clear()
            return
        ontologyname = self.ontologySelector.currentText()
        for i in self.getIndexAbstractor().ontologies:
            if i.name == ontologyname:
                self.plainTextEdit.setEnabled(True)
                self.getWidget().setPlainText(
                    self.getIndexAbstractor().get_ontology_file(i).getvalue())
                if not dced:
                    self.plainTextEdit.textChanged.connect(self.setTextChanged)
                return
        # NOTE(review): only reached when the selector names an ontology the
        # index abstractor does not know -- treated as a programming error.
        self.plainTextEdit.textChanged.connect(self.commit)
        assert False

    @Slot()
    def expandIfBracketRemoved(self):
        """ Check if a line with ( or ) was changed and expand the possible hidden lines
        """
        current_line = self.getWidget().document().findBlock(
            self.getWidget().textCursor().position()).blockNumber() + 1
        if current_line in self.hidden:
            self.toggleVisibility(current_line)

    @Slot()
    def zoomIn(self):
        """ Increase the size of the font in the TextEditor
        """
        doc = self.getWidget().document()
        font = doc.defaultFont()
        font.setPointSize(font.pointSize() + 1)
        font = QFont(font)
        doc.setDefaultFont(font)

    @Slot()
    def zoomOut(self):
        """ Decrease the size of the font in the TextEditor"""
        doc = self.getWidget().document()
        font = doc.defaultFont()
        font.setPointSize(font.pointSize() - 1)
        font = QFont(font)
        doc.setDefaultFont(font)

    @Slot()
    def expandAll(self):
        """ Expands all hidden code blocks"""
        # Iterate over a copy: toggleVisibility mutates self.hidden.
        for see in list(self.hidden.keys()):
            self.toggleVisibility(see)

    @Slot()
    def collapseAll(self):
        """ Collapse all code blocks (where possible)"""
        block = self.getWidget().document().firstBlock()
        while block.isValid():
            if block.isVisible():
                # A line with unbalanced "(" opens a collapsible block.
                if block.text().count("(") > block.text().count(")"):
                    self.toggleVisibility(block.blockNumber() + 1)
            block = block.next()

    def _hideLines(self, lines):
        # Hide each given 1-based line; 0 (no matching block) is skipped.
        for line in lines:
            if line == 0:
                continue
            block = self.getWidget().document().findBlockByNumber(line - 1)
            assert block.isVisible()
            block.setVisible(False)
            assert not block.isVisible(), "Problem with line %r" % (line)

    def _showLines(self, lines):
        """ Show the lines not visible starting by lines

        Arguments:

        - lines: The first line followed by an unvisible block
        """
        for line in lines:
            block = self.getWidget().document().findBlockByNumber(line - 1)
            block.setVisible(True)

    def getLayoutWidget(self):
        """ Returns the layout widget"""
        return self.widget

    def numberbarPaint(self, number_bar, event):
        """Paints the line numbers of the code file"""
        self.number_bar.link = []
        font_metrics = self.getWidget().fontMetrics()
        current_line = self.getWidget().document().findBlock(
            self.getWidget().textCursor().position()).blockNumber() + 1
        block = self.getWidget().firstVisibleBlock()
        line_count = block.blockNumber()
        painter = QPainter(self.number_bar)
        # TODO: second argument is color -> to settings
        painter.fillRect(
            self.number_bar.rect(), self.getWidget().palette().base())
        # Iterate over all visible text blocks in the document.
        while block.isValid():
            line_count += 1
            text = str(line_count)
            block_top = self.getWidget().blockBoundingGeometry(
                block).translated(self.getWidget().contentOffset()).top()
            if not block.isVisible():
                # Skip collapsed lines while keeping the numbering correct.
                block = block.next()
                while not block.isVisible():
                    line_count += 1
                    block = block.next()
                continue
            # Remember (top pixel, line) so clicks can be mapped to lines.
            self.number_bar.link.append((block_top, line_count))
            # Check if the position of the block is out side of the visible
            # area.
            if block_top >= event.rect().bottom():
                break
            # We want the line number for the selected line to be bold.
            if line_count == current_line:
                font = painter.font()
                font.setBold(True)
            else:
                font = painter.font()
                font.setBold(False)
            # line opens a block
            if line_count in self.hidden:
                text += "+"
                font.setUnderline(True)
            elif block.text().count("(") > block.text().count(")"):
                text += "-"
                font.setUnderline(True)
            else:
                font.setUnderline(False)
            painter.setFont(font)
            # Draw the line number right justified at the position of the
            # line.
            paint_rect = QRect(
                0, block_top, self.number_bar.width(), font_metrics.height())
            painter.drawText(paint_rect, Qt.AlignLeft, text)
            block = block.next()
        painter.end()

    def initAutocomplete(self):
        """Inits the QCompleter and gives him a list of words"""
        # Seed the completer with every distinct word of the current text.
        self.completer = QCompleter(
            list(OrderedDict.fromkeys(re.split("\\W", self.plainTextEdit.toPlainText()))))
        self.completer.setCaseSensitivity(Qt.CaseInsensitive)
        self.completer.setWidget(self.getWidget())
        self.completer.activated.connect(self.insertCompletion)

    def searchCompletion(self):
        """Searches for possible completion from QCompleter to the current text position"""
        tc = self.getWidget().textCursor()
        tc.movePosition(QTextCursor.PreviousCharacter, QTextCursor.KeepAnchor)
        if tc.selectedText() in string.whitespace:
            self.completer.popup().hide()
            return
        tc.movePosition(QTextCursor.StartOfWord, QTextCursor.KeepAnchor)
        beginning = tc.selectedText()
        # Only offer completions once a 3-character prefix exists.
        if len(beginning) >= 3:
            self.completer.setCompletionPrefix(beginning)
            self.completer.complete()
        # NOTE(review): a new QShortcut is created on every call -- looks
        # like they accumulate on the widget; confirm intended.
        shortcut = QShortcut(
            QKeySequence("Ctrl+Enter"), self.getWidget(), self.insertCompletion)

    def toggleVisibility(self, line):
        """ Shows or hides a line """
        if line in self.hidden:
            self._showLines(self.hidden[line])
            del self.hidden[line]
        else:
            self.hideFrom(line)
        # update views
        self.getWidget().hide()
        self.getWidget().show()
        self.number_bar.update()

    def hideFrom(self, line):
        """ Hides a block starting by line. Do nothing if not hidable"""
        block = self.getWidget().document().findBlockByNumber(
            line - 1)
        openB = block.text().count("(")
        closeB = block.text().count(")")
        startline = line
        # go to line >= line: block starts counting by 0
        block = self.getWidget().document().findBlockByNumber(line - 1)
        hidden = []
        assert block.isValid()
        # Walk forward until the parentheses opened on startline balance.
        while openB > closeB and block.isValid():
            assert block.isValid()
            block = block.next()
            line = block.blockNumber() + 1
            if block.isVisible():
                hidden.append(line)
            openB += block.text().count("(")
            closeB += block.text().count(")")
        if hidden == []:
            return
        self._hideLines(hidden)
        self.hidden[startline] = hidden
        # set current line in viewable area
        current_line = self.getWidget().document().findBlock(
            self.getWidget().textCursor().position()).blockNumber() + 1
        if (startline < current_line and current_line <= line):
            # Cursor was inside the collapsed range: move it just below.
            block = block.next()
            cursor = QTextCursor(block)
            self.getWidget().setTextCursor(cursor)

    @Slot(str)
    def insertCompletion(self, completion):
        """ Adds the completion to current text"""
        tc = self.getWidget().textCursor()
        tc.movePosition(QTextCursor.StartOfWord, QTextCursor.KeepAnchor)
        tc.removeSelectedText()
        tc.insertText(completion)

    def getWidget(self):
        """ Return the QPlainTextEdit Widget"""
        return self.plainTextEdit

    @Slot()
    def commit(self):
        """ Overrides commit from RWWidget. """
        idx = self.ontologySelector.currentIndex()
        if idx == -1:
            return
        ontology = self.ontologySelector.itemData(idx)
        if ontology is None:
            return
        try:
            QApplication.setOverrideCursor(Qt.BusyCursor)
            self.SyntaxController.add_ontology(ontology, self.plainTextEdit.toPlainText())
            QApplication.setOverrideCursor(Qt.ArrowCursor)
        except ParseError:
            # NOTE(review): on a parse error the BusyCursor override is
            # never restored -- confirm this is intended.
            return
        super(TextEditor, self).commit()

    @Slot()
    def refresh(self):
        """ Refreshes the content of the TextEditor (syncing with other widgets)"""
        textCursorPos = self.plainTextEdit.textCursor().position()
        super(TextEditor, self).refresh()
        dced = False
        try:
            self.plainTextEdit.textChanged.disconnect(self.setTextChanged)
        except RuntimeError:
            dced = True
        idx = self.ontologySelector.currentIndex()
        ontology = self.ontologySelector.itemData(idx)
        if ontology in self.IA.ontologies:
            f = self.IA.get_ontology_file(ontology)
            self.plainTextEdit.setPlainText(f.getvalue())
        if not dced:
            self.plainTextEdit.textChanged.connect(self.setTextChanged)
        # Restore the caret where the user had it before the refresh.
        cursor = self.plainTextEdit.textCursor()
        cursor.setPosition(textCursorPos)
        self.plainTextEdit.setTextCursor(cursor)
        self.plainTextEdit.centerCursor()
class SyntaxHighlightSetting():
    """ This class contains a single Setting for a code block in the SyntaxHighlighter.

    Variables:

    - expression: The regular expression of the syntax block
    - expression_end: If the expression has a start and an end expression
    - font_size
    - font_color
    - font_weight
    - font_style
    - font_underline
    - use_font_size
    """

    def __init__(self, expression, font_family, font_size, font_color,
                 font_weight, font_style, font_underline, use_font_size,
                 expression_end=''):
        self.expression = expression
        # Only multi-line rules carry an end expression; single-line rules
        # never define the attribute at all.
        if expression_end != '':
            self.expression_end = expression_end
        self.font_family = font_family
        self.font_size = font_size
        self.font_color = font_color
        self.font_weight = font_weight
        self.font_style = font_style
        self.font_underline = font_underline
        self.use_font_size = use_font_size
        self.createFormat()

    def createFormat(self):
        """ Create a QTextCharformat and saves it in self.class_format"""
        fmt = QTextCharFormat()
        fmt.setFontFamily(self.font_family)
        if self.use_font_size :
            fmt.setFontPointSize(self.font_size)
        fmt.setForeground(self.font_color)
        fmt.setFontWeight(self.font_weight)
        fmt.setFontItalic(self.font_style)
        fmt.setFontUnderline(self.font_underline)
        self.class_format = fmt

    def get_format(self):
        """Return the cached QTextCharFormat built by createFormat."""
        return self.class_format

    def getValues(self):
        """Return expression, color and weight as a list."""
        return [self.expression, self.font_color, self.font_weight]

    def serialize(self):
        """Encode expression, color and weight as a ``//``-separated string
        (trailing separator included)."""
        parts = [str(self.expression), str(self.font_color),
                 str(self.font_weight), '']
        return '//'.join(parts)

    def deserialize(self, string):
        """Restore expression, color and weight from serialize() output.

        NOTE: color/weight come back as plain strings and the cached
        format is not rebuilt.
        """
        fields = string.split("//")
        self.expression = fields[0]
        self.font_color = fields[1]
        self.font_weight = fields[2]
class SyntaxHighlighter(QSyntaxHighlighter):
    """Kif syntax highlighter driven by the user's settings object.

    Builds one list of single-line rules (logical operators, keywords,
    comments) and one list of multi-line rules (strings) from *settings*,
    then applies them per text block in :meth:`highlightBlock`.
    """

    def __init__(self, document, settings):
        super(SyntaxHighlighter, self).__init__(document)
        self.settings = settings
        use_font_size = self.settings.value("useHighlightingFontSize")
        use_font_size = str_to_bool(use_font_size)
        self.singleline = []
        # logical expressions highlighting
        regex = "(and|=>|not|or)(?!\w)"
        fFamily = self._getFontFamily("logicExprFontFamily")
        fSize = self._getFontSize("logicExprFontSize")
        fColor = self._getFontColor("logicExprFontColor")
        fWeight = self._getFontWeight("logicExprBoldStyle")
        fItalic = self._getFontItalic("logicExprItalicStyle")
        fUnderline = self._getFontUnderline("logicExprUnderlinedStyle")
        shSettings = SyntaxHighlightSetting(regex, fFamily, fSize, fColor, fWeight, fItalic, fUnderline, use_font_size)
        self.singleline.append(shSettings)
        # keywords highlighting
        regex = "(member|patient|agent|instance|subclass|exists|documentation|part|domain|equal|hasPurpose)[\W'\n']"
        fFamily = self._getFontFamily("keywordsFontFamily")
        fSize = self._getFontSize("keywordsFontSize")
        fColor = self._getFontColor("keywordsFontColor")
        fWeight = self._getFontWeight("keywordsBoldStyle")
        fItalic = self._getFontItalic("keywordsItalicStyle")
        fUnderline = self._getFontUnderline("keywordsUnderlinedStyle")
        shSettings = SyntaxHighlightSetting(regex, fFamily, fSize, fColor, fWeight, fItalic, fUnderline, use_font_size)
        self.singleline.append(shSettings)
        # comment highlighting
        regex = ";.*$"
        fFamily = self._getFontFamily("commentFontFamily")
        fSize = self._getFontSize("commentFontSize")
        fColor = self._getFontColor("commentFontColor")
        fWeight = self._getFontWeight("commentBoldStyle")
        fItalic = self._getFontItalic("commentItalicStyle")
        fUnderline = self._getFontUnderline("commentUnderlinedStyle")
        shSettings = SyntaxHighlightSetting(regex, fFamily, fSize, fColor, fWeight, fItalic, fUnderline, use_font_size)
        self.singleline.append(shSettings)
        self.multiline = []
        # strings highlighting
        fFamily = self._getFontFamily("stringsFontFamily")
        fSize = self._getFontSize("stringsFontSize")
        fColor = self._getFontColor("stringsFontColor")
        fWeight = self._getFontWeight("stringsBoldStyle")
        fItalic = self._getFontItalic("stringsItalicStyle")
        fUnderline = self._getFontUnderline("stringsUnderlinedStyle")
        shSettings = SyntaxHighlightSetting('"', fFamily, fSize, fColor, fWeight, fItalic, fUnderline, use_font_size, expression_end='"')
        self.multiline.append(shSettings)

    def _getFontFamily(self, propKey):
        # Settings value is used as-is (string).
        fFamily = self.settings.value(propKey)
        return fFamily

    def _getFontSize(self, propKey):
        # Settings store sizes as strings; convert to int.
        fSize = self.settings.value(propKey)
        return int(fSize)

    def _getFontColor(self, propKey):
        fColor = self.settings.value(propKey)
        return QColor(fColor)

    def _getFontItalic(self, propKey):
        fStyle = self.settings.value(propKey)
        return str_to_bool(fStyle)

    def _getFontUnderline(self, propKey):
        fUnderlined = self.settings.value(propKey)
        return str_to_bool(fUnderlined)

    def _getFontWeight(self, propKey):
        # Boolean setting mapped onto the two QFont weights used here.
        fWeight = self.settings.value(propKey)
        fWeight = str_to_bool(fWeight)
        if fWeight :
            fWeight = QFont.Bold
        else :
            fWeight = QFont.Normal
        return fWeight

    def highlightBlock(self, text):
        """Apply all highlight rules to one text block (called by Qt)."""
        for h in self.singleline:
            expression = QRegExp(h.expression)
            index = expression.indexIn(text)
            while index >= 0:
                length = expression.matchedLength()
                self.setFormat(index, length, h.get_format())
                index = expression.indexIn(text, index + length)
        for h in self.multiline:
            startIndex = 0
            self.setCurrentBlockState(0)
            expression = QRegExp(h.expression)
            expression_end = QRegExp(h.expression_end)
            # Block state 1 means the previous block ended inside the
            # construct (an unterminated string).
            if(self.previousBlockState() != 1):
                startIndex = expression.indexIn(text)
            while startIndex >= 0:
                endIndex = expression_end.indexIn(text, startIndex + 1)
                if endIndex == -1:
                    # No terminator on this line: highlight to the end and
                    # carry state into the next block.
                    self.setCurrentBlockState(1)
                    commentLength = len(text) - startIndex
                else:
                    commentLength = endIndex - startIndex + \
                        expression_end.matchedLength()
                self.setFormat(startIndex, commentLength, h.get_format())
                startIndex = expression.indexIn(
                    text, startIndex + commentLength)
class NumberBar(QWidget):
    """
    A widget for the numberbar on the left of QPlainTextEdit
    """

    def __init__(self, edit):
        QWidget.__init__(self, edit.getWidget())
        # ``edit`` is the owning TextEditor; painting is delegated to it.
        self.edit = edit
        self.adjustWidth(100)
        # (top_pixel, line_number) pairs filled in by numberbarPaint, used
        # to map a click position back to a line number.
        self.link = []

    def paintEvent(self, event):
        # The TextEditor draws the numbers; it knows about folding state.
        self.edit.numberbarPaint(self, event)
        QWidget.paintEvent(self, event)

    def adjustWidth(self, count):
        """Resize the bar so the widest line number plus a fold marker fits."""
        width = self.fontMetrics().width(str(count) + "+")
        if self.width() != width:
            self.setFixedWidth(width)

    def updateContents(self, rect, scroll):
        """Scroll or repaint the bar in sync with the text widget."""
        if scroll:
            self.scroll(0, scroll)
        else:
            # It would be nice to do
            # self.update(0, rect.y(), self.width(), rect.height())
            # But we can't because it will not remove the bold on the
            # current line if word wrap is enabled and a new block is
            # selected.
            self.update()

    def mouseDoubleClickEvent(self, event):
        """Hides the lines from the line clicked on. """
        # Walk the painted lines to find the last one above the click.
        # NOTE(review): assumes ``self.link`` is non-empty and its first
        # entry lies above the click -- otherwise ``last`` is unbound.
        for (height, line) in self.link:
            if height >= event.y():
                break
            last = line
        assert self.edit.getWidget().document().findBlockByNumber(
            last - 1).isVisible()
        self.edit.toggleVisibility(last)
| |
import os
from django.conf import settings as dj_settings
from django_statsd.clients import statsd
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Q, Search
import olympia.core.logger
log = olympia.core.logger.getLogger('z.es')
DEFAULT_HOSTS = ['localhost:9200']
DEFAULT_TIMEOUT = 5


def get_es(hosts=None, timeout=None, **settings):
    """Create an ES object and return it.

    Missing *hosts* / *timeout* fall back to ``settings.ES_HOSTS`` /
    ``settings.ES_TIMEOUT``, then to the module defaults above.
    """
    if not hosts:
        hosts = getattr(dj_settings, 'ES_HOSTS', DEFAULT_HOSTS)
    if timeout is None:
        timeout = getattr(dj_settings, 'ES_TIMEOUT', DEFAULT_TIMEOUT)
    if os.environ.get('RUNNING_IN_CI'):
        # CI talks to a container running with the stock default password.
        settings['http_auth'] = ('elastic', 'changeme')
    return Elasticsearch(hosts, timeout=timeout, **settings)
class ES(object):
    """Lazily-built, chainable Elasticsearch query, loosely modelled on a
    Django QuerySet.

    Every chaining method records an ``(action, value)`` step on a clone;
    the query body is only assembled in :meth:`_build_query` and executed
    by :meth:`raw` / :meth:`_do_search`.
    """

    def __init__(self, type_, index):
        self.type = type_          # model class; provides doc_type + ORM access
        self.index = index         # ES index name to search
        self.steps = []            # recorded (action, value) pairs
        self.start = 0             # slice offset ("from" in ES)
        self.stop = None           # slice end; None means no limit
        self.as_list = self.as_dict = False
        self._results_cache = None

    def _clone(self, next_step=None):
        """Return a copy of this query, optionally with one extra step."""
        new = self.__class__(self.type, self.index)
        new.steps = list(self.steps)
        if next_step:
            new.steps.append(next_step)
        new.start = self.start
        new.stop = self.stop
        return new

    def values(self, *fields):
        """Return results as tuples of the given fields."""
        return self._clone(next_step=('values', fields))

    def values_dict(self, *fields):
        """Return results as dicts limited to the given fields."""
        return self._clone(next_step=('values_dict', fields))

    def order_by(self, *fields):
        """Sort by *fields*; a ``-`` prefix means descending."""
        return self._clone(next_step=('order_by', fields))

    def query(self, **kw):
        """Add query clauses (``field__action=value`` style)."""
        return self._clone(next_step=('query', kw.items()))

    def filter(self, **kw):
        """Add filter clauses (``field__action=value`` style)."""
        return self._clone(next_step=('filter', kw.items()))

    def aggregate(self, **kw):
        """Add raw aggregation definitions to the request body."""
        return self._clone(next_step=('aggregate', kw.items()))

    def source(self, *fields):
        """Add extra fields to the ``_source`` the response returns."""
        return self._clone(next_step=('source', fields))

    def filter_query_string(self, query_string):
        """Apply the project's relevance-scoring search-query filter."""
        return self._clone(next_step=('filter_query_string', query_string))

    def extra(self, **kw):
        """Append several steps at once; keys must be known actions."""
        new = self._clone()
        actions = 'values values_dict order_by query filter aggregate'.split()
        for key, vals in kw.items():
            assert key in actions
            if hasattr(vals, 'items'):
                new.steps.append((key, vals.items()))
            else:
                new.steps.append((key, vals))
        return new

    def count(self):
        """Total hit count; uses a size-0 search unless already cached."""
        if self._results_cache is not None:
            return self._results_cache.count
        else:
            return self[:0].raw()['hits']['total']

    def __len__(self):
        return len(self._do_search())

    def __getitem__(self, k):
        """Slice sets from/size; an int executes and returns one result."""
        new = self._clone()
        # TODO: validate numbers and ranges
        if isinstance(k, slice):
            new.start, new.stop = k.start or 0, k.stop
            return new
        else:
            new.start, new.stop = k, k + 1
            return list(new)[0]

    def _build_query(self):
        """Fold the recorded steps into an ES request body (dict).

        Also records which result representation was requested in
        ``self.as_list`` / ``self.as_dict`` / ``self.source``.
        NOTE(review): assigning ``self.source`` here shadows the
        :meth:`source` method on this instance -- confirm intended.
        """
        query = Q()
        source = ['id']
        sort = []
        aggregations = {}
        query_string = None
        as_list = as_dict = False
        for action, value in self.steps:
            if action == 'order_by':
                for key in value:
                    if key.startswith('-'):
                        sort.append({key[1:]: 'desc'})
                    else:
                        sort.append(key)
            elif action == 'values':
                source.extend(value)
                as_list, as_dict = True, False
            elif action == 'values_dict':
                if value:
                    source.extend(value)
                as_list, as_dict = False, True
            elif action == 'query':
                query &= self._process_queries(value)
            elif action == 'filter':
                query &= self._process_filters(value)
            elif action == 'source':
                source.extend(value)
            elif action == 'aggregate':
                aggregations.update(value)
            elif action == 'filter_query_string':
                query_string = value
            else:
                raise NotImplementedError(action)
        # If we have a raw query string we are going to apply all sorts
        # of boosts and filters to improve relevance scoring.
        #
        # We are using the same rules that `search.filters:SearchQueryFilter`
        # implements to have a single-source of truth for how our
        # scoring works.
        from olympia.search.filters import SearchQueryFilter
        search = Search().query(query)
        if query_string:
            search = SearchQueryFilter().apply_search_query(
                query_string, search)
        if sort:
            search = search.sort(*sort)
        if source:
            search = search.source(source)
        body = search.to_dict()
        # These are manually added for now to simplify a partial port to
        # elasticsearch-dsl
        if self.start:
            body['from'] = self.start
        if self.stop is not None:
            body['size'] = self.stop - self.start
        if aggregations:
            body['aggs'] = aggregations
        self.source, self.as_list, self.as_dict = source, as_list, as_dict
        return body

    def _split(self, string):
        """Split ``field__action`` into (field, action); action may be None."""
        if '__' in string:
            return string.rsplit('__', 1)
        else:
            return string, None

    def _process_filters(self, value):
        """Translate ``field__action`` filter kwargs into a bool filter."""
        value = dict(value)
        filters = []
        for key, val in value.items():
            key, field_action = self._split(key)
            if field_action is None:
                filters.append(Q('term', **{key: val}))
            elif field_action == 'exists':
                if val is not True:
                    raise NotImplementedError(
                        '<field>__exists only works with a "True" value.')
                filters.append(Q('exists', **{'field': key}))
            elif field_action == 'in':
                filters.append(Q('terms', **{key: val}))
            elif field_action in ('gt', 'gte', 'lt', 'lte'):
                filters.append(Q('range', **{key: {field_action: val}}))
            elif field_action == 'range':
                from_, to = val
                filters.append(Q('range', **{key: {'gte': from_, 'lte': to}}))
        return Q('bool', filter=filters)

    def _process_queries(self, value):
        """Translate ``field__action`` query kwargs into a combined Q."""
        value = dict(value)
        query = Q()
        for key, val in value.items():
            key, field_action = self._split(key)
            if field_action is None:
                query &= Q('term', **{key: val})
            elif field_action in ('text', 'match'):
                query &= Q('match', **{key: val})
            elif field_action in ('prefix', 'startswith'):
                query &= Q('prefix', **{key: val})
            elif field_action in ('gt', 'gte', 'lt', 'lte'):
                query &= Q('range', **{key: {field_action: val}})
            elif field_action == 'fuzzy':
                query &= Q('fuzzy', **{key: val})
        return query

    def _do_search(self):
        """Execute (once) and wrap the raw response per the chosen shape."""
        if self._results_cache is None:
            hits = self.raw()
            if self.as_dict:
                ResultClass = DictSearchResults
            elif self.as_list:
                ResultClass = ListSearchResults
            else:
                ResultClass = ObjectSearchResults
            self._results_cache = ResultClass(self.type, hits, self.source)
        return self._results_cache

    def raw(self):
        """Build the body, run the search, record timing stats and return
        the raw ES response dict.  The body is logged on failure."""
        build_body = self._build_query()
        es = get_es()
        try:
            with statsd.timer('search.es.timer') as timer:
                hits = es.search(
                    body=build_body,
                    index=self.index,
                    doc_type=self.type._meta.db_table,
                )
        except Exception:
            log.error(build_body)
            raise
        statsd.timing('search.es.took', hits['took'])
        log.info('[%s] [%s] %s' % (hits['took'], timer.ms, build_body))
        return hits

    def __iter__(self):
        return iter(self._do_search())

    def raw_aggregations(self):
        """Return the raw ``aggregations`` section of the response."""
        return self._do_search().results.get('aggregations', {})

    @property
    def aggregations(self):
        """Aggregations reduced to ``{name: [buckets...]}``."""
        aggregations = {}
        raw_aggregations = self.raw_aggregations()
        for key, val in raw_aggregations.items():
            aggregations[key] = [v for v in val['buckets']]
        return aggregations
class SearchResults(object):
    """Base wrapper around a raw ES response.

    Subclasses decide how the raw hits are materialised by overriding
    :meth:`set_objects`, which must populate ``self.objects``.
    """

    def __init__(self, type, results, source):
        self.type = type
        self.results = results
        self.source = source
        self.took = results['took']
        self.count = results['hits']['total']
        self.set_objects(results['hits']['hits'])

    def set_objects(self, hits):
        """Materialise *hits* into ``self.objects``; subclass hook."""
        raise NotImplementedError()

    def __iter__(self):
        return iter(self.objects)

    def __len__(self):
        return len(self.objects)
class DictSearchResults(SearchResults):
    """Results materialised as plain dicts (the ``_source`` of each hit)."""

    def set_objects(self, hits):
        sources = [hit['_source'] for hit in hits]
        self.objects = sources
        return sources
class ListSearchResults(SearchResults):
    """Results materialised as tuples of the requested field values."""

    def set_objects(self, hits):
        # When fields are specified in `values(...)` we return the fields.
        self.objects = [tuple(hit['_source'].values()) for hit in hits]
class ObjectSearchResults(SearchResults):
    """Results materialised as model instances fetched from the database."""

    def set_objects(self, hits):
        self.ids = [int(hit['_id']) for hit in hits]
        self.objects = self.type.objects.filter(id__in=self.ids)

    def __iter__(self):
        # Preserve the ES ranking: the DB queryset comes back in arbitrary
        # order, and ids missing from the DB are silently skipped.
        by_id = {obj.id: obj for obj in self.objects}
        return (pk_id and by_id[pk_id] for pk_id in self.ids if pk_id in by_id)
| |
# -*- coding: utf-8 -*-
"""Notification tests."""
import django_dynamic_fixture as fixture
from unittest import mock
from django.contrib.auth.models import AnonymousUser, User
from django.http import HttpRequest
from django.test import TestCase
from django.test.utils import override_settings
from messages_extends.models import Message as PersistentMessage
from allauth.account.models import EmailAddress
from readthedocs.builds.models import Build
from readthedocs.notifications import Notification, SiteNotification
from readthedocs.notifications.backends import EmailBackend, SiteBackend
from readthedocs.notifications.constants import (
ERROR,
INFO_NON_PERSISTENT,
WARNING_NON_PERSISTENT,
)
from readthedocs.projects.models import Project
from readthedocs.projects.notifications import (
DeprecatedBuildWebhookNotification,
DeprecatedGitHubWebhookNotification,
)
@override_settings(
    NOTIFICATION_BACKENDS=[
        'readthedocs.notifications.backends.EmailBackend',
        'readthedocs.notifications.backends.SiteBackend',
    ],
    PRODUCTION_DOMAIN='readthedocs.org',
    SUPPORT_EMAIL='support@readthedocs.org',
)
@mock.patch('readthedocs.notifications.notification.render_to_string')
@mock.patch.object(Notification, 'send')
class NotificationTests(TestCase):
    """Tests for the Notification template/subject/context machinery.

    The class-level mock.patch decorators are applied to every test
    method; patches are injected bottom-up, so each test receives
    ``(self, send, render_to_string)``.
    """

    def test_notification_custom(self, send, render_to_string):
        """A Notification subclass derives template names from its context
        object's app, renders its subject, and exposes the expected
        context data."""
        render_to_string.return_value = 'Test'

        class TestNotification(Notification):
            name = 'foo'
            subject = 'This is {{ foo.id }}'
            context_object_name = 'foo'

        build = fixture.get(Build)
        req = mock.MagicMock()
        notify = TestNotification(context_object=build, request=req)

        # Template paths come from the context object's app label + name.
        self.assertEqual(
            notify.get_template_names('email'),
            ['builds/notifications/foo_email.html'],
        )
        self.assertEqual(
            notify.get_template_names('site'),
            ['builds/notifications/foo_site.html'],
        )
        # The subject is rendered as a Django template string.
        self.assertEqual(
            notify.get_subject(),
            'This is {}'.format(build.id),
        )
        self.assertEqual(
            notify.get_context_data(),
            {
                'foo': build,
                'production_uri': 'https://readthedocs.org',
                'request': req,
                # readthedocs_processor context
                'DASHBOARD_ANALYTICS_CODE': mock.ANY,
                'DO_NOT_TRACK_ENABLED': mock.ANY,
                'GLOBAL_ANALYTICS_CODE': mock.ANY,
                'PRODUCTION_DOMAIN': 'readthedocs.org',
                'PUBLIC_DOMAIN': mock.ANY,
                'SITE_ROOT': mock.ANY,
                'SUPPORT_EMAIL': 'support@readthedocs.org',
                'TEMPLATE_ROOT': mock.ANY,
                'USE_PROMOS': mock.ANY,
                'USE_SUBDOMAIN': mock.ANY,
            },
        )
        notify.render('site')
        render_to_string.assert_has_calls([
            mock.call(
                context=mock.ANY,
                template_name=['builds/notifications/foo_site.html'],
            ),
        ])
@mock.patch('readthedocs.notifications.notification.render_to_string')
class NotificationBackendTests(TestCase):
    """Tests for the email and on-site notification delivery backends."""
    @mock.patch('readthedocs.notifications.backends.send_email')
    def test_email_backend(self, send_email, render_to_string):
        """EmailBackend renders the notification once and emails the user."""
        render_to_string.return_value = 'Test'
        class TestNotification(Notification):
            name = 'foo'
            subject = 'This is {{ foo.id }}'
            context_object_name = 'foo'
            level = ERROR
        build = fixture.get(Build)
        req = mock.MagicMock()
        user = fixture.get(User)
        notify = TestNotification(context_object=build, request=req, user=user)
        backend = EmailBackend(request=req)
        backend.send(notify)
        self.assertEqual(render_to_string.call_count, 1)
        # The rendered notification body is wrapped in the shared email
        # templates and addressed to the notification's user.
        send_email.assert_has_calls([
            mock.call(
                request=mock.ANY,
                template='core/email/common.txt',
                context={'content': 'Test'},
                subject='This is {}'.format(build.id),
                template_html='core/email/common.html',
                recipient=user.email,
            ),
        ])
    def test_message_backend(self, render_to_string):
        """SiteBackend stores a persistent message for the notified user."""
        render_to_string.return_value = 'Test'
        class TestNotification(Notification):
            name = 'foo'
            subject = 'This is {{ foo.id }}'
            context_object_name = 'foo'
        build = fixture.get(Build)
        user = fixture.get(User)
        req = mock.MagicMock()
        notify = TestNotification(context_object=build, request=req, user=user)
        backend = SiteBackend(request=req)
        backend.send(notify)
        self.assertEqual(render_to_string.call_count, 1)
        self.assertEqual(PersistentMessage.objects.count(), 1)
        message = PersistentMessage.objects.first()
        self.assertEqual(message.user, user)
    def test_message_anonymous_user(self, render_to_string):
        """Anonymous user still raises an exception on persistent messages."""
        render_to_string.return_value = 'Test'
        class TestNotification(Notification):
            name = 'foo'
            subject = 'This is {{ foo.id }}'
            context_object_name = 'foo'
        build = fixture.get(Build)
        user = AnonymousUser()
        req = mock.MagicMock()
        notify = TestNotification(context_object=build, request=req, user=user)
        backend = SiteBackend(request=req)
        self.assertEqual(PersistentMessage.objects.count(), 0)
        # We should never be adding persistent messages for anonymous users.
        # Make sure messages_extends still throws an exception here
        with self.assertRaises(NotImplementedError):
            backend.send(notify)
        self.assertEqual(render_to_string.call_count, 1)
        self.assertEqual(PersistentMessage.objects.count(), 0)
    @mock.patch('readthedocs.notifications.backends.send_email')
    def test_non_persistent_message(self, send_email, render_to_string):
        """Non-persistent site messages skip email and are shown only once."""
        render_to_string.return_value = 'Test'
        class TestNotification(SiteNotification):
            name = 'foo'
            success_message = 'Test success message'
            success_level = INFO_NON_PERSISTENT
        user = fixture.get(User)
        # Setting the primary and verified email address of the user
        email = fixture.get(EmailAddress, user=user, primary=True, verified=True)
        n = TestNotification(user, True)
        backend = SiteBackend(request=None)
        self.assertEqual(PersistentMessage.objects.count(), 0)
        backend.send(n)
        # No email is sent for non persistent messages
        send_email.assert_not_called()
        self.assertEqual(PersistentMessage.objects.count(), 1)
        self.assertEqual(PersistentMessage.objects.filter(read=False).count(), 1)
        self.client.force_login(user)
        # Viewing the dashboard shows the message and marks it as read.
        response = self.client.get('/dashboard/')
        self.assertContains(response, 'Test success message')
        self.assertEqual(PersistentMessage.objects.count(), 1)
        self.assertEqual(PersistentMessage.objects.filter(read=True).count(), 1)
        # A second request no longer shows the already-read message.
        response = self.client.get('/dashboard/')
        self.assertNotContains(response, 'Test success message')
@override_settings(
    PRODUCTION_DOMAIN='readthedocs.org',
    SUPPORT_EMAIL='support@readthedocs.org',
)
class SiteNotificationTests(TestCase):
    """Tests for ``SiteNotification`` message selection and rendering."""
    class TestSiteNotification(SiteNotification):
        # Failure messages are keyed by a ``reason`` value; dict entries are
        # rendered as Django templates against the notification context.
        name = 'foo'
        success_message = 'simple success message'
        failure_message = {
            1: 'simple failure message',
            2: '{{ object.name }} object name',
            'three': '{{ object.name }} and {{ other.name }} render',
        }
        success_level = INFO_NON_PERSISTENT
        failure_level = WARNING_NON_PERSISTENT
    def setUp(self):
        """Build a notification with both a context object and extra context."""
        self.user = fixture.get(User)
        self.context = {'other': {'name': 'other name'}}
        self.n = self.TestSiteNotification(
            self.user,
            True,
            context_object={'name': 'object name'},
            extra_context=self.context,
        )
    def test_context_data(self):
        """Context merges object, request, extra context and site settings."""
        context = {
            'object': {'name': 'object name'},
            'request': None,
            'production_uri': 'https://readthedocs.org',
            'other': {'name': 'other name'},
            # readthedocs_processor context
            'DASHBOARD_ANALYTICS_CODE': mock.ANY,
            'DO_NOT_TRACK_ENABLED': mock.ANY,
            'GLOBAL_ANALYTICS_CODE': mock.ANY,
            'PRODUCTION_DOMAIN': 'readthedocs.org',
            'PUBLIC_DOMAIN': mock.ANY,
            'SITE_ROOT': mock.ANY,
            'SUPPORT_EMAIL': 'support@readthedocs.org',
            'TEMPLATE_ROOT': mock.ANY,
            'USE_PROMOS': mock.ANY,
            'USE_SUBDOMAIN': mock.ANY,
        }
        self.assertEqual(self.n.get_context_data(), context)
    def test_message_level(self):
        """The message level follows the success flag."""
        self.n.success = True
        self.assertEqual(self.n.get_message_level(), INFO_NON_PERSISTENT)
        self.n.success = False
        self.assertEqual(self.n.get_message_level(), WARNING_NON_PERSISTENT)
    def test_message(self):
        """get_message picks success/failure text and renders templates."""
        self.n.reason = 1
        self.assertEqual(self.n.get_message(True), 'simple success message')
        # The success message ignores the reason — there is only one.
        self.n.reason = 'three'
        self.assertEqual(self.n.get_message(True), 'simple success message')
        self.n.reason = 1
        self.assertEqual(self.n.get_message(False), 'simple failure message')
        self.n.reason = 2
        self.assertEqual(self.n.get_message(False), 'object name object name')
        self.n.reason = 'three'
        self.assertEqual(self.n.get_message(False), 'object name and other name render')
        # Invalid reason
        self.n.reason = None
        with mock.patch('readthedocs.notifications.notification.log') as mock_log:
            # An unknown reason logs an error and yields an empty message.
            self.assertEqual(self.n.get_message(False), '')
            mock_log.error.assert_called_once()
class DeprecatedWebhookEndpointNotificationTests(TestCase):
    """Tests for deprecated-webhook notifications sent to project users."""
    def setUp(self):
        # Start from a clean slate; other tests may have left messages.
        PersistentMessage.objects.all().delete()
        self.user = fixture.get(User)
        self.project = fixture.get(Project, users=[self.user])
        self.request = HttpRequest()
        self.notification = DeprecatedBuildWebhookNotification(
            self.project,
            self.request,
            self.user,
        )
    @mock.patch('readthedocs.notifications.backends.send_email')
    def test_dedupliation(self, send_email):
        """Repeated notifications deduplicate site messages but not emails."""
        # NOTE(review): method name has a typo ('dedupliation'); kept as-is
        # because renaming would change the discovered test id.
        user = fixture.get(User)
        project = fixture.get(Project, main_language_project=None)
        project.users.add(user)
        project.refresh_from_db()
        self.assertEqual(project.users.count(), 1)
        self.assertEqual(PersistentMessage.objects.filter(user=user).count(), 0)
        DeprecatedGitHubWebhookNotification.notify_project_users([project])
        # Site and email notification will go out, site message doesn't have
        # any reason to deduplicate yet
        self.assertEqual(PersistentMessage.objects.filter(user=user).count(), 1)
        self.assertTrue(send_email.called)
        send_email.reset_mock()
        self.assertFalse(send_email.called)
        # Expect the site message to deduplicate, the email won't
        DeprecatedGitHubWebhookNotification.notify_project_users([project])
        self.assertEqual(PersistentMessage.objects.filter(user=user).count(), 1)
        self.assertTrue(send_email.called)
        send_email.reset_mock()
| |
"""Support for Belkin WeMo lights."""
import asyncio
import logging
from datetime import timedelta
import requests
import async_timeout
from homeassistant import util
from homeassistant.components.light import (
Light, ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_HS_COLOR, ATTR_TRANSITION,
SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, SUPPORT_COLOR, SUPPORT_TRANSITION)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.util.color as color_util
# This platform relies on the 'wemo' component for device registration.
DEPENDENCIES = ['wemo']
# Throttle for bridge polling: at most one scan per 10s normally, one per
# 100ms when a forced (no_throttle) refresh is requested.
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(milliseconds=100)
_LOGGER = logging.getLogger(__name__)
# Feature bitmask advertised by WemoLight entities.
SUPPORT_WEMO = (SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_COLOR |
                SUPPORT_TRANSITION)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up discovered WeMo switches."""
    from pywemo import discovery
    # Nothing to do without discovery data for a specific device.
    if discovery_info is None:
        return
    ssdp_location = discovery_info['ssdp_description']
    mac_address = discovery_info['mac_address']
    try:
        wemo_device = discovery.device_from_description(
            ssdp_location, mac_address)
    except (requests.exceptions.ConnectionError,
            requests.exceptions.Timeout) as err:
        _LOGGER.error('Unable to access %s (%s)', ssdp_location, err)
        raise PlatformNotReady
    # A Dimmer is a standalone light entity; anything else is treated as a
    # WeMo Link bridge that may expose multiple lights.
    if wemo_device.model_name == 'Dimmer':
        add_entities([WemoDimmer(wemo_device)])
    else:
        setup_bridge(wemo_device, add_entities)
def setup_bridge(bridge, add_entities):
    """Set up a WeMo link."""
    known_lights = {}

    @util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
    def update_lights():
        """Update the WeMo led objects with latest info from the bridge."""
        bridge.bridge_update()
        added = []
        for light_id, device in bridge.Lights.items():
            if light_id in known_lights:
                continue
            entity = WemoLight(device, update_lights)
            known_lights[light_id] = entity
            added.append(entity)
        if added:
            add_entities(added)

    update_lights()
class WemoLight(Light):
    """Representation of a WeMo light exposed through a WeMo Link bridge."""
    def __init__(self, device, update_lights):
        """Initialize the WeMo light.

        ``device`` is the pywemo light object; ``update_lights`` is the
        throttled refresh callable created in ``setup_bridge``.
        """
        self.wemo = device
        self._state = None
        self._update_lights = update_lights
        self._available = True
        # Created later, in async_added_to_hass, so the lock binds to the
        # running event loop.
        self._update_lock = None
        self._brightness = None
        self._hs_color = None
        self._color_temp = None
        self._is_on = None
        self._name = self.wemo.name
        self._unique_id = self.wemo.uniqueID
    async def async_added_to_hass(self):
        """Wemo light added to HASS."""
        # Define inside async context so we know our event loop
        self._update_lock = asyncio.Lock()
    @property
    def unique_id(self):
        """Return the ID of this light."""
        return self._unique_id
    @property
    def name(self):
        """Return the name of the light."""
        return self._name
    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return self._brightness
    @property
    def hs_color(self):
        """Return the hs color values of this light."""
        return self._hs_color
    @property
    def color_temp(self):
        """Return the color temperature of this light in mireds."""
        return self._color_temp
    @property
    def is_on(self):
        """Return true if device is on."""
        return self._is_on
    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_WEMO
    @property
    def available(self):
        """Return if light is available."""
        return self._available
    def turn_on(self, **kwargs):
        """Turn the light on, optionally setting color/temperature/level."""
        transitiontime = int(kwargs.get(ATTR_TRANSITION, 0))
        hs_color = kwargs.get(ATTR_HS_COLOR)
        if hs_color is not None:
            # pywemo expects CIE xy color, so convert from hue/saturation.
            xy_color = color_util.color_hs_to_xy(*hs_color)
            self.wemo.set_color(xy_color, transition=transitiontime)
        if ATTR_COLOR_TEMP in kwargs:
            colortemp = kwargs[ATTR_COLOR_TEMP]
            self.wemo.set_temperature(mireds=colortemp,
                                      transition=transitiontime)
        if ATTR_BRIGHTNESS in kwargs:
            brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness or 255)
            self.wemo.turn_on(level=brightness, transition=transitiontime)
        else:
            self.wemo.turn_on(transition=transitiontime)
    def turn_off(self, **kwargs):
        """Turn the light off."""
        transitiontime = int(kwargs.get(ATTR_TRANSITION, 0))
        self.wemo.turn_off(transition=transitiontime)
    def _update(self, force_update=True):
        """Synchronize state with bridge.

        Runs in an executor thread (see _async_locked_update); blocking
        calls are fine here.
        """
        # no_throttle is consumed by the util.Throttle wrapper around
        # update_lights (see setup_bridge).
        self._update_lights(no_throttle=force_update)
        self._state = self.wemo.state
        self._is_on = self._state.get('onoff') != 0
        self._brightness = self._state.get('level', 255)
        self._color_temp = self._state.get('temperature_mireds')
        self._available = True
        xy_color = self._state.get('color_xy')
        if xy_color:
            self._hs_color = color_util.color_xy_to_hs(*xy_color)
        else:
            self._hs_color = None
    async def async_update(self):
        """Synchronize state with bridge."""
        # If an update is in progress, we don't do anything
        if self._update_lock.locked():
            return
        try:
            # shield() keeps the in-flight update running even if the
            # 5-second timeout fires; only this waiter gives up.
            with async_timeout.timeout(5):
                await asyncio.shield(self._async_locked_update(True))
        except asyncio.TimeoutError:
            _LOGGER.warning('Lost connection to %s', self.name)
            self._available = False
    async def _async_locked_update(self, force_update):
        """Try updating within an async lock."""
        async with self._update_lock:
            await self.hass.async_add_executor_job(self._update, force_update)
class WemoDimmer(Light):
    """Representation of a standalone WeMo dimmer switch."""
    def __init__(self, device):
        """Initialize the WeMo dimmer with a pywemo device object."""
        self.wemo = device
        self._state = None
        self._available = True
        # Created later, in async_added_to_hass, so the lock binds to the
        # running event loop.
        self._update_lock = None
        self._brightness = None
        self._model_name = self.wemo.model_name
        self._name = self.wemo.name
        self._serialnumber = self.wemo.serialnumber
    def _subscription_callback(self, _device, _type, _params):
        """Update the state by the Wemo device.

        Called from the subscription registry's thread; hands the async
        work back to the HA event loop via hass.add_job.
        """
        _LOGGER.debug("Subscription update for %s", self.name)
        updated = self.wemo.subscription_update(_type, _params)
        # If the push payload couldn't be applied (updated is falsy),
        # force a full poll instead.
        self.hass.add_job(
            self._async_locked_subscription_callback(not updated))
    async def _async_locked_subscription_callback(self, force_update):
        """Handle an update from a subscription."""
        # If an update is in progress, we don't do anything
        if self._update_lock.locked():
            return
        await self._async_locked_update(force_update)
        self.async_schedule_update_ha_state()
    async def async_added_to_hass(self):
        """Wemo dimmer added to HASS."""
        # Define inside async context so we know our event loop
        self._update_lock = asyncio.Lock()
        # Register for push updates from the device.
        registry = self.hass.components.wemo.SUBSCRIPTION_REGISTRY
        await self.hass.async_add_executor_job(registry.register, self.wemo)
        registry.on(self.wemo, None, self._subscription_callback)
    async def async_update(self):
        """Update WeMo state.
        Wemo has an aggressive retry logic that sometimes can take over a
        minute to return. If we don't get a state after 5 seconds, assume the
        Wemo dimmer is unreachable. If update goes through, it will be made
        available again.
        """
        # If an update is in progress, we don't do anything
        if self._update_lock.locked():
            return
        try:
            # shield() keeps the in-flight update running past the timeout.
            with async_timeout.timeout(5):
                await asyncio.shield(self._async_locked_update(True))
        except asyncio.TimeoutError:
            _LOGGER.warning('Lost connection to %s', self.name)
            self._available = False
            self.wemo.reconnect_with_device()
    async def _async_locked_update(self, force_update):
        """Try updating within an async lock."""
        async with self._update_lock:
            await self.hass.async_add_executor_job(self._update, force_update)
    @property
    def unique_id(self):
        """Return the ID of this WeMo dimmer."""
        return self._serialnumber
    @property
    def name(self):
        """Return the name of the dimmer if any."""
        return self._name
    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_BRIGHTNESS
    @property
    def brightness(self):
        """Return the brightness of this light between 0..255.

        _update converts the device's native 0-100 level to HA's 0-255
        scale before storing it here.
        """
        return self._brightness
    @property
    def is_on(self):
        """Return true if dimmer is on. Standby is on."""
        return self._state
    def _update(self, force_update=True):
        """Update the device state (runs in an executor thread)."""
        try:
            self._state = self.wemo.get_state(force_update)
            # Convert the device's 0-100 level to HA's 0-255 scale.
            wemobrightness = int(self.wemo.get_brightness(force_update))
            self._brightness = int((wemobrightness * 255) / 100)
            if not self._available:
                _LOGGER.info('Reconnected to %s', self.name)
            self._available = True
        except AttributeError as err:
            _LOGGER.warning("Could not update status for %s (%s)",
                            self.name, err)
            self._available = False
    def turn_on(self, **kwargs):
        """Turn the dimmer on."""
        self.wemo.on()
        # Wemo dimmer switches use a range of [0, 100] to control
        # brightness. Level 255 might mean to set it to previous value
        if ATTR_BRIGHTNESS in kwargs:
            brightness = kwargs[ATTR_BRIGHTNESS]
            brightness = int((brightness / 255) * 100)
        else:
            brightness = 255
        self.wemo.set_brightness(brightness)
    def turn_off(self, **kwargs):
        """Turn the dimmer off."""
        self.wemo.off()
    @property
    def available(self):
        """Return if dimmer is available."""
        return self._available
| |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'SfcMetadataAllocEnum' : _MetaInfoEnum('SfcMetadataAllocEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg',
{
'type1':'type1',
}, 'Cisco-IOS-XR-vservice-cfg', _yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg']),
'SfcMetadataType1AllocFormatEnum' : _MetaInfoEnum('SfcMetadataType1AllocFormatEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg',
{
'dc-allocation':'dc_allocation',
}, 'Cisco-IOS-XR-vservice-cfg', _yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg']),
'SfcMetadataDispositionActionEnum' : _MetaInfoEnum('SfcMetadataDispositionActionEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg',
{
'redirect-nexthop':'redirect_nexthop',
}, 'Cisco-IOS-XR-vservice-cfg', _yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg']),
'SfcSfTransportEnum' : _MetaInfoEnum('SfcSfTransportEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg',
{
'vxlan-gpe':'vxlan_gpe',
}, 'Cisco-IOS-XR-vservice-cfg', _yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg']),
'SfcMetadataDispositionMatchEnum' : _MetaInfoEnum('SfcMetadataDispositionMatchEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg',
{
'type1-dcalloc-tenant-id':'type1_dcalloc_tenant_id',
}, 'Cisco-IOS-XR-vservice-cfg', _yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg']),
'Vservice.ServiceFunctionLocator.Names.Name.Node' : {
'meta_info' : _MetaInfoClass('Vservice.ServiceFunctionLocator.Names.Name.Node',
False,
[
_MetaInfoClassMember('ipv4-destination-address', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 destination address
''',
'ipv4_destination_address',
'Cisco-IOS-XR-vservice-cfg', False),
_MetaInfoClassMember('ipv4-source-address', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 source address
''',
'ipv4_source_address',
'Cisco-IOS-XR-vservice-cfg', False),
_MetaInfoClassMember('transport', REFERENCE_ENUM_CLASS, 'SfcSfTransportEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'SfcSfTransportEnum',
[], [],
''' Transport type
''',
'transport',
'Cisco-IOS-XR-vservice-cfg', False),
_MetaInfoClassMember('vni', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' VNI
''',
'vni',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'node',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.ServiceFunctionLocator.Names.Name' : {
'meta_info' : _MetaInfoClass('Vservice.ServiceFunctionLocator.Names.Name',
False,
[
_MetaInfoClassMember('function-name', ATTRIBUTE, 'str' , None, None,
[], [b'[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Service function/forwarder name
''',
'function_name',
'Cisco-IOS-XR-vservice-cfg', True),
_MetaInfoClassMember('locator-id', ATTRIBUTE, 'int' , None, None,
[('1', '255')], [],
''' Specify locator id
''',
'locator_id',
'Cisco-IOS-XR-vservice-cfg', True),
_MetaInfoClassMember('node', REFERENCE_CLASS, 'Node' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.ServiceFunctionLocator.Names.Name.Node',
[], [],
''' configure sff/sffl
''',
'node',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'name',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.ServiceFunctionLocator.Names' : {
'meta_info' : _MetaInfoClass('Vservice.ServiceFunctionLocator.Names',
False,
[
_MetaInfoClassMember('name', REFERENCE_LIST, 'Name' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.ServiceFunctionLocator.Names.Name',
[], [],
''' service function name
''',
'name',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'names',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.ServiceFunctionLocator' : {
'meta_info' : _MetaInfoClass('Vservice.ServiceFunctionLocator',
False,
[
_MetaInfoClassMember('names', REFERENCE_CLASS, 'Names' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.ServiceFunctionLocator.Names',
[], [],
''' Mention the sf/sff name
''',
'names',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'service-function-locator',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.MetadataDispositions.MetadataDisposition.MatchEntry.Node' : {
'meta_info' : _MetaInfoClass('Vservice.MetadataDispositions.MetadataDisposition.MatchEntry.Node',
False,
[
_MetaInfoClassMember('action-type', REFERENCE_ENUM_CLASS, 'SfcMetadataDispositionActionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'SfcMetadataDispositionActionEnum',
[], [],
''' action type
''',
'action_type',
'Cisco-IOS-XR-vservice-cfg', False),
_MetaInfoClassMember('match-type', REFERENCE_ENUM_CLASS, 'SfcMetadataDispositionMatchEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'SfcMetadataDispositionMatchEnum',
[], [],
''' match type
''',
'match_type',
'Cisco-IOS-XR-vservice-cfg', False),
_MetaInfoClassMember('nexthop-ipv4-address', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 nexthop address
''',
'nexthop_ipv4_address',
'Cisco-IOS-XR-vservice-cfg', False),
_MetaInfoClassMember('tenant-id', REFERENCE_LEAFLIST, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' 24-bit tenant id
''',
'tenant_id',
'Cisco-IOS-XR-vservice-cfg', False, max_elements=4),
_MetaInfoClassMember('vrf', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF name
''',
'vrf',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'node',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.MetadataDispositions.MetadataDisposition.MatchEntry' : {
'meta_info' : _MetaInfoClass('Vservice.MetadataDispositions.MetadataDisposition.MatchEntry',
False,
[
_MetaInfoClassMember('match-entry-name', ATTRIBUTE, 'str' , None, None,
[], [b'[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' match entry name
''',
'match_entry_name',
'Cisco-IOS-XR-vservice-cfg', True),
_MetaInfoClassMember('node', REFERENCE_CLASS, 'Node' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.MetadataDispositions.MetadataDisposition.MatchEntry.Node',
[], [],
''' configure disposition data
''',
'node',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'match-entry',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.MetadataDispositions.MetadataDisposition' : {
'meta_info' : _MetaInfoClass('Vservice.MetadataDispositions.MetadataDisposition',
False,
[
_MetaInfoClassMember('disposition-name', ATTRIBUTE, 'str' , None, None,
[], [b'[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' disposition name
''',
'disposition_name',
'Cisco-IOS-XR-vservice-cfg', True),
_MetaInfoClassMember('format', REFERENCE_ENUM_CLASS, 'SfcMetadataType1AllocFormatEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'SfcMetadataType1AllocFormatEnum',
[], [],
''' Specify Format
''',
'format',
'Cisco-IOS-XR-vservice-cfg', True),
_MetaInfoClassMember('match-entry', REFERENCE_LIST, 'MatchEntry' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.MetadataDispositions.MetadataDisposition.MatchEntry',
[], [],
''' match entry name
''',
'match_entry',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'metadata-disposition',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.MetadataDispositions' : {
'meta_info' : _MetaInfoClass('Vservice.MetadataDispositions',
False,
[
_MetaInfoClassMember('metadata-disposition', REFERENCE_LIST, 'MetadataDisposition' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.MetadataDispositions.MetadataDisposition',
[], [],
''' metadata disposition name
''',
'metadata_disposition',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'metadata-dispositions',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.ServiceFunctionForwardLocator.Names.Name.Node' : {
'meta_info' : _MetaInfoClass('Vservice.ServiceFunctionForwardLocator.Names.Name.Node',
False,
[
_MetaInfoClassMember('ipv4-destination-address', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 destination address
''',
'ipv4_destination_address',
'Cisco-IOS-XR-vservice-cfg', False),
_MetaInfoClassMember('ipv4-source-address', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 source address
''',
'ipv4_source_address',
'Cisco-IOS-XR-vservice-cfg', False),
_MetaInfoClassMember('transport', REFERENCE_ENUM_CLASS, 'SfcSfTransportEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'SfcSfTransportEnum',
[], [],
''' Transport type
''',
'transport',
'Cisco-IOS-XR-vservice-cfg', False),
_MetaInfoClassMember('vni', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' VNI
''',
'vni',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'node',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.ServiceFunctionForwardLocator.Names.Name' : {
'meta_info' : _MetaInfoClass('Vservice.ServiceFunctionForwardLocator.Names.Name',
False,
[
_MetaInfoClassMember('function-name', ATTRIBUTE, 'str' , None, None,
[], [b'[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Service function/forwarder name
''',
'function_name',
'Cisco-IOS-XR-vservice-cfg', True),
_MetaInfoClassMember('locator-id', ATTRIBUTE, 'int' , None, None,
[('1', '255')], [],
''' Specify locator id
''',
'locator_id',
'Cisco-IOS-XR-vservice-cfg', True),
_MetaInfoClassMember('node', REFERENCE_CLASS, 'Node' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.ServiceFunctionForwardLocator.Names.Name.Node',
[], [],
''' configure sff/sffl
''',
'node',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'name',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.ServiceFunctionForwardLocator.Names' : {
'meta_info' : _MetaInfoClass('Vservice.ServiceFunctionForwardLocator.Names',
False,
[
_MetaInfoClassMember('name', REFERENCE_LIST, 'Name' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.ServiceFunctionForwardLocator.Names.Name',
[], [],
''' service function name
''',
'name',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'names',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.ServiceFunctionForwardLocator' : {
'meta_info' : _MetaInfoClass('Vservice.ServiceFunctionForwardLocator',
False,
[
_MetaInfoClassMember('names', REFERENCE_CLASS, 'Names' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.ServiceFunctionForwardLocator.Names',
[], [],
''' Mention the sf/sff name
''',
'names',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'service-function-forward-locator',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.MetadataTemplates.MetadataTemplate' : {
'meta_info' : _MetaInfoClass('Vservice.MetadataTemplates.MetadataTemplate',
False,
[
_MetaInfoClassMember('metadata-name', ATTRIBUTE, 'str' , None, None,
[], [b'[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' metadata name
''',
'metadata_name',
'Cisco-IOS-XR-vservice-cfg', True),
_MetaInfoClassMember('type', REFERENCE_ENUM_CLASS, 'SfcMetadataAllocEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'SfcMetadataAllocEnum',
[], [],
''' Specify Type
''',
'type',
'Cisco-IOS-XR-vservice-cfg', True),
_MetaInfoClassMember('format', REFERENCE_ENUM_CLASS, 'SfcMetadataType1AllocFormatEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'SfcMetadataType1AllocFormatEnum',
[], [],
''' Specify Format
''',
'format',
'Cisco-IOS-XR-vservice-cfg', True),
_MetaInfoClassMember('tenant-id', ATTRIBUTE, 'int' , None, None,
[('1', '16777215')], [],
''' Enter 24-bit tenant id
''',
'tenant_id',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'metadata-template',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.MetadataTemplates' : {
'meta_info' : _MetaInfoClass('Vservice.MetadataTemplates',
False,
[
_MetaInfoClassMember('metadata-template', REFERENCE_LIST, 'MetadataTemplate' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.MetadataTemplates.MetadataTemplate',
[], [],
''' metadata name, type and format
''',
'metadata_template',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'metadata-templates',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.Terminate.Node' : {
'meta_info' : _MetaInfoClass('Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.Terminate.Node',
False,
[
_MetaInfoClassMember('action', REFERENCE_ENUM_CLASS, 'SfcMetadataDispositionActionEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'SfcMetadataDispositionActionEnum',
[], [],
''' default action enum
''',
'action',
'Cisco-IOS-XR-vservice-cfg', False),
_MetaInfoClassMember('metatdata-disposition', ATTRIBUTE, 'str' , None, None,
[], [],
''' metadata-disposition name
''',
'metatdata_disposition',
'Cisco-IOS-XR-vservice-cfg', False),
_MetaInfoClassMember('nexthop-ipv4-address', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 nexthop address
''',
'nexthop_ipv4_address',
'Cisco-IOS-XR-vservice-cfg', False),
_MetaInfoClassMember('vrf', ATTRIBUTE, 'str' , None, None,
[], [],
''' nexthop vrf name
''',
'vrf',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'node',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.Terminate' : {
'meta_info' : _MetaInfoClass('Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.Terminate',
False,
[
_MetaInfoClassMember('node', REFERENCE_CLASS, 'Node' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.Terminate.Node',
[], [],
''' configure default terminate action
''',
'node',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'terminate',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SffNames.SffName.Node' : {
'meta_info' : _MetaInfoClass('Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SffNames.SffName.Node',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Service function path
''',
'enable',
'Cisco-IOS-XR-vservice-cfg', False),
_MetaInfoClassMember('reserved', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Dummy
''',
'reserved',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'node',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SffNames.SffName' : {
'meta_info' : _MetaInfoClass('Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SffNames.SffName',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[], [b'[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' SFF Name
''',
'name',
'Cisco-IOS-XR-vservice-cfg', True),
_MetaInfoClassMember('node', REFERENCE_CLASS, 'Node' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SffNames.SffName.Node',
[], [],
''' configure SFP
''',
'node',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'sff-name',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SffNames' : {
'meta_info' : _MetaInfoClass('Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SffNames',
False,
[
_MetaInfoClassMember('sff-name', REFERENCE_LIST, 'SffName' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SffNames.SffName',
[], [],
''' service function forwarder name
''',
'sff_name',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'sff-names',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SfNames.SfName.Node' : {
'meta_info' : _MetaInfoClass('Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SfNames.SfName.Node',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable Service function path
''',
'enable',
'Cisco-IOS-XR-vservice-cfg', False),
_MetaInfoClassMember('reserved', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Dummy
''',
'reserved',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'node',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SfNames.SfName' : {
'meta_info' : _MetaInfoClass('Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SfNames.SfName',
False,
[
_MetaInfoClassMember('name', ATTRIBUTE, 'str' , None, None,
[], [b'[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' SF Name
''',
'name',
'Cisco-IOS-XR-vservice-cfg', True),
_MetaInfoClassMember('node', REFERENCE_CLASS, 'Node' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SfNames.SfName.Node',
[], [],
''' configure SFP
''',
'node',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'sf-name',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SfNames' : {
'meta_info' : _MetaInfoClass('Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SfNames',
False,
[
_MetaInfoClassMember('sf-name', REFERENCE_LIST, 'SfName' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SfNames.SfName',
[], [],
''' service function name
''',
'sf_name',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'sf-names',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex' : {
'meta_info' : _MetaInfoClass('Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex',
False,
[
_MetaInfoClassMember('index', ATTRIBUTE, 'int' , None, None,
[('1', '255')], [],
''' Specify the id of service function
''',
'index',
'Cisco-IOS-XR-vservice-cfg', True),
_MetaInfoClassMember('sf-names', REFERENCE_CLASS, 'SfNames' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SfNames',
[], [],
''' service function
''',
'sf_names',
'Cisco-IOS-XR-vservice-cfg', False),
_MetaInfoClassMember('sff-names', REFERENCE_CLASS, 'SffNames' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SffNames',
[], [],
''' service function forwarder
''',
'sff_names',
'Cisco-IOS-XR-vservice-cfg', False),
_MetaInfoClassMember('terminate', REFERENCE_CLASS, 'Terminate' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.Terminate',
[], [],
''' configure terminate
''',
'terminate',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'service-index',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.ServiceFunctionPath.Paths.Path' : {
'meta_info' : _MetaInfoClass('Vservice.ServiceFunctionPath.Paths.Path',
False,
[
_MetaInfoClassMember('path-id', ATTRIBUTE, 'int' , None, None,
[('1', '16777215')], [],
''' Specify the service function path id
''',
'path_id',
'Cisco-IOS-XR-vservice-cfg', True),
_MetaInfoClassMember('service-index', REFERENCE_LIST, 'ServiceIndex' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex',
[], [],
''' specify the service index
''',
'service_index',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'path',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.ServiceFunctionPath.Paths' : {
'meta_info' : _MetaInfoClass('Vservice.ServiceFunctionPath.Paths',
False,
[
_MetaInfoClassMember('path', REFERENCE_LIST, 'Path' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.ServiceFunctionPath.Paths.Path',
[], [],
''' specify the service function path id
''',
'path',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'paths',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice.ServiceFunctionPath' : {
'meta_info' : _MetaInfoClass('Vservice.ServiceFunctionPath',
False,
[
_MetaInfoClassMember('paths', REFERENCE_CLASS, 'Paths' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.ServiceFunctionPath.Paths',
[], [],
''' service function path id
''',
'paths',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'service-function-path',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
'Vservice' : {
'meta_info' : _MetaInfoClass('Vservice',
False,
[
_MetaInfoClassMember('metadata-dispositions', REFERENCE_CLASS, 'MetadataDispositions' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.MetadataDispositions',
[], [],
''' Configure metadata disposition
''',
'metadata_dispositions',
'Cisco-IOS-XR-vservice-cfg', False),
_MetaInfoClassMember('metadata-templates', REFERENCE_CLASS, 'MetadataTemplates' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.MetadataTemplates',
[], [],
''' configure metadata imposition
''',
'metadata_templates',
'Cisco-IOS-XR-vservice-cfg', False),
_MetaInfoClassMember('service-function-forward-locator', REFERENCE_CLASS, 'ServiceFunctionForwardLocator' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.ServiceFunctionForwardLocator',
[], [],
''' configure service function forward locator
''',
'service_function_forward_locator',
'Cisco-IOS-XR-vservice-cfg', False),
_MetaInfoClassMember('service-function-locator', REFERENCE_CLASS, 'ServiceFunctionLocator' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.ServiceFunctionLocator',
[], [],
''' configure service function locator
''',
'service_function_locator',
'Cisco-IOS-XR-vservice-cfg', False),
_MetaInfoClassMember('service-function-path', REFERENCE_CLASS, 'ServiceFunctionPath' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg', 'Vservice.ServiceFunctionPath',
[], [],
''' service function path
''',
'service_function_path',
'Cisco-IOS-XR-vservice-cfg', False),
],
'Cisco-IOS-XR-vservice-cfg',
'vservice',
_yang_ns._namespaces['Cisco-IOS-XR-vservice-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_vservice_cfg'
),
},
}
# Wire each nested container's meta_info to its enclosing container so the
# meta table mirrors the YANG containment hierarchy.  Each pair below is
# (child entry key, parent entry key) in _meta_table.
for _child, _parent in [
    ('Vservice.ServiceFunctionLocator.Names.Name.Node', 'Vservice.ServiceFunctionLocator.Names.Name'),
    ('Vservice.ServiceFunctionLocator.Names.Name', 'Vservice.ServiceFunctionLocator.Names'),
    ('Vservice.ServiceFunctionLocator.Names', 'Vservice.ServiceFunctionLocator'),
    ('Vservice.MetadataDispositions.MetadataDisposition.MatchEntry.Node', 'Vservice.MetadataDispositions.MetadataDisposition.MatchEntry'),
    ('Vservice.MetadataDispositions.MetadataDisposition.MatchEntry', 'Vservice.MetadataDispositions.MetadataDisposition'),
    ('Vservice.MetadataDispositions.MetadataDisposition', 'Vservice.MetadataDispositions'),
    ('Vservice.ServiceFunctionForwardLocator.Names.Name.Node', 'Vservice.ServiceFunctionForwardLocator.Names.Name'),
    ('Vservice.ServiceFunctionForwardLocator.Names.Name', 'Vservice.ServiceFunctionForwardLocator.Names'),
    ('Vservice.ServiceFunctionForwardLocator.Names', 'Vservice.ServiceFunctionForwardLocator'),
    ('Vservice.MetadataTemplates.MetadataTemplate', 'Vservice.MetadataTemplates'),
    ('Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.Terminate.Node', 'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.Terminate'),
    ('Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SffNames.SffName.Node', 'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SffNames.SffName'),
    ('Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SffNames.SffName', 'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SffNames'),
    ('Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SfNames.SfName.Node', 'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SfNames.SfName'),
    ('Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SfNames.SfName', 'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SfNames'),
    ('Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.Terminate', 'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex'),
    ('Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SffNames', 'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex'),
    ('Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex.SfNames', 'Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex'),
    ('Vservice.ServiceFunctionPath.Paths.Path.ServiceIndex', 'Vservice.ServiceFunctionPath.Paths.Path'),
    ('Vservice.ServiceFunctionPath.Paths.Path', 'Vservice.ServiceFunctionPath.Paths'),
    ('Vservice.ServiceFunctionPath.Paths', 'Vservice.ServiceFunctionPath'),
    ('Vservice.ServiceFunctionLocator', 'Vservice'),
    ('Vservice.MetadataDispositions', 'Vservice'),
    ('Vservice.ServiceFunctionForwardLocator', 'Vservice'),
    ('Vservice.MetadataTemplates', 'Vservice'),
    ('Vservice.ServiceFunctionPath', 'Vservice'),
]:
    _meta_table[_child]['meta_info'].parent = _meta_table[_parent]['meta_info']
# Avoid leaking loop variables into the module namespace.
del _child, _parent
| |
import unittest
import unittest.mock as mock
import numpy as np
import bson
import util.dict_utils as du
import util.transform as tf
import database.client
import metadata.camera_intrinsics as cam_intr
import metadata.image_metadata as imeta
import core.tests.mock_types as mock_core
import core.image_collection
import core.image_entity as ie
import core.sequence_type
import core.trial_result
import core.benchmark
import database.tests.mock_database_client as mock_client_factory
import batch_analysis.task
import batch_analysis.tasks.generate_dataset_task as generate_dataset_task
import batch_analysis.tasks.run_system_task as run_system_task
import batch_analysis.tasks.benchmark_trial_task as benchmark_trial_task
import simulation.controllers.trajectory_follow_controller as traj_follow_controller
import batch_analysis.invalidate as invalidate
class TestInvalidateImageCollection(unittest.TestCase):
    """Tests for invalidate.invalidate_image_collection.

    setUp populates a mock database with two image collections, two systems
    and two benchmarks, plus the records that depend on each image collection:
    one DONE generate-dataset task per collection, one trajectory-follow
    controller whose trajectory_source is the collection, and a trial result
    with a DONE run-system task for every (collection, system) pair.  Each
    test invalidates the first collection and verifies that only its own
    dependants are affected.
    """
    def setUp(self):
        # mock_client_factory.create() returns an object whose .mock attribute
        # is the fake database client used throughout these tests.
        self.zombie_db_client = mock_client_factory.create()
        self.mock_db_client = self.zombie_db_client.mock
        # Create the basic image sources, systems, and benchmarks.
        self.image_collections = [make_image_collection(self.mock_db_client).identifier for _ in range(2)]
        self.systems = [self.mock_db_client.system_collection.insert_one(mock_core.MockSystem().serialize()).inserted_id
                        for _ in range(2)]
        self.benchmarks = [self.mock_db_client.benchmarks_collection.insert_one(
            mock_core.MockBenchmark().serialize()).inserted_id for _ in range(2)]
        # Add generate dataset tasks for all the image collections.
        # Maps image collection id -> id of the DONE task that produced it.
        self.generate_dataset_tasks = {
            image_collection_id: self.mock_db_client.tasks_collection.insert_one(
                generate_dataset_task.GenerateDatasetTask(
                    bson.ObjectId(), bson.ObjectId(), {}, result=image_collection_id,
                    state=batch_analysis.task.JobState.DONE).serialize()
            ).inserted_id
            for image_collection_id in self.image_collections
        }
        # Add controllers that follow the image sources
        # Maps image collection id -> id of a controller that follows it.
        self.controllers = {
            image_collection_id: self.mock_db_client.image_source_collection.insert_one(
                traj_follow_controller.TrajectoryFollowController(
                    trajectory={}, trajectory_source=image_collection_id,
                    sequence_type=core.sequence_type.ImageSequenceType.NON_SEQUENTIAL).serialize()
            ).inserted_id
            for image_collection_id in self.image_collections
        }
        # Create run system tasks and trial results
        # Both dicts map image collection id -> list with one entry per system.
        self.run_system_tasks = {}
        self.trial_results = {}
        for image_collection_id in self.image_collections:
            self.run_system_tasks[image_collection_id] = []
            self.trial_results[image_collection_id] = []
            for system_id in self.systems:
                trial_result_id = self.mock_db_client.trials_collection.insert_one(core.trial_result.TrialResult(
                    system_id, True, core.sequence_type.ImageSequenceType.NON_SEQUENTIAL, {}).serialize()).inserted_id
                self.trial_results[image_collection_id].append(trial_result_id)
                self.run_system_tasks[image_collection_id].append(self.mock_db_client.tasks_collection.insert_one(
                    run_system_task.RunSystemTask(system_id, image_collection_id, result=trial_result_id,
                                                  state=batch_analysis.task.JobState.DONE).serialize()
                ).inserted_id)
    def test_invalidate_image_collection_removes_tasks(self):
        """Both the generate-dataset and run-system tasks of the collection are removed."""
        self.assertEqual(len(self.image_collections) * (1 + len(self.systems)),
                         self.mock_db_client.tasks_collection.find().count())
        invalidate.invalidate_image_collection(self.mock_db_client, self.image_collections[0])
        # Check that the total number of tasks has gone down like we expected
        self.assertEqual((len(self.image_collections) - 1) * (1 + len(self.systems)),
                         self.mock_db_client.tasks_collection.find().count())
        # Check explicitly that each of the tasks associated with the invalidated image collection are removed
        self.assertEqual(0, self.mock_db_client.tasks_collection.find({
            '_id': self.generate_dataset_tasks[self.image_collections[0]]}).count())
        for run_system_task_id in self.run_system_tasks[self.image_collections[0]]:
            self.assertEqual(0, self.mock_db_client.tasks_collection.find({'_id': run_system_task_id}).count())
        # Check that the remaining tasks are still there.
        for i in range(1, len(self.image_collections)):
            self.assertEqual(1, self.mock_db_client.tasks_collection.find({
                '_id': self.generate_dataset_tasks[self.image_collections[i]]}).count())
            for run_system_task_id in self.run_system_tasks[self.image_collections[i]]:
                self.assertEqual(1, self.mock_db_client.tasks_collection.find({'_id': run_system_task_id}).count())
    def test_invalidate_image_collection_invalidates_descendant_trials(self):
        """invalidate_trial_result is called for every trial that ran on the collection."""
        with mock.patch('batch_analysis.invalidate.invalidate_trial_result') as mock_invalidate_trial:
            invalidate.invalidate_image_collection(self.mock_db_client, self.image_collections[0])
            # Check that all the descendant trials are invalidated
            for trial_result_id in self.trial_results[self.image_collections[0]]:
                self.assertIn(mock.call(self.mock_db_client, trial_result_id),
                              mock_invalidate_trial.call_args_list)
            # Check that the other trials are not invalidated
            for i in range(1, len(self.image_collections)):
                for trial_result_id in self.trial_results[self.image_collections[i]]:
                    self.assertNotIn(mock.call(self.mock_db_client, trial_result_id),
                                     mock_invalidate_trial.call_args_list)
    def test_invalidate_image_collection_invalidates_descendant_controllers(self):
        """invalidate_controller is called for the controller following the collection."""
        with mock.patch('batch_analysis.invalidate.invalidate_controller') as mock_invalidate_controller:
            invalidate.invalidate_image_collection(self.mock_db_client, self.image_collections[0])
            # Check that all the descendant controllers are invalidated
            self.assertIn(mock.call(self.mock_db_client, self.controllers[self.image_collections[0]]),
                          mock_invalidate_controller.call_args_list)
            # Check that the controllers of the other collections are not invalidated
            for i in range(1, len(self.image_collections)):
                self.assertNotIn(mock.call(self.mock_db_client, self.controllers[self.image_collections[i]]),
                                 mock_invalidate_controller.call_args_list)
    def test_invalidate_image_collection_removes_the_collection(self):
        """The invalidated collection document itself is deleted; the others remain."""
        invalidate.invalidate_image_collection(self.mock_db_client, self.image_collections[0])
        # Check that the image collection is removed
        self.assertEqual(0, self.mock_db_client.image_source_collection.find({
            '_id': self.image_collections[0]}).count())
        # Check that the other collections are still here
        for i in range(1, len(self.image_collections)):
            self.assertEqual(1, self.mock_db_client.image_source_collection.find({
                '_id': self.image_collections[i]}).count())
    def test_invalidate_image_collection_removes_images(self):
        """Images belonging to the invalidated collection are deleted, others kept."""
        # Collect the image ids
        # Collections serialize their images as (index, image_id) pairs — the
        # second element of each pair is the image document id.
        removed_ids = []
        kept_ids = []
        for s_image_collection in self.mock_db_client.image_source_collection.find({'images': {'$exists': True}},
                                                                                   {'_id': True, 'images': True}):
            if s_image_collection['_id'] == self.image_collections[0]:
                removed_ids += [image_id for _, image_id in s_image_collection['images']]
            else:
                kept_ids += [image_id for _, image_id in s_image_collection['images']]
        invalidate.invalidate_image_collection(self.mock_db_client, self.image_collections[0])
        # Check the removed ids are gone
        for image_id in removed_ids:
            self.assertEqual(0, self.mock_db_client.image_collection.find({'_id': image_id}).count())
        # Check the images of the other collections survive
        for image_id in kept_ids:
            self.assertEqual(1, self.mock_db_client.image_collection.find({'_id': image_id}).count())
class TestInvalidateController(unittest.TestCase):
    """Tests for invalidate.invalidate_controller.

    The fixture holds two trajectory-follow controllers; each one has exactly
    one generated image collection and one DONE generate-dataset task that
    records that result.  Invalidating the first controller must remove only
    its own task and controller document, and must cascade to only its own
    generated image collection.
    """
    def setUp(self):
        self.zombie_db_client = mock_client_factory.create()
        self.mock_db_client = self.zombie_db_client.mock
        # Two controllers stored as image sources, neither following anything.
        self.controllers = []
        for _ in range(2):
            serialized_controller = traj_follow_controller.TrajectoryFollowController(
                trajectory={}, trajectory_source=None,
                sequence_type=core.sequence_type.ImageSequenceType.NON_SEQUENTIAL).serialize()
            insert_result = self.mock_db_client.image_source_collection.insert_one(serialized_controller)
            self.controllers.append(insert_result.inserted_id)
        # One generated image collection and one DONE generate-dataset task
        # per controller, both keyed by the controller's id.
        self.generate_dataset_tasks = {}
        self.image_collections = {}
        for controller_id in self.controllers:
            collection_id = make_image_collection(self.mock_db_client).identifier
            self.image_collections[controller_id] = collection_id
            task = generate_dataset_task.GenerateDatasetTask(
                simulator_id=bson.ObjectId(), controller_id=controller_id, simulator_config={},
                result=collection_id, state=batch_analysis.task.JobState.DONE)
            self.generate_dataset_tasks[controller_id] = \
                self.mock_db_client.tasks_collection.insert_one(task.serialize()).inserted_id
    def test_invalidate_controller_removes_tasks(self):
        """Invalidating a controller deletes exactly its generate-dataset task."""
        tasks = self.mock_db_client.tasks_collection
        self.assertEqual(len(self.image_collections), tasks.find().count())
        invalidate.invalidate_controller(self.mock_db_client, self.controllers[0])
        # The overall task count drops by exactly one.
        self.assertEqual(len(self.image_collections) - 1, tasks.find().count())
        # The invalidated controller's task is the one that disappeared.
        removed_task_id = self.generate_dataset_tasks[self.controllers[0]]
        self.assertEqual(0, tasks.find({'_id': removed_task_id}).count())
        # Every other controller's task is untouched.
        for other_controller_id in self.controllers[1:]:
            surviving_task_id = self.generate_dataset_tasks[other_controller_id]
            self.assertEqual(1, tasks.find({'_id': surviving_task_id}).count())
    def test_invalidate_controller_invalidates_generated_image_collections(self):
        """The cascade hits only the image collection this controller generated."""
        with mock.patch('batch_analysis.invalidate.invalidate_image_collection') as mock_invalidate_source:
            invalidate.invalidate_controller(self.mock_db_client, self.controllers[0])
            recorded_calls = mock_invalidate_source.call_args_list
            # The generated collection of the invalidated controller is cascaded.
            self.assertIn(mock.call(self.mock_db_client, self.image_collections[self.controllers[0]]),
                          recorded_calls)
            # Collections generated by other controllers are left alone.
            for other_controller_id in self.controllers[1:]:
                self.assertNotIn(mock.call(self.mock_db_client, self.image_collections[other_controller_id]),
                                 recorded_calls)
    def test_invalidate_controller_removes_controller(self):
        """The controller document itself is deleted; the other controllers remain."""
        invalidate.invalidate_controller(self.mock_db_client, self.controllers[0])
        image_sources = self.mock_db_client.image_source_collection
        self.assertEqual(0, image_sources.find({'_id': self.controllers[0]}).count())
        for other_controller_id in self.controllers[1:]:
            self.assertEqual(1, image_sources.find({'_id': other_controller_id}).count())
class TestInvalidateSystem(unittest.TestCase):
    """Tests for invalidate.invalidate_system.

    setUp creates two image collections, two systems and two benchmarks, and
    for every (system, image collection) pair a trial result plus a DONE
    run-system task.  Each test invalidates the first system and verifies
    that only its tasks, trials, and the system document itself are affected.
    """
    def setUp(self):
        self.zombie_db_client = mock_client_factory.create()
        self.mock_db_client = self.zombie_db_client.mock
        # Create the basic image sources, systems, and benchmarks.
        self.image_collections = [make_image_collection(self.mock_db_client).identifier for _ in range(2)]
        self.systems = [self.mock_db_client.system_collection.insert_one(mock_core.MockSystem().serialize()).inserted_id
                        for _ in range(2)]
        self.benchmarks = [self.mock_db_client.benchmarks_collection.insert_one(
            mock_core.MockBenchmark().serialize()).inserted_id for _ in range(2)]
        # Create run system tasks and trial results
        # Both dicts map system id -> list with one entry per image collection.
        self.run_system_tasks = {}
        self.trial_results = {}
        for system_id in self.systems:
            self.run_system_tasks[system_id] = []
            self.trial_results[system_id] = []
            for image_collection_id in self.image_collections:
                trial_result_id = self.mock_db_client.trials_collection.insert_one(core.trial_result.TrialResult(
                    system_id, True, core.sequence_type.ImageSequenceType.NON_SEQUENTIAL, {}).serialize()).inserted_id
                self.trial_results[system_id].append(trial_result_id)
                self.run_system_tasks[system_id].append(self.mock_db_client.tasks_collection.insert_one(
                    run_system_task.RunSystemTask(system_id, image_collection_id, result=trial_result_id,
                                                  state=batch_analysis.task.JobState.DONE).serialize()
                ).inserted_id)
    def test_invalidate_system_removes_tasks(self):
        """Only the run-system tasks belonging to the invalidated system are removed."""
        self.assertEqual(len(self.image_collections) * len(self.systems),
                         self.mock_db_client.tasks_collection.find().count())
        invalidate.invalidate_system(self.mock_db_client, self.systems[0])
        # Check that the total number of tasks has gone down like we expected
        self.assertEqual(len(self.image_collections) * (len(self.systems) - 1),
                         self.mock_db_client.tasks_collection.find().count())
        # Check explicitly that each of the tasks associated with the invalidated system are removed
        for run_system_task_id in self.run_system_tasks[self.systems[0]]:
            self.assertEqual(0, self.mock_db_client.tasks_collection.find({'_id': run_system_task_id}).count())
        # Check that the remaining tasks are still there.
        for i in range(1, len(self.systems)):
            for run_system_task_id in self.run_system_tasks[self.systems[i]]:
                self.assertEqual(1, self.mock_db_client.tasks_collection.find({'_id': run_system_task_id}).count())
    def test_invalidate_system_invalidates_trials(self):
        """invalidate_trial_result is called for every trial produced by the system."""
        with mock.patch('batch_analysis.invalidate.invalidate_trial_result') as mock_invalidate_trial:
            invalidate.invalidate_system(self.mock_db_client, self.systems[0])
            # Check that all the descendant trials are invalidated
            for trial_result_id in self.trial_results[self.systems[0]]:
                self.assertIn(mock.call(self.mock_db_client, trial_result_id),
                              mock_invalidate_trial.call_args_list)
            # Check that the other trials are not invalidated
            for i in range(1, len(self.systems)):
                for trial_result_id in self.trial_results[self.systems[i]]:
                    self.assertNotIn(mock.call(self.mock_db_client, trial_result_id),
                                     mock_invalidate_trial.call_args_list)
    def test_invalidate_system_removes_system(self):
        """The system document itself is deleted; the other systems remain."""
        invalidate.invalidate_system(self.mock_db_client, self.systems[0])
        # Check that the system is removed
        self.assertEqual(0, self.mock_db_client.system_collection.find({
            '_id': self.systems[0]}).count())
        # Check that the other systems are still here
        for i in range(1, len(self.systems)):
            self.assertEqual(1, self.mock_db_client.system_collection.find({
                '_id': self.systems[i]}).count())
class TestInvalidateTrial(unittest.TestCase):
    """Tests for invalidate.invalidate_trial_result.

    setUp creates a trial result (with its DONE run-system task) for every
    (image collection, system) pair, and for each trial a benchmark result
    (with its DONE benchmark-trial task) per benchmark.  Each test invalidates
    the first trial and verifies that only its tasks, benchmark results, and
    the trial document itself are affected.
    """
    def setUp(self):
        self.zombie_db_client = mock_client_factory.create()
        self.mock_db_client = self.zombie_db_client.mock
        # Create the basic image sources, systems, and benchmarks.
        self.image_collections = [make_image_collection(self.mock_db_client).identifier for _ in range(2)]
        self.systems = [self.mock_db_client.system_collection.insert_one(mock_core.MockSystem().serialize()).inserted_id
                        for _ in range(2)]
        self.benchmarks = [self.mock_db_client.benchmarks_collection.insert_one(
            mock_core.MockBenchmark().serialize()).inserted_id for _ in range(2)]
        # Create run system tasks and trial results
        # run_system_tasks maps trial result id -> the task that produced it.
        self.run_system_tasks = {}
        self.trial_results = []
        for image_collection_id in self.image_collections:
            for system_id in self.systems:
                trial_result_id = self.mock_db_client.trials_collection.insert_one(core.trial_result.TrialResult(
                    system_id, True, core.sequence_type.ImageSequenceType.NON_SEQUENTIAL, {}).serialize()).inserted_id
                self.trial_results.append(trial_result_id)
                self.run_system_tasks[trial_result_id] = self.mock_db_client.tasks_collection.insert_one(
                    run_system_task.RunSystemTask(system_id, image_collection_id, result=trial_result_id,
                                                  state=batch_analysis.task.JobState.DONE).serialize()
                ).inserted_id
        # Both dicts map trial result id -> list with one entry per benchmark.
        self.benchmark_trial_tasks = {}
        self.benchmark_results = {}
        for trial_result_id in self.trial_results:
            self.benchmark_trial_tasks[trial_result_id] = []
            self.benchmark_results[trial_result_id] = []
            for benchmark_id in self.benchmarks:
                result_id = self.mock_db_client.results_collection.insert_one(
                    core.benchmark.BenchmarkResult(benchmark_id, trial_result_id, True).serialize()).inserted_id
                self.benchmark_results[trial_result_id].append(result_id)
                self.benchmark_trial_tasks[trial_result_id].append(
                    self.mock_db_client.tasks_collection.insert_one(
                        benchmark_trial_task.BenchmarkTrialTask(
                            trial_result_id, benchmark_id,
                            result=result_id, state=batch_analysis.task.JobState.DONE).serialize()
                    ).inserted_id
                )
    def test_invalidate_trial_removes_tasks(self):
        """Both the run-system and benchmark-trial tasks of the trial are removed."""
        self.assertEqual(len(self.trial_results) * (1 + len(self.benchmarks)),
                         self.mock_db_client.tasks_collection.find().count())
        invalidate.invalidate_trial_result(self.mock_db_client, self.trial_results[0])
        # Check that the total number of tasks has gone down like we expected
        self.assertEqual((len(self.trial_results) - 1) * (1 + len(self.benchmarks)),
                         self.mock_db_client.tasks_collection.find().count())
        # Check explicitly that each of the tasks associated with the invalidated trial are removed
        self.assertEqual(0, self.mock_db_client.tasks_collection.find({
            '_id': self.run_system_tasks[self.trial_results[0]]}).count())
        for benchmark_task in self.benchmark_trial_tasks[self.trial_results[0]]:
            self.assertEqual(0, self.mock_db_client.tasks_collection.find({'_id': benchmark_task}).count())
        # Check that the remaining tasks are still there.
        for i in range(1, len(self.trial_results)):
            self.assertEqual(1, self.mock_db_client.tasks_collection.find({
                '_id': self.run_system_tasks[self.trial_results[i]]}).count())
            for benchmark_task in self.benchmark_trial_tasks[self.trial_results[i]]:
                self.assertEqual(1, self.mock_db_client.tasks_collection.find({'_id': benchmark_task}).count())
    def test_invalidate_trial_invalidates_results(self):
        """invalidate_benchmark_result is called for every result derived from the trial."""
        with mock.patch('batch_analysis.invalidate.invalidate_benchmark_result') as mock_invalidate_result:
            invalidate.invalidate_trial_result(self.mock_db_client, self.trial_results[0])
            # Check that all the descendant results are invalidated
            for result_id in self.benchmark_results[self.trial_results[0]]:
                self.assertIn(mock.call(self.mock_db_client, result_id),
                              mock_invalidate_result.call_args_list)
            # Check that the other results are not invalidated
            for i in range(1, len(self.trial_results)):
                for result_id in self.benchmark_results[self.trial_results[i]]:
                    self.assertNotIn(mock.call(self.mock_db_client, result_id),
                                     mock_invalidate_result.call_args_list)
    def test_invalidate_trial_removes_trial(self):
        """The trial document itself is deleted; the other trials remain."""
        invalidate.invalidate_trial_result(self.mock_db_client, self.trial_results[0])
        # Check that the trial result is removed
        self.assertEqual(0, self.mock_db_client.trials_collection.find({
            '_id': self.trial_results[0]}).count())
        # Check that the other trial results are still here
        for i in range(1, len(self.trial_results)):
            self.assertEqual(1, self.mock_db_client.trials_collection.find({
                '_id': self.trial_results[i]}).count())
class TestInvalidateBenchmark(unittest.TestCase):
    """Tests for invalidate.invalidate_benchmark.

    Removing a benchmark must also remove its benchmark-trial tasks and
    invalidate its results, without touching tasks/results belonging to
    other benchmarks.
    """

    def setUp(self):
        self.zombie_db_client = mock_client_factory.create()
        self.mock_db_client = self.zombie_db_client.mock
        # Create the basic trial results and benchmarks
        self.trial_results = [self.mock_db_client.trials_collection.insert_one(
            core.trial_result.TrialResult(
                bson.ObjectId(), True, core.sequence_type.ImageSequenceType.NON_SEQUENTIAL, {}).serialize()
        ).inserted_id for _ in range(2)]
        self.benchmarks = [self.mock_db_client.benchmarks_collection.insert_one(
            mock_core.MockBenchmark().serialize()).inserted_id for _ in range(2)]
        # Maps benchmark id -> list of task ids / result ids created for it,
        # so each test can check exactly which entities should be removed.
        self.benchmark_trial_tasks = {}
        self.benchmark_results = {}
        # Create the benchmark results and tasks: one result and one DONE task
        # per (benchmark, trial result) pair.
        for benchmark_id in self.benchmarks:
            self.benchmark_trial_tasks[benchmark_id] = []
            self.benchmark_results[benchmark_id] = []
            for trial_result_id in self.trial_results:
                result_id = self.mock_db_client.results_collection.insert_one(
                    core.benchmark.BenchmarkResult(benchmark_id, trial_result_id, True).serialize()).inserted_id
                self.benchmark_results[benchmark_id].append(result_id)
                self.benchmark_trial_tasks[benchmark_id].append(
                    self.mock_db_client.tasks_collection.insert_one(
                        benchmark_trial_task.BenchmarkTrialTask(
                            trial_result_id, benchmark_id,
                            result=result_id, state=batch_analysis.task.JobState.DONE).serialize()
                    ).inserted_id
                )

    def test_invalidate_benchmark_removes_tasks(self):
        # Sanity check: one task per (trial, benchmark) pair exists up front.
        self.assertEqual(len(self.trial_results) * (len(self.benchmarks)),
                         self.mock_db_client.tasks_collection.find().count())
        invalidate.invalidate_benchmark(self.mock_db_client, self.benchmarks[0])
        # Check that the total number of tasks has gone down like we expected
        self.assertEqual(len(self.trial_results) * (len(self.benchmarks) - 1),
                         self.mock_db_client.tasks_collection.find().count())
        # Check explicitly that each of the tasks associated with the invalidated benchmark are removed
        for benchmark_task in self.benchmark_trial_tasks[self.benchmarks[0]]:
            self.assertEqual(0, self.mock_db_client.tasks_collection.find({'_id': benchmark_task}).count())
        # Check that the remaining tasks are still there.
        for i in range(1, len(self.benchmarks)):
            for benchmark_task in self.benchmark_trial_tasks[self.benchmarks[i]]:
                self.assertEqual(1, self.mock_db_client.tasks_collection.find({'_id': benchmark_task}).count())

    def test_invalidate_benchmark_invalidates_results(self):
        # Patch the result-invalidation helper so we can assert it is called
        # exactly for the results descended from the invalidated benchmark.
        with mock.patch('batch_analysis.invalidate.invalidate_benchmark_result') as mock_invalidate_result:
            invalidate.invalidate_benchmark(self.mock_db_client, self.benchmarks[0])
        # Check that all the descendant results are invalidated
        for result_id in self.benchmark_results[self.benchmarks[0]]:
            self.assertIn(mock.call(self.mock_db_client, result_id),
                          mock_invalidate_result.call_args_list)
        # Check that the other results are not invalidated
        for i in range(1, len(self.benchmarks)):
            for result_id in self.benchmark_results[self.benchmarks[i]]:
                self.assertNotIn(mock.call(self.mock_db_client, result_id),
                                 mock_invalidate_result.call_args_list)

    def test_invalidate_benchmark_removes_benchmark(self):
        invalidate.invalidate_benchmark(self.mock_db_client, self.benchmarks[0])
        # Check that the benchmark itself is removed
        self.assertEqual(0, self.mock_db_client.benchmarks_collection.find({
            '_id': self.benchmarks[0]}).count())
        # Check that the other benchmarks are still here
        for i in range(1, len(self.benchmarks)):
            self.assertEqual(1, self.mock_db_client.benchmarks_collection.find({
                '_id': self.benchmarks[i]}).count())
class TestInvalidateResult(unittest.TestCase):
    """Tests for invalidate.invalidate_benchmark_result.

    Removing a benchmark result must also remove the task that produced it,
    leaving other results and their tasks untouched.
    """

    def setUp(self):
        self.zombie_db_client = mock_client_factory.create()
        self.mock_db_client = self.zombie_db_client.mock
        # Create benchmark results and tasks.
        # Maps result id -> the id of the DONE task that produced it.
        self.benchmark_trial_tasks = {}
        self.benchmark_results = []
        for _ in range(2):
            result_id = self.mock_db_client.results_collection.insert_one(
                core.benchmark.BenchmarkResult(bson.ObjectId(), bson.ObjectId(), True).serialize()).inserted_id
            self.benchmark_results.append(result_id)
            self.benchmark_trial_tasks[result_id] = self.mock_db_client.tasks_collection.insert_one(
                benchmark_trial_task.BenchmarkTrialTask(
                    bson.ObjectId(), bson.ObjectId(),
                    result=result_id, state=batch_analysis.task.JobState.DONE).serialize()
            ).inserted_id

    def test_invalidate_result_removes_tasks(self):
        # Sanity check: one task exists per result up front.
        self.assertEqual(len(self.benchmark_trial_tasks),
                         self.mock_db_client.tasks_collection.find().count())
        invalidate.invalidate_benchmark_result(self.mock_db_client, self.benchmark_results[0])
        # Check that the total number of tasks has gone down like we expected
        self.assertEqual(len(self.benchmark_trial_tasks) - 1,
                         self.mock_db_client.tasks_collection.find().count())
        # Check explicitly that each of the tasks associated with the invalidated result are removed
        self.assertEqual(0, self.mock_db_client.tasks_collection.find({
            '_id': self.benchmark_trial_tasks[self.benchmark_results[0]]}).count())
        # Check that the remaining tasks are still there.
        for i in range(1, len(self.benchmark_results)):
            self.assertEqual(1, self.mock_db_client.tasks_collection.find({
                '_id': self.benchmark_trial_tasks[self.benchmark_results[i]]}).count())

    def test_invalidate_result_removes_result(self):
        invalidate.invalidate_benchmark_result(self.mock_db_client, self.benchmark_results[0])
        # Check that the result itself is removed
        self.assertEqual(0, self.mock_db_client.results_collection.find({
            '_id': self.benchmark_results[0]}).count())
        # Check that the other results are still here
        for i in range(1, len(self.benchmark_results)):
            self.assertEqual(1, self.mock_db_client.results_collection.find({
                '_id': self.benchmark_results[i]}).count())
def make_image_collection(db_client: database.client.DatabaseClient, length=3) \
        -> core.image_collection.ImageCollection:
    """Create, store and return a non-sequential collection of `length` synthetic images."""
    image_ids = {}
    for idx in range(length):
        entity = make_image(idx)
        entity.save_image_data(db_client)
        image_ids[idx] = db_client.image_collection.insert_one(entity.serialize()).inserted_id
    collection = core.image_collection.ImageCollection(
        images=image_ids, db_client_=db_client,
        type_=core.sequence_type.ImageSequenceType.NON_SEQUENTIAL)
    stored_id = db_client.image_source_collection.insert_one(collection.serialize()).inserted_id
    collection.refresh_id(stored_id)
    return collection
def make_image(index=1, **kwargs) -> core.image_entity.ImageEntity:
    """Build an ImageEntity populated with randomised synthetic test data.

    Any constructor field may be overridden through ``kwargs``; all remaining
    fields are filled with defaults below. ``index`` offsets the camera
    x-position so successive images in a collection are spatially distinct.
    """
    kwargs = du.defaults(kwargs, {
        'id_': bson.objectid.ObjectId(),
        # 32x32 RGB image data; random so image hashes differ between images.
        'data': np.random.uniform(0, 255, (32, 32, 3)),
        'metadata': imeta.ImageMetadata(
            hash_=b'\xf1\x9a\xe2|' + np.random.randint(0, 0xFFFFFFFF).to_bytes(4, 'big'),
            source_type=imeta.ImageSourceType.SYNTHETIC,
            camera_pose=tf.Transform(location=(1 + 100 * index, 2 + np.random.uniform(-1, 1), 3),
                                     rotation=(4, 5, 6, 7 + np.random.uniform(-4, 4))),
            intrinsics=cam_intr.CameraIntrinsics(800, 600, 550.2, 750.2, 400, 300),
            environment_type=imeta.EnvironmentType.INDOOR_CLOSE,
            light_level=imeta.LightingLevel.WELL_LIT, time_of_day=imeta.TimeOfDay.DAY,
            lens_focal_distance=5, aperture=22, simulation_world='TestSimulationWorld',
            lighting_model=imeta.LightingModel.LIT, texture_mipmap_bias=1,
            normal_maps_enabled=2, roughness_enabled=True, geometry_decimation=0.8,
            procedural_generation_seed=16234, labelled_objects=(
                imeta.LabelledObject(class_names=('car',), bounding_box=(12, 144, 67, 43),
                                     label_color=(123, 127, 112),
                                     relative_pose=tf.Transform((12, 3, 4), (0.5, 0.1, 1, 1.7)),
                                     object_id='Car-002'),
                imeta.LabelledObject(class_names=('cat',), bounding_box=(125, 244, 117, 67),
                                     label_color=(27, 89, 62),
                                     relative_pose=tf.Transform((378, -1890, 38), (0.3, 1.12, 1.1, 0.2)),
                                     object_id='cat-090')
            ), average_scene_depth=90.12),
        'additional_metadata': {
            'Source': 'Generated',
            'Resolution': {'width': 1280, 'height': 720},
            'Material Properties': {
                'BaseMipMapBias': 0,
                'RoughnessQuality': True
            }
        },
        # Auxiliary per-pixel ground-truth channels matching the image size.
        'depth_data': np.random.uniform(0, 1, (32, 32)),
        'labels_data': np.random.uniform(0, 1, (32, 32, 3)),
        'world_normals_data': np.random.uniform(0, 1, (32, 32, 3))
    })
    return ie.ImageEntity(**kwargs)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# $Id$
#
import sys
import os
import pexpect
import time
import logging
import tempfile
from systemmanagementinterface import SystemManagementInterface
from zoni.extra.util import timeF, log
class dellDrac(SystemManagementInterface):
    """Power management for a blade server via a Dell DRAC/MC telnet console.

    Every operation logs in over telnet with pexpect, issues a ``racadm``
    serveraction (or ``getmodinfo``) command for the configured server slot,
    and scrapes the command output to determine success.

    Fixes relative to the previous revision:
      * ``__setPowerStatus`` no longer raises UnboundLocalError when the
        ``getmodinfo`` output contains neither an ON nor an OFF line.
      * The cached ``powerStatus`` is kept in sync after successful power
        actions (previously it went stale after powerOn/powerOff).
      * The temporary logfile is no longer leaked on the ``powerOn``
        already-on early return.
    """

    def __init__(self, config, nodeName, hostInfo):
        self.config = config
        self.nodeName = nodeName
        self.hostname = hostInfo['location']
        self.host = hostInfo['drac_name']
        self.user = hostInfo['drac_userid']
        self.password = hostInfo['drac_password']
        self.port = hostInfo['drac_port']
        # Cached power state: None = unknown, 0 = off, 1 = on.
        self.powerStatus = None
        self.verbose = False
        # DRAC/MC module identifier for this blade slot.
        self.server = "Server-" + str(self.port)
        self.log = logging.getLogger(__name__)

    def setVerbose(self, verbose):
        """Enable/disable echoing the telnet session to stdout."""
        self.verbose = verbose

    def __login(self):
        """Open a telnet session to the DRAC and authenticate.

        Returns the pexpect child on success, or -1 on failure.
        """
        switchIp = "telnet " + self.host
        child = pexpect.spawn(switchIp)
        if self.verbose:
            child.logfile = sys.stdout
        opt = child.expect(['Login:', pexpect.EOF, pexpect.TIMEOUT])
        child.setecho(False)
        if opt == 0:
            # Small delay so the DRAC is ready to receive the username.
            time.sleep(.5)
            child.sendline(self.user)
            child.expect(["assword:", pexpect.EOF, pexpect.TIMEOUT])
            child.sendline(self.password)
            i = child.expect(['DRAC/MC:', pexpect.EOF, pexpect.TIMEOUT])
            if i == 2:
                self.log.error("Login to %s failed" % (switchIp))
                return -1
        else:
            mesg = "Error"
            self.log.error(mesg)
            return -1
        return child

    @timeF
    def __setPowerStatus(self):
        """Query the DRAC for the slot's power state and cache it.

        Scrapes ``getmodinfo`` output for an ON/OFF line mentioning this
        server slot. If no such line is found the cached state is left
        unchanged and nothing is logged (previously this raised
        UnboundLocalError on the unset ``mesg``).
        """
        fout = tempfile.TemporaryFile()
        child = self.__login()
        child.logfile = fout
        cmd = "getmodinfo -m " + self.server
        child.sendline(cmd)
        child.expect(['DRAC/MC:', pexpect.EOF, pexpect.TIMEOUT])
        fout.seek(0)
        mesg = None
        for line in fout.readlines():
            if "ON" in line and self.server in line:
                mesg = self.hostname + " Power is on\n\n"
                self.powerStatus = 1
            if "OFF" in line and self.server in line:
                mesg = self.hostname + " Power is off\n\n"
                self.powerStatus = 0
        if mesg is not None:
            self.log.info(mesg)
        fout.close()
        child.close()
        child.terminate()

    @timeF
    def isPowered(self):
        """Return 0 if the node is powered on, 1 if it is off.

        NOTE(review): the return values follow shell exit-code conventions
        (0 == success/on); callers rely on this inverted sense.
        """
        if self.powerStatus is None:
            self.__setPowerStatus()
        if self.powerStatus:
            return 0
        return 1

    def getPowerStatus(self):
        """Alias for isPowered()."""
        return self.isPowered()

    @timeF
    def powerOn(self):
        """Power the blade on. Returns 1 on success, 0 on failure."""
        code = 0
        if self.powerStatus == 1:
            # Already believed to be on; nothing to do.
            self.log.info("Hardware power on : %s", self.hostname)
            return 1
        fout = tempfile.TemporaryFile()
        child = self.__login()
        child.logfile = fout
        cmd = "racadm serveraction -m " + self.server + " powerup"
        child.sendline(cmd)
        child.expect(['DRAC/MC:', pexpect.EOF, pexpect.TIMEOUT])
        fout.seek(0)
        self.log.info("Hardware power on : %s", self.hostname)
        for val in fout.readlines():
            if "OK" in val:
                code = 1
            if "ALREADY POWER-ON" in val:
                code = 1
                self.log.info("Hardware already powered on : %s", self.hostname)
        if code < 1:
            self.log.info("Hardware power on failed : %s", self.hostname)
        else:
            # Keep the cached state in sync with the action just performed.
            self.powerStatus = 1
        fout.close()
        child.terminate()
        return code

    @timeF
    def powerOff(self):
        """Hard power the blade off. Returns 1 on success, 0 on failure."""
        code = 0
        fout = tempfile.TemporaryFile()
        child = self.__login()
        child.logfile = fout
        cmd = "racadm serveraction -m " + self.server + " powerdown"
        child.sendline(cmd)
        child.expect(['DRAC/MC:', pexpect.EOF, pexpect.TIMEOUT])
        fout.seek(0)
        self.log.info("Hardware power off : %s", self.hostname)
        for val in fout.readlines():
            if "OK" in val:
                code = 1
            if "CURRENTLY POWER-OFF" in val:
                self.log.info("Hardware already power off : %s", self.hostname)
                code = 1
        if code < 1:
            self.log.info("Hardware power off failed : %s", self.hostname)
        else:
            self.powerStatus = 0
        child.terminate()
        fout.close()
        return code

    @timeF
    def powerOffSoft(self):
        """Gracefully shut the blade down. Returns 1 on success, 0 on failure."""
        code = 0
        fout = tempfile.TemporaryFile()
        child = self.__login()
        child.logfile = fout
        cmd = "racadm serveraction -m " + self.server + " graceshutdown"
        child.sendline(cmd)
        child.expect(['DRAC/MC:', pexpect.EOF, pexpect.TIMEOUT])
        fout.seek(0)
        self.log.info("Hardware power off (soft): %s", self.hostname)
        for val in fout.readlines():
            if "OK" in val:
                code = 1
            if "CURRENTLY POWER-OFF" in val:
                self.log.info("Hardware already power off : %s", self.hostname)
                code = 1
        if code < 1:
            self.log.info("Hardware power off failed : %s", self.hostname)
        else:
            self.powerStatus = 0
        child.terminate()
        fout.close()
        return code

    @timeF
    def powerCycle(self):
        """Power cycle the blade. Returns 1 on success, 0 on failure."""
        code = 0
        fout = tempfile.TemporaryFile()
        child = self.__login()
        child.logfile = fout
        cmd = "racadm serveraction -m " + self.server + " powercycle"
        child.sendline(cmd)
        child.expect(['DRAC/MC:', pexpect.EOF, pexpect.TIMEOUT])
        fout.seek(0)
        self.log.info("Hardware power cycle : %s", self.hostname)
        for val in fout.readlines():
            if "OK" in val:
                code = 1
        if code < 1:
            self.log.info("Hardware power cycle failed : %s", self.hostname)
        else:
            # State after a cycle is not scraped here; force a re-query.
            self.powerStatus = None
        child.terminate()
        fout.close()
        return code

    @timeF
    def powerReset(self):
        """Hard reset the blade. Returns 1 on success, 0 on failure."""
        code = 0
        fout = tempfile.TemporaryFile()
        child = self.__login()
        child.logfile = fout
        cmd = "racadm serveraction -m " + self.server + " hardreset"
        child.sendline(cmd)
        child.expect(['DRAC/MC:', pexpect.EOF, pexpect.TIMEOUT])
        fout.seek(0)
        for val in fout.readlines():
            if "OK" in val:
                self.log.info("Hardware power reset : %s", self.nodeName)
                code = 1
        if code < 1:
            self.log.info("Hardware power reset fail: %s", self.nodeName)
        else:
            self.powerStatus = None
        child.terminate()
        fout.close()
        return code

    def activateConsole(self):
        """Attach the telnet session to the blade's serial console."""
        child = self.__login()
        cmd = "connect -F " + self.server
        child.sendline(cmd)
        child.expect(['DRAC/MC:', pexpect.EOF, pexpect.TIMEOUT])
        child.terminate()
| |
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import logging
import os
import yaml
from dp import DP
from port import Port
from vlan import VLAN
from watcher_conf import WatcherConf
def get_logger(logname):
    """Return the config-scoped child of the named application logger."""
    return logging.getLogger('{0}.config'.format(logname))
def read_config(config_file, logname):
    """Parse a YAML config file; log and return None on YAML errors."""
    logger = get_logger(logname)
    try:
        with open(config_file, 'r') as stream:
            return yaml.safe_load(stream)
    except yaml.YAMLError as ex:
        logger.error('Error in file %s (%s)', config_file, str(ex))
        return None
def config_file_hash(config_file_name):
    """Return the SHA256 hex digest of a config file's contents.

    The file is opened in binary mode so hashing works identically under
    Python 2 and 3 (hashlib requires bytes on Python 3), and the context
    manager closes the handle promptly (the previous version leaked it).
    """
    with open(config_file_name, 'rb') as config_file:
        return hashlib.sha256(config_file.read()).hexdigest()
def dp_parser(config_file, logname):
    """Parse a faucet config file, dispatching on its 'version' key.

    Returns whatever the version-specific parser returns, or None when the
    file cannot be read or declares an unsupported version.
    """
    logger = get_logger(logname)
    conf = read_config(config_file, logname)
    if conf is None:
        return None
    version = conf.pop('version', 1)
    version_parsers = {1: _dp_parser_v1, 2: _dp_parser_v2}
    parser = version_parsers.get(version)
    if parser is None:
        logger.error('unsupported config version number %s', version)
        return None
    return parser(conf, config_file, logname)
def port_parser(dp_id, p_identifier, port_conf, vlans):
    """Build a Port from config, registering it on its native/tagged VLANs.

    New VLANs encountered here are created and added to the shared `vlans`
    dict as a side effect.
    """
    port = Port(p_identifier, port_conf)
    # Mirror ports carry no VLAN configuration of their own.
    if port.mirror is not None:
        return port
    if port.native_vlan is not None:
        native = vlans.setdefault(port.native_vlan, VLAN(port.native_vlan, dp_id))
        native.untagged.append(port)
    for tagged_vid in port.tagged_vlans:
        vlans.setdefault(tagged_vid, VLAN(tagged_vid, dp_id)).tagged.append(port)
    return port
def _dp_config_path(config_file, parent_file=None):
if parent_file and not os.path.isabs(config_file):
return os.path.realpath(os.path.join(os.path.dirname(parent_file), config_file))
else:
return os.path.realpath(config_file)
def _dp_parser_v1(conf, config_file, logname):
    """Parse a version-1 (flat, single-datapath) faucet config.

    Returns ({config_path: sha256_hash}, [dp]) on success, or None when the
    datapath fails its sanity check.
    """
    logger = get_logger(logname)
    config_path = _dp_config_path(config_file)
    # TODO: warn when the configuration contains meaningless elements
    # they are probably typos
    if 'dp_id' not in conf:
        logger.error('dp_id not configured in file %s', config_file)
    # NOTE(review): the lookup below raises KeyError right after the error
    # log when dp_id is missing — confirm whether that is intended.
    dp_id = conf['dp_id']
    dp = DP(dp_id, conf)
    interfaces_conf = conf.pop('interfaces', {})
    vlans_conf = conf.pop('vlans', {})
    acls_conf = conf.pop('acls', {})
    logger.info(str(dp))
    vlans = {}
    for vid, vlan_conf in vlans_conf.iteritems():
        vlans[vid] = VLAN(vid, dp_id, vlan_conf)
    # port_parser also creates any VLANs referenced only by ports.
    for port_num, port_conf in interfaces_conf.iteritems():
        dp.add_port(port_parser(dp_id, port_num, port_conf, vlans))
    for acl_num, acl_conf in acls_conf.iteritems():
        dp.add_acl(acl_num, acl_conf)
    for vlan in vlans.itervalues():
        dp.add_vlan(vlan)
    dp.finalize_config()
    try:
        dp.sanity_check()
    except AssertionError as err:
        logger.exception('Error in config file: %s', err)
        return None
    return ({config_path: config_file_hash(config_path)}, [dp])
def _dp_include(config_hashes, parent_file, config_file, dps_conf, vlans_conf, acls_conf, logname):
    """Load one config file, plus its includes recursively, into the shared dicts.

    All updates are staged on copies and only merged into the caller's dicts
    on success, so a failed (sub-)include leaves the caller's state untouched.
    Returns True on success, False on any error (missing file, parse failure,
    include loop, or a failed required include).
    """
    logger = get_logger(logname)
    # Save the updated configuration state in separate dicts,
    # so if an error is found, the changes can simply be thrown away.
    new_config_hashes = config_hashes.copy()
    new_dps_conf = dps_conf.copy()
    new_vlans_conf = vlans_conf.copy()
    new_acls_conf = acls_conf.copy()
    if not os.path.isfile(config_file):
        logger.warning('not a regular file or does not exist: %s', config_file)
        return False
    conf = read_config(config_file, logname)
    if not conf:
        logger.warning('error loading config from file: %s', config_file)
        return False
    # Add the SHA256 hash for this configuration file, so FAUCET can determine
    # whether or not this configuration file should be reloaded upon receiving
    # a HUP signal.
    new_config_hashes[config_file] = config_file_hash(config_file)
    new_dps_conf.update(conf.pop('dps', {}))
    new_vlans_conf.update(conf.pop('vlans', {}))
    new_acls_conf.update(conf.pop('acls', {}))
    for include_file in conf.pop('include', []):
        include_path = _dp_config_path(include_file, parent_file=config_file)
        # A path already recorded means the includes form a cycle.
        if include_path in config_hashes:
            logger.error(
                'include file %s already loaded, include loop found in file: %s',
                include_path,
                config_file,
            )
            return False
        # Required include: failure aborts the whole load.
        if not _dp_include(new_config_hashes,
                           config_file, include_path,
                           new_dps_conf, new_vlans_conf, new_acls_conf,
                           logname):
            logger.error('unable to load required include file: %s', include_path)
            return False
    for include_file in conf.pop('include-optional', []):
        include_path = _dp_config_path(include_file, parent_file=config_file)
        if include_path in config_hashes:
            logger.error(
                'include file %s already loaded, include loop found in file: %s',
                include_path,
                config_file,
            )
            return False
        # Optional include: a failure is tolerated; the path is still recorded
        # (with a None hash) — presumably so a later change to the file is
        # noticed on reload. TODO(review): confirm that intent.
        if not _dp_include(new_config_hashes,
                           config_file, include_path,
                           new_dps_conf, new_vlans_conf, new_acls_conf,
                           logname):
            new_config_hashes[include_path] = None
            logger.warning('skipping optional include file: %s', include_path)
    # Actually update the configuration data structures,
    # now that this file has been successfully loaded.
    config_hashes.update(new_config_hashes)
    dps_conf.update(new_dps_conf)
    vlans_conf.update(new_vlans_conf)
    acls_conf.update(new_acls_conf)
    return True
def _dp_add_vlan(vid_dp, dp, vlan, logname):
    """Register a VLAN on a DP while tracking which DPs share each VID.

    BGP-speaker VLANs must not be shared across more than two datapaths.
    """
    sharing_dps = vid_dp.setdefault(vlan.vid, set())
    if len(sharing_dps) > 1:
        # A BGP speaker VLAN cannot span this many datapaths.
        assert not vlan.bgp_routerid, \
            "DPs {0} sharing a BGP speaker VLAN is unsupported".format(
                str.join(", ", sharing_dps),
            )
    if vlan not in dp.vlans:
        dp.add_vlan(vlan)
    sharing_dps.add(dp.name)
def _dp_parser_v2(conf, config_file, logname):
    """Parse a version-2 (multi-datapath, include-capable) faucet config.

    Returns (config_hashes, [dp, ...]) on success, or None when loading
    fails, no datapaths are configured, or port parsing trips an assertion.
    """
    logger = get_logger(logname)
    config_path = _dp_config_path(config_file)
    config_hashes = {}
    dps_conf = {}
    vlans_conf = {}
    acls_conf = {}
    if not _dp_include(config_hashes, None, config_path, dps_conf, vlans_conf, acls_conf, logname):
        logger.critical('error found while loading config file: %s', config_path)
        return None
    if not dps_conf:
        logger.critical('dps not configured in file: %s', config_path)
        return None
    dps = []
    # Maps vid -> set of DP names sharing that VLAN (for the BGP check).
    vid_dp = {}
    for identifier, dp_conf in dps_conf.iteritems():
        ports_conf = dp_conf.pop('interfaces', {})
        dp = DP(identifier, dp_conf)
        dp.sanity_check()
        dp_id = dp.dp_id
        vlans = {}
        ports = {}
        # Each DP gets its own VLAN objects built from the shared vlans conf.
        for vid, vlan_conf in vlans_conf.iteritems():
            vlans[vid] = VLAN(vid, dp_id, vlan_conf)
        try:
            for port_num, port_conf in ports_conf.iteritems():
                port = port_parser(dp_id, port_num, port_conf, vlans)
                ports[port_num] = port
                if port.native_vlan is not None:
                    _dp_add_vlan(vid_dp, dp, vlans[port.native_vlan], logname)
                if port.tagged_vlans is not None:
                    for vid in port.tagged_vlans:
                        _dp_add_vlan(vid_dp, dp, vlans[vid], logname)
        except AssertionError as err:
            logger.exception('Error in config file: %s', err)
            return None
        for port in ports.itervalues():
            dp.add_port(port)
        for a_identifier, acl_conf in acls_conf.iteritems():
            # TODO: turn this into an object
            dp.add_acl(a_identifier, acl_conf)
        dp.finalize_config()
        dps.append(dp)
    return (config_hashes, dps)
def watcher_parser(config_file, logname):
    """Parse a gauge watcher config, supporting both old and new layouts."""
    #TODO: make this backwards compatible
    conf = read_config(config_file, logname)
    # A mapping is the v2 layout; anything else is the legacy list of files.
    if isinstance(conf, dict):
        return _watcher_parser_v2(conf, logname)
    return _watcher_parser_v1(config_file, logname)
def _watcher_parser_v1(config_file, logname):
    """Build WatcherConf objects from a legacy (v1) gauge config.

    The v1 config file is a plain list of faucet config paths, one per line;
    watchers are then derived from each DP's monitoring attributes.
    Returns a list of WatcherConf objects.
    """
    result = []
    # Keys copied from the DP into influxdb-backed watcher configs.
    INFLUX_KEYS = [
        'influx_db',
        'influx_host',
        'influx_port',
        'influx_user',
        'influx_pwd',
        'influx_timeout',
    ]
    # Keys copied from the DP's gaugedb config into gaugedb-backed watchers.
    GAUGEDB_KEYS = [
        'gdb_type',
        'nosql_db',
        'db_username',
        'db_password',
        'db_ip',
        'db_fqdn',
        'db_port',
        'driver',
        'views',
        'switches_doc',
        'flows_doc',
    ]
    dps = []
    # Each line of the v1 config names a faucet config file; take the first
    # DP from each parse result.
    with open(config_file, 'r') as conf:
        for line in conf:
            dps.append(dp_parser(line.strip(), logname)[1][0])
    for dp in dps:
        if dp.influxdb_stats:
            w_type = 'port_state'
            port_state_conf = {
                'type': w_type,
                'db_type': 'influx'
            }
            for key in INFLUX_KEYS:
                port_state_conf[key] = dp.__dict__.get(key, None)
            name = dp.name + '-' + w_type
            watcher = WatcherConf(name, port_state_conf)
            # add dp to watcher. prevents the dp_id attribute error in gauge.
            watcher.add_dp(dp)
            result.append(watcher)
        if dp.monitor_ports:
            w_type = 'port_stats'
            port_stats_conf = {'type': w_type}
            port_stats_conf['interval'] = dp.monitor_ports_interval
            # Port stats go to influx when configured, otherwise a text file.
            if dp.influxdb_stats:
                port_stats_conf['db_type'] = 'influx'
                for key in INFLUX_KEYS:
                    port_stats_conf[key] = dp.__dict__.get(key, None)
            else:
                port_stats_conf['db_type'] = 'text'
                port_stats_conf['file'] = dp.monitor_ports_file
            name = dp.name + '-' + w_type
            watcher = WatcherConf(name, port_stats_conf)
            # add dp to watcher. prevents the dp_id attribute error in gauge.
            watcher.add_dp(dp)
            result.append(watcher)
        if dp.monitor_flow_table:
            w_type = 'flow_table'
            flow_table_conf = {'type': w_type}
            flow_table_conf['interval'] = dp.monitor_flow_table_interval
            flow_table_conf['file'] = dp.monitor_flow_table_file
            name = dp.name + '-' + w_type
            watcher = WatcherConf(name, flow_table_conf)
            # add dp to watcher. prevents the dp_id attribute error in gauge.
            watcher.add_dp(dp)
            result.append(watcher)
        if dp.gaugedb_updates:
            w_type = 'flow_table'
            flow_table_conf = {'type': w_type}
            flow_table_conf['db_type'] = 'gaugedb'
            flow_table_conf['interval'] = dp.monitor_flow_table_interval
            flow_table_conf['db_update_counter'] = dp.gaugedb_update_counter
            name = dp.name + '-' + w_type
            for key in GAUGEDB_KEYS:
                flow_table_conf[key] = dp.__dict__.get('gaugedb').get(
                    key, None)
            watcher = WatcherConf(name, flow_table_conf)
            watcher.add_dp(dp)
            result.append(watcher)
    return result
def _watcher_parser_v2(conf, logname):
    """Build WatcherConf objects from a v2 gauge config mapping.

    Returns a list of WatcherConf objects, each bound to its database config
    and datapath. Watchers referencing unconfigured DPs are skipped with an
    error log.
    """
    logger = get_logger(logname)
    result = []
    dps = {}
    # Load every referenced faucet config so watchers can be bound to DPs.
    for faucet_file in conf['faucet_configs']:
        __, dp_list = dp_parser(faucet_file, logname)
        for dp in dp_list:
            dps[dp.name] = dp
    # NOTE(review): raises KeyError when 'dbs' is absent — confirm intended.
    dbs = conf.pop('dbs')
    for name, dictionary in conf['watchers'].iteritems():
        for dp_name in dictionary['dps']:
            if dp_name not in dps:
                errormsg = "dp: {0} metered but not configured".format(
                    dp_name
                )
                logger.error(errormsg)
                continue
            dp = dps[dp_name]
            watcher = WatcherConf(name, dictionary)
            watcher.add_db(dbs[watcher.db])
            watcher.add_dp(dp)
            result.append(watcher)
    return result
| |
# Third-party
import numpy as np
# This package
from .. import combine, PhaseSpacePosition
from ..nbody import DirectNBody
from ...potential import Hamiltonian, PotentialBase
from ...integrate.timespec import parse_time_specification
from ._mockstream import mockstream_dop853, mockstream_dop853_animate
from .core import MockStream
__all__ = ['MockStreamGenerator']
class MockStreamGenerator:
    """Generates mock stellar streams by sampling star particles from a
    distribution function and integrating them in an external potential,
    optionally with the progenitor's self-gravity and other N-bodies."""

    def __init__(self, df, hamiltonian, progenitor_potential=None):
        """Generate a mock stellar stream in the specified external potential.

        By default, you must pass in a specification of the stream distribution
        function (``df``), and the external gravitational potential and
        reference frame (via a `~gala.potential.Hamiltonian` object passed in
        through the ``hamiltonian`` argument).

        Also by default, the stream generation does not include the self-gravity
        of the progenitor system: star particles are generated using the ``df``
        object, and released into the external potential specified by the
        ``hamiltonian``. If you would like the star particles to feel the
        gravitational field of the progenitor system, you may pass in a
        potential object to represent the progenitor via the
        ``progenitor_potential`` argument. This can be any valid gala potential
        instance.

        Parameters
        ----------
        df : `~gala.dynamics.BaseStreamDF` subclass instance
            The stream distribution function (DF) object that specifies how to
            generate stream star particle initial conditions.
        hamiltonian : `~gala.potential.Hamiltonian`
            The external potential and reference frame to numerically integrate
            orbits in.
        progenitor_potential : `~gala.potential.PotentialBase` (optional)
            If specified, the self-gravity of the progenitor system is included
            in the force calculation and orbit integration. If not specified,
            self-gravity is not accounted for. Default: ``None``
        """
        # Imported here rather than at module level — presumably to avoid a
        # circular import with the df module; confirm before moving.
        from .df import BaseStreamDF
        if not isinstance(df, BaseStreamDF):
            raise TypeError('The input distribution function (DF) instance '
                            'must be an instance of a subclass of '
                            'BaseStreamDF, not {}.'.format(type(df)))
        self.df = df

        # Validate the input hamiltonian
        self.hamiltonian = Hamiltonian(hamiltonian)

        if progenitor_potential is not None:
            # validate the potential class
            if not isinstance(progenitor_potential, PotentialBase):
                raise TypeError("If specified, the progenitor_potential must "
                                "be a gala.potential class instance.")
            self.self_gravity = True
        else:
            self.self_gravity = False
        self.progenitor_potential = progenitor_potential

    def _get_nbody(self, prog_w0, nbody):
        """Internal function that adds the progenitor to the list of nbody
        objects to integrate along with the test particles in the stream.

        The progenitor is always placed first in the combined body list
        (``run`` relies on index 0 being the progenitor). Raises ValueError
        if the input nbody's potential or frame disagree with this
        generator's hamiltonian.
        """
        kwargs = dict()
        if nbody is not None:
            # The external potential/frame of the perturbers must match ours,
            # otherwise the combined integration would be inconsistent.
            if nbody.external_potential != self.hamiltonian.potential:
                raise ValueError('The external potential of the input nbody '
                                 'instance must match the potential of the mock '
                                 'stream input hamiltonian! {} vs. {}'
                                 .format(nbody.external_potential,
                                         self.hamiltonian.potential))
            if nbody.frame != self.hamiltonian.frame:
                raise ValueError('The reference frame of the input nbody '
                                 'instance must match the frame of the mock '
                                 'stream input hamiltonian! {} vs. {}'
                                 .format(nbody.frame, self.hamiltonian.frame))
            # Progenitor first, then the other bodies.
            kwargs['w0'] = combine((prog_w0, nbody.w0))
            kwargs['particle_potentials'] = ([self.progenitor_potential] +
                                             nbody.particle_potentials)
            kwargs['external_potential'] = self.hamiltonian.potential
            kwargs['frame'] = self.hamiltonian.frame
            kwargs['units'] = self.hamiltonian.units
            kwargs['save_all'] = nbody.save_all
        else:
            kwargs['w0'] = prog_w0
            kwargs['particle_potentials'] = [self.progenitor_potential]
            kwargs['external_potential'] = self.hamiltonian.potential
            kwargs['frame'] = self.hamiltonian.frame
            kwargs['units'] = self.hamiltonian.units
        return DirectNBody(**kwargs)

    def run(self, prog_w0, prog_mass, nbody=None,
            release_every=1, n_particles=1,
            output_every=None, output_filename=None,
            check_filesize=True, overwrite=False,
            **time_spec):
        """Run the mock stream generator with the specified progenitor initial
        conditions.

        This method generates the mock stellar stream for the specified
        progenitor system properties. The progenitor orbit is specified by
        passing in the initial or final conditions ``prog_w0`` and by specifying
        time-stepping information via the ``**time_spec`` keyword arguments. If
        the time-stepping specification proceeds forward in time, ``prog_w0`` is
        interpreted as initial conditions and the mock stream is generated
        forwards from this position. If the time-stepping proceeds backwards in
        time, the progenitor orbit is first numerically integrated backwards
        given the time-stepping information, then the stream is generated
        forward from the past such that ``prog_w0`` becomes the final position
        of the progenitor.

        Note that the stream generation also supports including other massive
        perturbers that can gravitationally influence the stream stars. These
        other massive bodies must be passed in as a `~gala.dynamics.DirectNBody`
        instance through the ``nbody`` argument. The phase-space coordinates of
        the bodies, ``nbody.w0``, are interpreted as initial or final conditions
        with the same logic as above.

        Parameters
        ----------
        prog_w0 : `~gala.dynamics.PhaseSpacePosition`
            The initial or final phase-space position of the progenitor system
            (see note above).
        prog_mass : `~astropy.units.Quantity` [mass]
            The mass of the progenitor system, passed in to the stream
            distribution function (df) ``.sample()`` method. This quantity sets
            the scale mass of the particle release df, but not the mass of the
            progenitor potential used to compute the self-gravity on the stream
            particles.
        nbody : `~gala.dynamics.DirectNBody` (optional)
            This allows specifying other massive perturbers (N-bodies) that can
            gravitationally influence the stream star orbits.
        release_every : int (optional)
            Controls how often to release stream particles from each tail.
            Default: 1, meaning release particles at each timestep.
        n_particles : int, array_like (optional)
            If an integer, this controls the number of particles to release in
            each tail at each release timestep. Alternatively, you can pass in
            an array with the same shape as the number of timesteps to release
            bursts of particles at certain times (e.g., pericenter).
        output_every : int (optional)
            Controls whether to output snapshots of the stream particle orbits.
            This is relative to the global time array.
        output_filename : str (optional)
            The path to the HDF5 file to be generated by the snapshotting.
        check_filesize : bool (optional)
            If True (the default value), this controls whether to check the
            estimated size of the output file, and emits a warning if the file
            is >8GB in size.
        overwrite : bool (optional)
            Overwrite the output file if it exists.
        **time_spec
            Specification of how long to integrate. Most commonly, this is a
            timestep ``dt`` and number of steps ``n_steps``, or a timestep
            ``dt``, initial time ``t1``, and final time ``t2``. You may also
            pass in a time array with ``t``. See documentation for
            `~gala.integrate.parse_time_specification` for more information.

        Returns
        -------
        stream_w : `~gala.dynamics.PhaseSpacePosition`
        nbody_w : `~gala.dynamics.PhaseSpacePosition`
        """
        units = self.hamiltonian.units
        t = parse_time_specification(units, **time_spec)

        prog_nbody = self._get_nbody(prog_w0, nbody)
        nbody_orbits = prog_nbody.integrate_orbit(t=t)

        # If the time stepping passed in is negative, assume this means that all
        # of the initial conditions are at *end time*, and we first need to
        # integrate them backwards before treating them as initial conditions
        if t[1] < t[0]:
            nbody_orbits = nbody_orbits[::-1]

            # TODO: this could be cleaned up...
            nbody0 = DirectNBody(
                nbody_orbits[0], prog_nbody.particle_potentials,
                external_potential=self.hamiltonian.potential,
                frame=self.hamiltonian.frame, units=units)
        else:
            nbody0 = prog_nbody

        prog_orbit = nbody_orbits[:, 0]  # Note: Progenitor must be idx 0!
        orbit_t = prog_orbit.t.decompose(units).value

        # Generate initial conditions from the DF
        stream_w0 = self.df.sample(prog_orbit, prog_mass,
                                   hamiltonian=self.hamiltonian,
                                   release_every=release_every,
                                   n_particles=n_particles)
        # Stack positions and velocities into a C-contiguous (n, 6) array for
        # the compiled mockstream integrator.
        w0 = np.vstack((stream_w0.xyz.decompose(units).value,
                        stream_w0.v_xyz.decompose(units).value)).T
        w0 = np.ascontiguousarray(w0)

        # Map each unique release time onto the per-timestep particle counts.
        unq_t1s, nstream = np.unique(stream_w0.release_time.decompose(units).value,
                                     return_counts=True)
        all_nstream = np.zeros(prog_orbit.ntimes, dtype=int)
        for t1, n in zip(unq_t1s, nstream):
            all_nstream[orbit_t == t1] = n

        if output_every is None:  # store snapshots
            raw_nbody, raw_stream = mockstream_dop853(
                nbody0, orbit_t[all_nstream != 0], w0, unq_t1s,
                all_nstream[all_nstream != 0].astype('i4'))
        else:
            if output_filename is None:
                raise ValueError("If output_every is specified, you must also "
                                 "pass in a filename to store the snapshots in")
            raw_nbody, raw_stream = mockstream_dop853_animate(
                nbody0, orbit_t, w0, all_nstream.astype('i4'),
                output_every=output_every, output_filename=output_filename,
                check_filesize=check_filesize, overwrite=overwrite)

        # Re-attach units: raw arrays are (n, 6) in the hamiltonian's units.
        x_unit = units['length']
        v_unit = units['length'] / units['time']
        stream_w = MockStream(pos=raw_stream[:, :3].T * x_unit,
                              vel=raw_stream[:, 3:].T * v_unit,
                              release_time=stream_w0.release_time,
                              lead_trail=stream_w0.lead_trail)
        nbody_w = PhaseSpacePosition(pos=raw_nbody[:, :3].T * x_unit,
                                     vel=raw_nbody[:, 3:].T * v_unit)
        return stream_w, nbody_w
| |
"""Base classes for all estimators."""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import copy
import inspect
import warnings
import numpy as np
from scipy import sparse
from .externals import six
###############################################################################
def clone(estimator, safe=True):
    """Construct a new estimator with the same parameters.

    Clone does a deep copy of the model in an estimator
    without actually copying attached data. It yields a new estimator
    with the same parameters that has not been fit on any data.

    Parameters
    ----------
    estimator : estimator object, or list, tuple or set of objects
        The estimator or group of estimators to be cloned.
    safe : boolean, optional
        If safe is false, clone will fall back to a deepcopy on objects
        that are not estimators.

    Returns
    -------
    estimator : object
        A new, unfitted estimator (or collection of estimators) carrying
        the same parameters as the input.

    Raises
    ------
    TypeError
        If `estimator` does not implement ``get_params`` and `safe` is True.
    RuntimeError
        If the constructor does not faithfully store the parameters it was
        given (detected by the cheap equality probe below).
    """
    estimator_type = type(estimator)
    # XXX: not handling dictionaries
    if estimator_type in (list, tuple, set, frozenset):
        return estimator_type([clone(e, safe=safe) for e in estimator])
    elif not hasattr(estimator, 'get_params'):
        if not safe:
            return copy.deepcopy(estimator)
        else:
            # Fixed message: the original read "...estimator it does not
            # implement a 'get_params' methods" (missing "as", bad plural).
            raise TypeError("Cannot clone object '%s' (type %s): "
                            "it does not seem to be a scikit-learn estimator "
                            "as it does not implement a 'get_params' method."
                            % (repr(estimator), type(estimator)))
    klass = estimator.__class__
    new_object_params = estimator.get_params(deep=False)
    # dict.items() works on both Python 2 and 3; the six.iteritems shim
    # is unnecessary here.
    for name, param in new_object_params.items():
        new_object_params[name] = clone(param, safe=False)
    new_object = klass(**new_object_params)
    params_set = new_object.get_params(deep=False)
    # quick sanity check of the parameters of the clone
    for name in new_object_params:
        param1 = new_object_params[name]
        param2 = params_set[name]
        if isinstance(param1, np.ndarray):
            # For most ndarrays, we do not test for complete equality;
            # shape, dtype and the two end elements are a cheap probe.
            if not isinstance(param2, type(param1)):
                equality_test = False
            elif (param1.ndim > 0
                    and param1.shape[0] > 0
                    and isinstance(param2, np.ndarray)
                    and param2.ndim > 0
                    and param2.shape[0] > 0):
                equality_test = (
                    param1.shape == param2.shape
                    and param1.dtype == param2.dtype
                    # We have to use '.flat' for 2D arrays
                    and param1.flat[0] == param2.flat[0]
                    and param1.flat[-1] == param2.flat[-1]
                )
            else:
                equality_test = np.all(param1 == param2)
        elif sparse.issparse(param1):
            # For sparse matrices equality doesn't work elementwise;
            # compare class, end data elements, nnz and shape instead.
            if not sparse.issparse(param2):
                equality_test = False
            elif param1.size == 0 or param2.size == 0:
                equality_test = (
                    param1.__class__ == param2.__class__
                    and param1.size == 0
                    and param2.size == 0
                )
            else:
                equality_test = (
                    param1.__class__ == param2.__class__
                    and param1.data[0] == param2.data[0]
                    and param1.data[-1] == param2.data[-1]
                    and param1.nnz == param2.nnz
                    and param1.shape == param2.shape
                )
        else:
            equality_test = new_object_params[name] == params_set[name]
        if not equality_test:
            raise RuntimeError('Cannot clone object %s, as the constructor '
                               'does not seem to set parameter %s' %
                               (estimator, name))
    return new_object
###############################################################################
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params: dict
The dictionary to pretty print
offset: int
The offset in characters to add at the begin of each line.
printer:
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ',\n' + (1 + offset // 2) * ' '
for i, (k, v) in enumerate(sorted(six.iteritems(params))):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = '%s=%s' % (k, str(v))
else:
# use repr of the rest
this_repr = '%s=%s' % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + '...' + this_repr[-100:]
if i > 0:
if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(', ')
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = ''.join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
return lines
###############################################################################
class BaseEstimator(object):
    """Base class for all estimators in scikit-learn
    Notes
    -----
    All estimators should specify all the parameters that can be set
    at the class level in their ``__init__`` as explicit keyword
    arguments (no ``*args`` or ``**kwargs``).
    """
    @classmethod
    def _get_param_names(cls):
        """Get parameter names for the estimator"""
        # fetch the constructor or the original constructor before
        # deprecation wrapping if any
        init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
        if init is object.__init__:
            # No explicit constructor to introspect
            return []
        # introspect the constructor arguments to find the model parameters
        # to represent
        # NOTE(review): inspect.getargspec was removed in Python 3.11;
        # inspect.getfullargspec would be needed there — confirm the
        # supported Python range before changing this.
        args, varargs, kw, default = inspect.getargspec(init)
        if varargs is not None:
            raise RuntimeError("scikit-learn estimators should always "
                               "specify their parameters in the signature"
                               " of their __init__ (no varargs)."
                               " %s doesn't follow this convention."
                               % (cls, ))
        # Remove 'self'
        # XXX: This is going to fail if the init is a staticmethod, but
        # who would do this?
        args.pop(0)
        # Sorted so get_params/set_params see a deterministic order.
        args.sort()
        return args
    def get_params(self, deep=True):
        """Get parameters for this estimator.
        Parameters
        ----------
        deep: boolean, optional
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.
        Returns
        -------
        params : mapping of string to any
            Parameter names mapped to their values.
        """
        out = dict()
        for key in self._get_param_names():
            # We need deprecation warnings to always be on in order to
            # catch deprecated param values.
            # This is set in utils/__init__.py but it gets overwritten
            # when running under python3 somehow.
            # NOTE: simplefilter() prepends one entry to warnings.filters;
            # the finally-block pop(0) below removes exactly that entry,
            # so the push/pop must stay paired around the getattr.
            warnings.simplefilter("always", DeprecationWarning)
            try:
                with warnings.catch_warnings(record=True) as w:
                    # Attribute lookup may emit a DeprecationWarning via a
                    # deprecated-property descriptor; record instead of print.
                    value = getattr(self, key, None)
                if len(w) and w[0].category == DeprecationWarning:
                    # if the parameter is deprecated, don't show it
                    continue
            finally:
                warnings.filters.pop(0)
            # XXX: should we rather test if instance of estimator?
            if deep and hasattr(value, 'get_params'):
                # Flatten nested estimator params as '<name>__<subparam>'.
                deep_items = value.get_params().items()
                out.update((key + '__' + k, val) for k, val in deep_items)
            out[key] = value
        return out
    def set_params(self, **params):
        """Set the parameters of this estimator.
        The method works on simple estimators as well as on nested objects
        (such as pipelines). The former have parameters of the form
        ``<component>__<parameter>`` so that it's possible to update each
        component of a nested object.
        Returns
        -------
        self
        """
        if not params:
            # Simple optimisation to gain speed (inspect is slow)
            return self
        valid_params = self.get_params(deep=True)
        for key, value in six.iteritems(params):
            # Split at the first '__' only: the remainder is forwarded to
            # the sub-estimator, which recurses for deeper nesting.
            split = key.split('__', 1)
            if len(split) > 1:
                # nested objects case
                name, sub_name = split
                if not name in valid_params:
                    raise ValueError('Invalid parameter %s for estimator %s' %
                                     (name, self))
                sub_object = valid_params[name]
                sub_object.set_params(**{sub_name: value})
            else:
                # simple objects case
                if not key in valid_params:
                    raise ValueError('Invalid parameter %s ' 'for estimator %s'
                                     % (key, self.__class__.__name__))
                setattr(self, key, value)
        return self
    def __repr__(self):
        # Render as 'ClassName(param=value, ...)' via the module-level
        # _pprint helper; offset keeps wrapped lines aligned.
        class_name = self.__class__.__name__
        return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
                                               offset=len(class_name),),)
###############################################################################
class ClassifierMixin(object):
    """Mixin class for all classifiers in scikit-learn."""

    def score(self, X, y, sample_weight=None):
        """Return the mean accuracy on the given test data and labels.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Test samples.
        y : array-like, shape = (n_samples,)
            True labels for X.
        sample_weight : array-like, shape = [n_samples], optional
            Sample weights.

        Returns
        -------
        score : float
            Mean accuracy of self.predict(X) wrt. y.
        """
        # Local import — presumably avoids a circular dependency with
        # the metrics module at import time.
        from .metrics import accuracy_score
        predicted = self.predict(X)
        return accuracy_score(y, predicted, sample_weight=sample_weight)
###############################################################################
class RegressorMixin(object):
    """Mixin class for all regression estimators in scikit-learn."""

    def score(self, X, y, sample_weight=None):
        """Return the coefficient of determination R^2 of the prediction.

        R^2 is defined as ``1 - u/v``, where ``u`` is the regression sum
        of squares ``((y_true - y_pred) ** 2).sum()`` and ``v`` is the
        residual sum of squares ``((y_true - y_true.mean()) ** 2).sum()``.
        Best possible score is 1.0, lower values are worse.

        Parameters
        ----------
        X : array-like, shape = (n_samples, n_features)
            Test samples.
        y : array-like, shape = (n_samples,)
            True values for X.
        sample_weight : array-like, shape = [n_samples], optional
            Sample weights.

        Returns
        -------
        score : float
            R^2 of self.predict(X) wrt. y.
        """
        # Local import — presumably avoids a circular dependency with
        # the metrics module at import time.
        from .metrics import r2_score
        predicted = self.predict(X)
        return r2_score(y, predicted, sample_weight=sample_weight)
###############################################################################
class ClusterMixin(object):
    """Mixin class for all cluster estimators in scikit-learn."""

    def fit_predict(self, X, y=None):
        """Perform clustering on X and return the cluster labels.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Input data.
        y : ignored
            Present for API consistency with supervised estimators.

        Returns
        -------
        y : ndarray, shape (n_samples,)
            Cluster labels.
        """
        # Default, non-optimized implementation: fit, then read the
        # labels_ attribute that fit is expected to set.  Subclasses may
        # override this when a combined fit+predict is cheaper.
        self.fit(X)
        return self.labels_
class BiclusterMixin(object):
    """Mixin class for all bicluster estimators in scikit-learn"""

    @property
    def biclusters_(self):
        """Convenient way to get row and column indicators together.

        Returns the ``rows_`` and ``columns_`` members.
        """
        return self.rows_, self.columns_

    def get_indices(self, i):
        """Row and column indices of the i'th bicluster.

        Only works if ``rows_`` and ``columns_`` attributes exist.

        Returns
        -------
        row_ind : np.array, dtype=np.intp
            Indices of rows in the dataset that belong to the bicluster.
        col_ind : np.array, dtype=np.intp
            Indices of columns in the dataset that belong to the bicluster.
        """
        # Local import — the helper lives in the cluster subpackage;
        # importing lazily presumably avoids a circular import.
        from .cluster.bicluster.utils import get_indices
        row_indicator = self.rows_[i]
        column_indicator = self.columns_[i]
        return get_indices(row_indicator, column_indicator)

    def get_shape(self, i):
        """Shape of the i'th bicluster.

        Returns
        -------
        shape : (int, int)
            Number of rows and columns (resp.) in the bicluster.
        """
        from .cluster.bicluster.utils import get_shape
        row_indicator = self.rows_[i]
        column_indicator = self.columns_[i]
        return get_shape(row_indicator, column_indicator)

    def get_submatrix(self, i, data):
        """Return the submatrix corresponding to bicluster `i`.

        Works with sparse matrices. Only works if ``rows_`` and
        ``columns_`` attributes exist.
        """
        from .cluster.bicluster.utils import get_submatrix
        row_indicator = self.rows_[i]
        column_indicator = self.columns_[i]
        return get_submatrix(row_indicator, column_indicator, data)
###############################################################################
class TransformerMixin(object):
    """Mixin class for all transformers in scikit-learn."""

    def fit_transform(self, X, y=None, **fit_params):
        """Fit to data, then transform it.

        Fits the transformer to X (and y, when given) with optional
        parameters `fit_params`, then returns a transformed version of X.

        Parameters
        ----------
        X : numpy array of shape [n_samples, n_features]
            Training set.
        y : numpy array of shape [n_samples]
            Target values.

        Returns
        -------
        X_new : numpy array of shape [n_samples, n_features_new]
            Transformed array.
        """
        # Non-optimized default implementation; override when a combined
        # fit+transform is cheaper for a given transformer.
        if y is None:
            # fit method of arity 1 (unsupervised transformation)
            fitted = self.fit(X, **fit_params)
        else:
            # fit method of arity 2 (supervised transformation)
            fitted = self.fit(X, y, **fit_params)
        return fitted.transform(X)
###############################################################################
class MetaEstimatorMixin(object):
    """Mixin class for all meta estimators in scikit-learn."""
    # this is just a tag for the moment: no behavior, only used as a
    # marker base class for isinstance checks.
###############################################################################
# XXX: Temporary solution to figure out if an estimator is a classifier
def _get_sub_estimator(estimator):
"""Returns the final estimator if there is any."""
if hasattr(estimator, 'estimator'):
# GridSearchCV and other CV-tuned estimators
return _get_sub_estimator(estimator.estimator)
if hasattr(estimator, 'steps'):
# Pipeline
return _get_sub_estimator(estimator.steps[-1][1])
return estimator
def is_classifier(estimator):
    """Returns True if the given estimator is (probably) a classifier."""
    # Unwrap any meta-estimator layers first, then check the mixin tag.
    return isinstance(_get_sub_estimator(estimator), ClassifierMixin)
| |
"""
Functions for identifying peaks in signals.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.six import xrange
from scipy.signal.wavelets import cwt, ricker
from scipy.stats import scoreatpercentile
__all__ = ['argrelmin', 'argrelmax', 'argrelextrema', 'find_peaks_cwt']
def _boolrelextrema(data, comparator, axis=0, order=1, mode='clip'):
"""
Calculate the relative extrema of `data`.
Relative extrema are calculated by finding locations where
``comparator(data[n], data[n+1:n+order+1])`` is True.
Parameters
----------
data : ndarray
Array in which to find the relative extrema.
comparator : callable
Function to use to compare two data points.
Should take 2 numbers as arguments.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n,n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated. 'wrap' (wrap around) or
'clip' (treat overflow as the same as the last (or first) element).
Default 'clip'. See numpy.take
Returns
-------
extrema : ndarray
Boolean array of the same shape as `data` that is True at an extrema,
False otherwise.
See also
--------
argrelmax, argrelmin
Examples
--------
>>> testdata = np.array([1,2,3,2,1])
>>> _boolrelextrema(testdata, np.greater, axis=0)
array([False, False, True, False, False], dtype=bool)
"""
if((int(order) != order) or (order < 1)):
raise ValueError('Order must be an int >= 1')
datalen = data.shape[axis]
locs = np.arange(0, datalen)
results = np.ones(data.shape, dtype=bool)
main = data.take(locs, axis=axis, mode=mode)
for shift in xrange(1, order + 1):
plus = data.take(locs + shift, axis=axis, mode=mode)
minus = data.take(locs - shift, axis=axis, mode=mode)
results &= comparator(main, plus)
results &= comparator(main, minus)
if(~results.any()):
return results
return results
def argrelmin(data, axis=0, order=1, mode='clip'):
    """
    Calculate the relative minima of `data`.

    This is a thin wrapper around `argrelextrema` with ``np.less`` as
    the comparator.

    Parameters
    ----------
    data : ndarray
        Array in which to find the relative minima.
    axis : int, optional
        Axis over which to select from `data`. Default is 0.
    order : int, optional
        How many points on each side to use for the comparison
        to consider ``comparator(n, n+x)`` to be True.
    mode : str, optional
        How the edges of the vector are treated.
        Available options are 'wrap' (wrap around) or 'clip' (treat overflow
        as the same as the last (or first) element).
        Default 'clip'. See numpy.take

    Returns
    -------
    extrema : tuple of ndarrays
        Indices of the minima in arrays of integers. ``extrema[k]`` is
        the array of indices of axis `k` of `data`. Note that the
        return value is a tuple even when `data` is one-dimensional.

    See Also
    --------
    argrelextrema, argrelmax

    Notes
    -----
    .. versionadded:: 0.11.0

    Examples
    --------
    >>> from scipy.signal import argrelmin
    >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
    >>> argrelmin(x)
    (array([1, 5]),)
    >>> y = np.array([[1, 2, 1, 2],
    ...               [2, 2, 0, 0],
    ...               [5, 3, 4, 4]])
    ...
    >>> argrelmin(y, axis=1)
    (array([0, 2]), array([2, 1]))
    """
    return argrelextrema(data, np.less, axis=axis, order=order, mode=mode)
def argrelmax(data, axis=0, order=1, mode='clip'):
    """
    Calculate the relative maxima of `data`.

    This is a thin wrapper around `argrelextrema` with ``np.greater`` as
    the comparator.

    Parameters
    ----------
    data : ndarray
        Array in which to find the relative maxima.
    axis : int, optional
        Axis over which to select from `data`. Default is 0.
    order : int, optional
        How many points on each side to use for the comparison
        to consider ``comparator(n, n+x)`` to be True.
    mode : str, optional
        How the edges of the vector are treated.
        Available options are 'wrap' (wrap around) or 'clip' (treat overflow
        as the same as the last (or first) element).
        Default 'clip'. See `numpy.take`.

    Returns
    -------
    extrema : tuple of ndarrays
        Indices of the maxima in arrays of integers. ``extrema[k]`` is
        the array of indices of axis `k` of `data`. Note that the
        return value is a tuple even when `data` is one-dimensional.

    See Also
    --------
    argrelextrema, argrelmin

    Notes
    -----
    .. versionadded:: 0.11.0

    Examples
    --------
    >>> from scipy.signal import argrelmax
    >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
    >>> argrelmax(x)
    (array([3, 6]),)
    >>> y = np.array([[1, 2, 1, 2],
    ...               [2, 2, 0, 0],
    ...               [5, 3, 4, 4]])
    ...
    >>> argrelmax(y, axis=1)
    (array([0]), array([1]))
    """
    return argrelextrema(data, np.greater, axis=axis, order=order, mode=mode)
def argrelextrema(data, comparator, axis=0, order=1, mode='clip'):
    """
    Calculate the relative extrema of `data`.

    Parameters
    ----------
    data : ndarray
        Array in which to find the relative extrema.
    comparator : callable
        Function to use to compare two data points.
        Should take 2 numbers as arguments.
    axis : int, optional
        Axis over which to select from `data`. Default is 0.
    order : int, optional
        How many points on each side to use for the comparison
        to consider ``comparator(n, n+x)`` to be True.
    mode : str, optional
        How the edges of the vector are treated. 'wrap' (wrap around) or
        'clip' (treat overflow as the same as the last (or first) element).
        Default is 'clip'. See `numpy.take`.

    Returns
    -------
    extrema : tuple of ndarrays
        Indices of the extrema in arrays of integers. ``extrema[k]`` is
        the array of indices of axis `k` of `data`. Note that the
        return value is a tuple even when `data` is one-dimensional.

    See Also
    --------
    argrelmin, argrelmax

    Notes
    -----
    .. versionadded:: 0.11.0

    Examples
    --------
    >>> from scipy.signal import argrelextrema
    >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
    >>> argrelextrema(x, np.greater)
    (array([3, 6]),)
    >>> y = np.array([[1, 2, 1, 2],
    ...               [2, 2, 0, 0],
    ...               [5, 3, 4, 4]])
    ...
    >>> argrelextrema(y, np.less, axis=1)
    (array([0, 2]), array([2, 1]))
    """
    # Build the boolean extrema mask, then convert it into per-axis
    # index arrays.
    mask = _boolrelextrema(data, comparator, axis=axis, order=order,
                           mode=mode)
    return np.where(mask)
def _identify_ridge_lines(matr, max_distances, gap_thresh):
    """
    Identify ridges in the 2-D matrix.

    Expect that the width of the wavelet feature increases with increasing row
    number.

    Parameters
    ----------
    matr : 2-D ndarray
        Matrix in which to identify ridge lines.
    max_distances : 1-D sequence
        At each row, a ridge line is only connected
        if the relative max at row[n] is within
        `max_distances`[n] from the relative max at row[n+1].
    gap_thresh : int
        If a relative maximum is not found within `max_distances`,
        there will be a gap. A ridge line is discontinued if
        there are more than `gap_thresh` points without connecting
        a new relative maximum.

    Returns
    -------
    ridge_lines : tuple
        Tuple of 2 1-D sequences. `ridge_lines`[ii][0] are the rows of the
        ii-th ridge-line, `ridge_lines`[ii][1] are the columns. Empty if none
        found. Each ridge-line will be sorted by row (increasing), but the
        order of the ridge lines is not specified.

    References
    ----------
    Bioinformatics (2006) 22 (17): 2059-2065.
    doi: 10.1093/bioinformatics/btl355
    http://bioinformatics.oxfordjournals.org/content/22/17/2059.long

    Examples
    --------
    >>> data = np.random.rand(5,5)
    >>> ridge_lines = _identify_ridge_lines(data, 1, 1)

    Notes
    -----
    This function is intended to be used in conjunction with `cwt`
    as part of `find_peaks_cwt`.
    """
    if len(max_distances) < matr.shape[0]:
        raise ValueError('Max_distances must have at least as many rows '
                         'as matr')

    # Per-row boolean mask of relative maxima along the columns.
    all_max_cols = _boolrelextrema(matr, np.greater, axis=1, order=1)
    # Highest row for which there are any relative maxima
    has_relmax = np.where(all_max_cols.any(axis=1))[0]
    if len(has_relmax) == 0:
        return []
    start_row = has_relmax[-1]
    # Each ridge line is a 3-tuple:
    # rows, cols, Gap number
    ridge_lines = [[[start_row],
                    [col],
                    0] for col in np.where(all_max_cols[start_row])[0]]
    final_lines = []
    # Walk upward from start_row towards row 0.
    rows = np.arange(start_row - 1, -1, -1)
    cols = np.arange(0, matr.shape[1])
    for row in rows:
        this_max_cols = cols[all_max_cols[row]]
        # Increment gap number of each line,
        # set it to zero later if appropriate
        for line in ridge_lines:
            line[2] += 1
        # XXX These should always be all_max_cols[row]
        # But the order might be different. Might be an efficiency gain
        # to make sure the order is the same and avoid this iteration
        prev_ridge_cols = np.array([line[1][-1] for line in ridge_lines])
        # Look through every relative maximum found at current row
        # Attempt to connect them with existing ridge lines.
        for ind, col in enumerate(this_max_cols):
            # If there is a previous ridge line within
            # the max_distance to connect to, do so.
            # Otherwise start a new one.
            line = None
            if len(prev_ridge_cols) > 0:
                diffs = np.abs(col - prev_ridge_cols)
                closest = np.argmin(diffs)
                if diffs[closest] <= max_distances[row]:
                    line = ridge_lines[closest]
            if line is not None:
                # Found a point close enough, extend current ridge line
                line[1].append(col)
                line[0].append(row)
                line[2] = 0
            else:
                new_line = [[row],
                            [col],
                            0]
                ridge_lines.append(new_line)
        # Remove the ridge lines with gap_number too high: they are
        # finalized (kept for output) but no longer extended.
        # XXX Modifying a list while iterating over it.
        # Should be safe, since we iterate backwards, but
        # still tacky.
        # Builtin range replaces the removed scipy._lib.six xrange shim.
        for ind in range(len(ridge_lines) - 1, -1, -1):
            line = ridge_lines[ind]
            if line[2] > gap_thresh:
                final_lines.append(line)
                del ridge_lines[ind]

    # Sort each line by row (ascending) and drop the gap counter.
    out_lines = []
    for line in (final_lines + ridge_lines):
        sortargs = np.array(np.argsort(line[0]))
        rows, cols = np.zeros_like(sortargs), np.zeros_like(sortargs)
        rows[sortargs] = line[0]
        cols[sortargs] = line[1]
        out_lines.append([rows, cols])
    return out_lines
def _filter_ridge_lines(cwt, ridge_lines, window_size=None, min_length=None,
min_snr=1, noise_perc=10):
"""
Filter ridge lines according to prescribed criteria. Intended
to be used for finding relative maxima.
Parameters
----------
cwt : 2-D ndarray
Continuous wavelet transform from which the `ridge_lines` were defined.
ridge_lines : 1-D sequence
Each element should contain 2 sequences, the rows and columns
of the ridge line (respectively).
window_size : int, optional
Size of window to use to calculate noise floor.
Default is ``cwt.shape[1] / 20``.
min_length : int, optional
Minimum length a ridge line needs to be acceptable.
Default is ``cwt.shape[0] / 4``, ie 1/4-th the number of widths.
min_snr : float, optional
Minimum SNR ratio. Default 1. The signal is the value of
the cwt matrix at the shortest length scale (``cwt[0, loc]``), the
noise is the `noise_perc`th percentile of datapoints contained within a
window of `window_size` around ``cwt[0, loc]``.
noise_perc : float, optional
When calculating the noise floor, percentile of data points
examined below which to consider noise. Calculated using
scipy.stats.scoreatpercentile.
References
----------
Bioinformatics (2006) 22 (17): 2059-2065. doi: 10.1093/bioinformatics/btl355
http://bioinformatics.oxfordjournals.org/content/22/17/2059.long
"""
num_points = cwt.shape[1]
if min_length is None:
min_length = np.ceil(cwt.shape[0] / 4)
if window_size is None:
window_size = np.ceil(num_points / 20)
window_size = int(window_size)
hf_window, odd = divmod(window_size, 2)
# Filter based on SNR
row_one = cwt[0, :]
noises = np.zeros_like(row_one)
for ind, val in enumerate(row_one):
window_start = max(ind - hf_window, 0)
window_end = min(ind + hf_window + odd, num_points)
noises[ind] = scoreatpercentile(row_one[window_start:window_end],
per=noise_perc)
def filt_func(line):
if len(line[0]) < min_length:
return False
snr = abs(cwt[line[0][0], line[1][0]] / noises[line[1][0]])
if snr < min_snr:
return False
return True
return list(filter(filt_func, ridge_lines))
def find_peaks_cwt(vector, widths, wavelet=None, max_distances=None,
                   gap_thresh=None, min_length=None, min_snr=1, noise_perc=10):
    """
    Attempt to find the peaks in a 1-D array.

    The general approach is to smooth `vector` by convolving it with
    `wavelet(width)` for each width in `widths`. Relative maxima which
    appear at enough length scales, and with sufficiently high SNR, are
    accepted.

    Parameters
    ----------
    vector : ndarray
        1-D array in which to find the peaks.
    widths : sequence
        1-D array of widths to use for calculating the CWT matrix. In general,
        this range should cover the expected width of peaks of interest.
    wavelet : callable, optional
        Should take two parameters and return a 1-D array to convolve
        with `vector`. The first parameter determines the number of points
        of the returned wavelet array, the second parameter is the scale
        (`width`) of the wavelet. Should be normalized and symmetric.
        Default is the ricker wavelet.
    max_distances : ndarray, optional
        At each row, a ridge line is only connected if the relative max at
        row[n] is within ``max_distances[n]`` from the relative max at
        ``row[n+1]``. Default value is ``widths/4``.
    gap_thresh : float, optional
        If a relative maximum is not found within `max_distances`,
        there will be a gap. A ridge line is discontinued if there are more
        than `gap_thresh` points without connecting a new relative maximum.
        Default is ``ceil(widths[0])``.
    min_length : int, optional
        Minimum length a ridge line needs to be acceptable.
        Default is ``cwt.shape[0] / 4``, ie 1/4-th the number of widths.
    min_snr : float, optional
        Minimum SNR ratio. Default 1. The signal is the value of
        the cwt matrix at the shortest length scale (``cwt[0, loc]``), the
        noise is the `noise_perc`th percentile of datapoints contained within a
        window of `window_size` around ``cwt[0, loc]``.
    noise_perc : float, optional
        When calculating the noise floor, percentile of data points
        examined below which to consider noise. Calculated using
        `stats.scoreatpercentile`. Default is 10.

    Returns
    -------
    peaks_indices : ndarray
        Indices of the locations in the `vector` where peaks were found.
        The list is sorted.

    See Also
    --------
    cwt

    Notes
    -----
    This approach was designed for finding sharp peaks among noisy data,
    however with proper parameter selection it should function well for
    different peak shapes.

    The algorithm is as follows:
    1. Perform a continuous wavelet transform on `vector`, for the supplied
       `widths`. This is a convolution of `vector` with `wavelet(width)` for
       each width in `widths`. See `cwt`
    2. Identify "ridge lines" in the cwt matrix. These are relative maxima
       at each row, connected across adjacent rows. See identify_ridge_lines
    3. Filter the ridge_lines using filter_ridge_lines.

    .. versionadded:: 0.11.0

    References
    ----------
    .. [1] Bioinformatics (2006) 22 (17): 2059-2065.
        doi: 10.1093/bioinformatics/btl355
        http://bioinformatics.oxfordjournals.org/content/22/17/2059.long

    Examples
    --------
    >>> from scipy import signal
    >>> xs = np.arange(0, np.pi, 0.05)
    >>> data = np.sin(xs)
    >>> peakind = signal.find_peaks_cwt(data, np.arange(1,10))
    >>> peakind, xs[peakind], data[peakind]
    ([32], array([ 1.6]), array([ 0.9995736]))
    """
    widths = np.asarray(widths)
    # Fill in unspecified tuning parameters from the requested widths.
    if gap_thresh is None:
        gap_thresh = np.ceil(widths[0])
    if max_distances is None:
        max_distances = widths / 4.0
    if wavelet is None:
        wavelet = ricker

    # 1) wavelet transform, 2) ridge-line identification, 3) filtering.
    cwt_dat = cwt(vector, wavelet, widths)
    ridge_lines = _identify_ridge_lines(cwt_dat, max_distances, gap_thresh)
    filtered = _filter_ridge_lines(cwt_dat, ridge_lines, min_length=min_length,
                                   min_snr=min_snr, noise_perc=noise_perc)
    # Each surviving ridge contributes the column of its starting point
    # (smallest scale) as the peak location.
    peak_locations = np.asarray([ridge[1][0] for ridge in filtered])
    peak_locations.sort()
    return peak_locations
| |
"""
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
import os
from ..base import (CommandLineInputSpec, CommandLine, traits,
TraitedSpec, File, StdOutCommandLine,
StdOutCommandLineInputSpec, isdefined)
from ...utils.filemanip import split_filename
class QBallMXInputSpec(StdOutCommandLineInputSpec):
    """Input specification for the Camino ``qballmx`` command.

    Each trait maps to a command-line flag through its ``argstr``; only
    ``scheme_file`` is mandatory.
    """
    # Reconstruction basis; 'rbf' is the trait default (usedefault=True).
    basistype = traits.Enum('rbf', 'sh', argstr='-basistype %s',
                            desc=('Basis function type. "rbf" to use radial basis functions '
                                  '"sh" to use spherical harmonics'), usedefault=True)
    # Scheme file describing the acquisition (required).
    scheme_file = File(exists=True, argstr='-schemefile %s', mandatory=True,
                       desc='Specifies the scheme file for the diffusion MRI data')
    # Spherical-harmonic order; only meaningful when basistype == 'sh'.
    order = traits.Int(argstr='-order %d', units='NA',
                       desc=('Specific to sh. Maximum order of the spherical harmonic series. '
                             'Default is 4.'))
    # The rbf* traits below are only meaningful when basistype == 'rbf'.
    rbfpointset = traits.Int(argstr='-rbfpointset %d', units='NA',
                             desc=('Specific to rbf. Sets the number of radial basis functions to use. '
                                   'The value specified must be present in the Pointsets directory. '
                                   'The default value is 246.'))
    rbfsigma = traits.Float(argstr='-rbfsigma %f', units='NA',
                            desc=('Specific to rbf. Sets the width of the interpolating basis functions. '
                                  'The default value is 0.2618 (15 degrees).'))
    smoothingsigma = traits.Float(argstr='-smoothingsigma %f', units='NA',
                                  desc=('Specific to rbf. Sets the width of the smoothing basis functions. '
                                        'The default value is 0.1309 (7.5 degrees).'))
class QBallMXOutputSpec(TraitedSpec):
    """Output specification for ``qballmx``: the reconstruction matrix."""
    qmat = File(exists=True, desc='Q-Ball reconstruction matrix')
class QBallMX(StdOutCommandLine):
    """
    Generates a reconstruction matrix for Q-Ball. Used in LinRecon with
    the same scheme file to reconstruct data.

    Example 1
    ---------
    To create a linear transform matrix using Spherical Harmonics (sh).

    >>> import nipype.interfaces.camino as cam
    >>> qballmx = cam.QBallMX()
    >>> qballmx.inputs.scheme_file = 'A.scheme'
    >>> qballmx.inputs.basistype = 'sh'
    >>> qballmx.inputs.order = 6
    >>> qballmx.run() # doctest: +SKIP

    Example 2
    ---------
    To create a linear transform matrix using Radial Basis Functions
    (rbf). This command uses the default setting of rbf sigma = 0.2618
    (15 degrees), data smoothing sigma = 0.1309 (7.5 degrees), rbf
    pointset 246

    >>> import nipype.interfaces.camino as cam
    >>> qballmx = cam.QBallMX()
    >>> qballmx.inputs.scheme_file = 'A.scheme'
    >>> qballmx.run() # doctest: +SKIP

    The linear transform matrix from any of these two examples can then
    be run over each voxel using LinRecon

    >>> qballcoeffs = cam.LinRecon()
    >>> qballcoeffs.inputs.in_file = 'SubjectA.Bfloat'
    >>> qballcoeffs.inputs.scheme_file = 'A.scheme'
    >>> qballcoeffs.inputs.qball_mat = 'A_qmat.Bdouble'
    >>> qballcoeffs.inputs.normalize = True
    >>> qballcoeffs.inputs.bgmask = 'brain_mask.nii'
    >>> qballcoeffs.run() # doctest: +SKIP
    """
    _cmd = 'qballmx'
    input_spec = QBallMXInputSpec
    output_spec = QBallMXOutputSpec

    def _list_outputs(self):
        # Resolve the generated matrix filename to an absolute path.
        outputs = self.output_spec().get()
        qmat_path = os.path.abspath(self._gen_outfilename())
        outputs['qmat'] = qmat_path
        return outputs

    def _gen_outfilename(self):
        # Output is named after the scheme file: <scheme>_qmat.Bdouble
        base = split_filename(self.inputs.scheme_file)[1]
        return '%s_qmat.Bdouble' % base
class LinReconInputSpec(StdOutCommandLineInputSpec):
    """Input specification for the Camino ``linrecon`` command.

    The three required files are positional arguments (``position`` 1-3);
    the boolean traits emit bare command-line switches.
    """
    # Voxel-order diffusion data (positional argument 1).
    in_file = File(exists=True, argstr='%s', mandatory=True, position=1,
                   desc='voxel-order data filename')
    # Acquisition scheme file (positional argument 2).
    scheme_file = File(exists=True, argstr='%s', mandatory=True, position=2,
                       desc='Specifies the scheme file for the diffusion MRI data')
    # Reconstruction matrix, e.g. produced by QBallMX (positional argument 3).
    qball_mat = File(exists=True, argstr='%s', mandatory=True, position=3,
                     desc='Linear transformation matrix.')
    normalize = traits.Bool(argstr='-normalize',
                            desc=('Normalize the measurements and discard '
                                  'the zero measurements before the linear transform.'))
    log = traits.Bool(argstr='-log',
                      desc=('Transform the log measurements rather than the '
                            'measurements themselves'))
    bgmask = File(exists=True, argstr='-bgmask %s', desc='background mask')
class LinReconOutputSpec(TraitedSpec):
    """Output specification for ``linrecon``: the transformed data file."""
    recon_data = File(exists=True, desc='Transformed data')
class LinRecon(StdOutCommandLine):
    """
    Runs a linear transformation in each voxel.

    Reads a linear transformation from the matrix file assuming the
    imaging scheme specified in the scheme file. Performs the linear
    transformation on the data in every voxel and outputs the result to
    the standard output. The ouput in every voxel is actually: ::

        [exit code, ln(S(0)), p1, ..., pR]

    where p1, ..., pR are the parameters of the reconstruction.
    Possible exit codes are:

    - 0. No problems.
    - 6. Bad data replaced by substitution of zero.

    The matrix must be R by N+M where N+M is the number of measurements
    and R is the number of parameters of the reconstruction. The matrix
    file contains binary double-precision floats. The matrix elements
    are stored row by row.

    Example
    ---------
    First run QBallMX and create a linear transform matrix using
    Spherical Harmonics (sh).

    >>> import nipype.interfaces.camino as cam
    >>> qballmx = cam.QBallMX()
    >>> qballmx.inputs.scheme_file = 'A.scheme'
    >>> qballmx.inputs.basistype = 'sh'
    >>> qballmx.inputs.order = 4
    >>> qballmx.run() # doctest: +SKIP

    Then run it over each voxel using LinRecon

    >>> qballcoeffs = cam.LinRecon()
    >>> qballcoeffs.inputs.in_file = 'SubjectA.Bfloat'
    >>> qballcoeffs.inputs.scheme_file = 'A.scheme'
    >>> qballcoeffs.inputs.qball_mat = 'A_qmat.Bdouble'
    >>> qballcoeffs.inputs.normalize = True
    >>> qballcoeffs.run() # doctest: +SKIP
    """
    _cmd = 'linrecon'
    input_spec = LinReconInputSpec
    output_spec = LinReconOutputSpec

    def _list_outputs(self):
        """Expose the reconstructed data file as ``recon_data``."""
        recon_path = os.path.abspath(self._gen_outfilename())
        result = self.output_spec().get()
        result['recon_data'] = recon_path
        return result

    def _gen_outfilename(self):
        """Name the output after the scheme file's base name."""
        base = split_filename(self.inputs.scheme_file)[1]
        return '%s_recondata.Bdouble' % base
class MESDInputSpec(StdOutCommandLineInputSpec):
    in_file = File(exists=True, argstr='-inputfile %s', mandatory=True, position=1,
                   desc='voxel-order data filename')
    # inverter and inverter_param are positional so they always appear
    # adjacent and in order on the generated command line.
    inverter = traits.Enum('SPIKE', 'PAS', argstr='-filter %s', position=2, mandatory=True,
                           desc=('The inversion index specifies the type of inversion to perform on the data.'
                                 'The currently available choices are:'
                                 'Inverter name | Inverter parameters'
                                 '---------------|------------------'
                                 'SPIKE | bd (b-value x diffusivity along the fibre.)'
                                 'PAS | r'))
    inverter_param = traits.Float(argstr='%f', units='NA', position=3, mandatory=True,
                                  desc=('Parameter associated with the inverter. Cf. inverter description for'
                                        'more information.'))
    # fastmesd is only meaningful together with mepointset, hence requires=.
    fastmesd = traits.Bool(argstr='-fastmesd', requires=['mepointset'],
                           desc=('Turns off numerical integration checks and fixes the integration point set size at that of'
                                 'the index specified by -basepointset..'))
    mepointset = traits.Int(argstr='-mepointset %d', units='NA',
                            desc=('Use a set of directions other than those in the scheme file for the deconvolution kernel.'
                                  'The number refers to the number of directions on the unit sphere. For example, '
                                  '"-mepointset 54" uses the directions in "camino/PointSets/Elec054.txt".'))
    scheme_file = File(exists=True, argstr='-schemefile %s', mandatory=True,
                       desc='Specifies the scheme file for the diffusion MRI data')
    bgmask = File(exists=True, argstr='-bgmask %s', desc='background mask')
    inputdatatype = traits.Enum('float', 'char', 'short', 'int', 'long', 'double', argstr='-inputdatatype %s',
                                desc=('Specifies the data type of the input file: "char", "short", "int", "long",'
                                      '"float" or "double". The input file must have BIG-ENDIAN ordering.'
                                      'By default, the input type is "float".'))
class MESDOutputSpec(TraitedSpec):
    # Captured from standard output by StdOutCommandLine.
    mesd_data = File(exists=True, desc='MESD data')
class MESD(StdOutCommandLine):
    """
    MESD is a general program for maximum entropy spherical deconvolution.
    It also runs PASMRI, which is a special case of spherical deconvolution.
    The input data must be in voxel order.

    The format of the output in each voxel is:
    { exitcode, ln(A^star(0)), lambda_0, lambda_1, ..., lambda_N }

    The exitcode contains the results of three tests. The first test thresholds
    the maximum relative error between the numerical integrals computed at con-
    vergence and those computed using a larger test point set; if the error is
    greater than a threshold the exitcode is increased from zero to one as a
    warning; if it is greater than a larger threshold the exitcode is increased to
    two to suggest failure. The second test thresholds the predicted error in
    numerical integrals computed using the test point set; if the predicted error
    is greater than a threshold the exitcode is increased by 10. The third test
    thresholds the RMS error between the measurements and their predictions from
    the fitted deconvolution; if the errors are greater than a threshold, the exit
    code is increased by 100. An exitcode of 112 means that all three tests were
    failed and the result is likely to be unreliable. If all is well the exitcode
    is zero. Results are often still reliable even if one or two of the tests are
    failed.

    Other possible exitcodes are:

    - 5 - The optimization failed to converge
    - -1 - Background
    - -100 - Something wrong in the MRI data, e.g. negative or zero measurements,
      so that the optimization could not run.

    The standard MESD implementation is computationally demanding, particularly
    as the number of measurements increases (computation is approximately O(N^2),
    where N is the number of measurements). There are two ways to obtain significant
    computational speed-up:

    i) Turn off error checks and use a small point set for computing numerical
    integrals in the algorithm by adding the flag -fastmesd. Sakaie CDMRI 2008
    shows that using the smallest point set (-basepointset 0) with no
    error checks usually has only a minor effect on the output of the algorithm,
    but provides a major reduction in computation time. You can increase the point
    set size using -basepointset with an argument higher than 0, which may produce
    better results in some voxels, but will increase computation time, which
    approximately doubles every time the point set index increases by 1.

    ii) Reduce the complexity of the maximum entropy encoding using -mepointset <X>.
    By default <X> = N, the number of measurements, and is the number of parameters
    in the max. ent. representation of the output function, ie the number of
    lambda parameters, as described in Jansons and Alexander Inverse Problems 2003.
    However, we can represent the function using less components and <X> here
    specifies the number of lambda parameters. To obtain speed-up, set <X>
    < N; complexity become O(<X>^2) rather than O(N^2). Note that <X> must be chosen
    so that the camino/PointSets directory contains a point set with that number
    of elements. When -mepointset decreases, the numerical integration checks
    make less and less of a difference and smaller point sets for numerical
    integration (see -basepointset) become adequate. So when <X> is low -fastmesd is
    worth using to get even more speed-up.

    The choice of <X> is a parameter of the technique. Too low and you lose angular
    resoloution; too high and you see no computational benefit and may even suffer
    from overfitting. Empirically, we have found that <X>=16 often gives good
    results and good speed up, but it is worth trying a few values a comparing
    performance. The reduced encoding is described in the following ISMRM abstract:
    Sweet and Alexander "Reduced Encoding Persistent Angular Structure" 572 ISMRM 2010.

    Example
    ---------
    Run MESD on every voxel of the data file SubjectA.Bfloat using the PASMRI kernel.

    >>> import nipype.interfaces.camino as cam
    >>> mesd = cam.MESD()
    >>> mesd.inputs.in_file = 'SubjectA.Bfloat'
    >>> mesd.inputs.scheme_file = 'A.scheme'
    >>> mesd.inputs.inverter = 'PAS'
    >>> mesd.inputs.inverter_param = 1.4
    >>> mesd.run() # doctest: +SKIP
    """
    _cmd = 'mesd'
    input_spec = MESDInputSpec
    output_spec = MESDOutputSpec

    def _list_outputs(self):
        """Expose the deconvolution result file as ``mesd_data``."""
        mesd_path = os.path.abspath(self._gen_outfilename())
        result = self.output_spec().get()
        result['mesd_data'] = mesd_path
        return result

    def _gen_outfilename(self):
        """Name the output after the scheme file's base name."""
        base = split_filename(self.inputs.scheme_file)[1]
        return '%s_MESD.Bdouble' % base
class SFPeaksInputSpec(StdOutCommandLineInputSpec):
    in_file = File(exists=True, argstr='-inputfile %s', mandatory=True,
                   desc='Voxel-order data of spherical functions')
    # Which extra attributes are required depends on inputmodel; see desc.
    inputmodel = traits.Enum('sh', 'maxent', 'rbf', argstr='-inputmodel %s', mandatory=True,
                             desc=('Type of functions input via in_file. Currently supported options are: '
                                   '  sh - Spherical harmonic series. Specify the maximum order of the SH series '
                                   '       with the "order" attribute if different from the default of 4. '
                                   '  maxent - Maximum entropy representations output by MESD. The reconstruction '
                                   '           directions input to MESD must be specified. By default this is the '
                                   '           same set of gradient directions (excluding zero gradients) in the '
                                   '           scheme file, so specify the "schemefile" attribute unless the '
                                   '           "mepointset" attribute was set in MESD. '
                                   '  rbf - Sums of radial basis functions. Specify the pointset with the attribute '
                                   '        "rbfpointset" if different from the default. See QBallMX.'))
    order = traits.Int(argstr='-order %d', units='NA',
                       desc='Specific to sh. Maximum order of the spherical harmonic series.')
    scheme_file = File(exists=True, argstr='%s',
                       desc='Specific to maxent. Specifies the scheme file.')
    rbfpointset = traits.Int(argstr='-rbfpointset %d', units='NA',
                             desc=('Specific to rbf. Sets the number of radial basis functions to use. '
                                   'The value specified must be present in the Pointsets directory. '
                                   'The default value is 246.'))
    mepointset = traits.Int(argstr='-mepointset %d', units='NA',
                            desc=('Use a set of directions other than those in the scheme file for the deconvolution '
                                  'kernel. The number refers to the number of directions on the unit sphere. '
                                  'For example, "mepointset = 54" uses the directions in "camino/PointSets/Elec054.txt" '
                                  'Use this option only if you told MESD to use a custom set of directions with the same '
                                  'option. Otherwise, specify the scheme file with the "schemefile" attribute.'))
    numpds = traits.Int(argstr='-numpds %d', units='NA',
                        desc='The largest number of peak directions to output in each voxel.')
    noconsistencycheck = traits.Bool(argstr='-noconsistencycheck',
                                     desc='Turns off the consistency check. The output shows all consistencies as true.')
    # Peak-finding tuning knobs: search radius and sampling density trade
    # accuracy against speed (see the SFPeaks class docstring).
    searchradius = traits.Float(argstr='-searchradius %f', units='NA',
                                desc='The search radius in the peak finding algorithm. The default is 0.4 (cf. "density")')
    density = traits.Int(argstr='-density %d', units='NA',
                         desc=('The number of randomly rotated icosahedra to use in constructing the set of points for '
                               'random sampling in the peak finding algorithm. Default is 1000, which works well for very '
                               'spiky maxent functions. For other types of function, it is reasonable to set the density '
                               'much lower and increase the search radius slightly, which speeds up the computation.'))
    pointset = traits.Int(argstr='-pointset %d', units='NA',
                          desc=('To sample using an evenly distributed set of points instead. The integer can be '
                                '0, 1, ..., 7. Index 0 gives 1082 points, 1 gives 1922, 2 gives 3002, 3 gives 4322, '
                                '4 gives 5882, 5 gives 8672, 6 gives 12002, 7 gives 15872.'))
    pdthresh = traits.Float(argstr='-pdthresh %f', units='NA',
                            desc=('Base threshold on the actual peak direction strength divided by the mean of the '
                                  'function. The default is 1.0 (the peak must be equal or greater than the mean).'))
    stdsfrommean = traits.Float(argstr='-stdsfrommean %f', units='NA',
                                desc=('This is the number of standard deviations of the function to be added to the '
                                      '"pdthresh" attribute in the peak directions pruning.'))
class SFPeaksOutputSpec(TraitedSpec):
    # Captured from standard output by StdOutCommandLine.
    peaks = File(exists=True, desc='Peaks of the spherical functions.')
class SFPeaks(StdOutCommandLine):
    """
    Finds the peaks of spherical functions.

    This utility reads coefficients of the spherical functions and
    outputs a list of peak directions of the function. It computes the
    value of the function at each of a set of sample points. Then it
    finds local maxima by finding all points at which the function is
    larger than for any other point within a fixed search radius (the
    default is 0.4). The utility then uses Powell's algorithm to
    optimize the position of each local maximum. Finally the utility
    removes duplicates and tiny peaks with function value smaller than
    some threshold, which is the mean of the function plus some number
    of standard deviations. By default the program checks for con-
    sistency with a second set of starting points, but skips the
    optimization step. To speed up execution, you can turn off the con-
    sistency check by setting the noconsistencycheck flag to True.

    By default, the utility constructs a set of sample points by
    randomly rotating a unit icosahedron repeatedly (the default is 1000
    times, which produces a set of 6000 points) and concatenating the
    lists of vertices. The 'pointset = <index>' attribute can tell the
    utility to use an evenly distributed set of points (index 0 gives
    1082 points, 1 gives 1922, 2 gives 4322, 3 gives 8672, 4 gives 15872,
    5 gives 32762, 6 gives 72032), which is quicker, because you can get
    away with fewer points. We estimate that you can use a factor of 2.5
    less evenly distributed points than randomly distributed points and
    still expect similar performance levels.

    .. NOTE(review): the point counts listed above disagree with the
       ``pointset`` trait description in SFPeaksInputSpec (which lists
       indices 0-7 with different sizes) — confirm against the Camino
       ``sfpeaks`` documentation.

    The output for each voxel is:

    - exitcode (inherited from the input data).
    - ln(A(0))
    - number of peaks found.
    - flag for consistency with a repeated run (number of directions is
      the same and the directions are the same to within a threshold.)
    - mean(f).
    - std(f).
    - direction 1 (x, y, z, f, H00, H01, H10, H11).
    - direction 2 (x, y, z, f, H00, H01, H10, H11).
    - direction 3 (x, y, z, f, H00, H01, H10, H11).

    H is the Hessian of f at the peak. It is the matrix: ::

        [d^2f/ds^2 d^2f/dsdt]
        [d^2f/dtds d^2f/dt^2]
        = [H00 H01]
          [H10 H11]

    where s and t are orthogonal coordinates local to the peak.

    By default the maximum number of peak directions output in each
    voxel is three. If less than three directions are found, zeros are
    output for later directions. The peaks are ordered by the value of
    the function at the peak. If more than the maximum number of
    directions are found only the strongest ones are output. The maximum
    number can be changed setting the 'numpds' attribute.

    The utility can read various kinds of spherical function, but must
    be told what kind of function is input using the 'inputmodel'
    attribute. The description of the 'inputmodel' attribute lists
    additional information required by SFPeaks for each input model.

    Example
    ---------
    First run QBallMX and create a linear transform matrix using
    Spherical Harmonics (sh).

    >>> import nipype.interfaces.camino as cam
    >>> sf_peaks = cam.SFPeaks()
    >>> sf_peaks.inputs.in_file = 'A_recon_params.Bdouble'
    >>> sf_peaks.inputs.inputmodel = 'sh'
    >>> sf_peaks.inputs.order = 4
    >>> sf_peaks.inputs.density = 100
    >>> sf_peaks.inputs.searchradius = 1.0
    >>> sf_peaks.run() # doctest: +SKIP
    """
    _cmd = 'sfpeaks'
    input_spec = SFPeaksInputSpec
    output_spec = SFPeaksOutputSpec

    def _list_outputs(self):
        """Expose the peak-direction file as ``peaks``."""
        peaks_path = os.path.abspath(self._gen_outfilename())
        result = self.output_spec().get()
        result['peaks'] = peaks_path
        return result

    def _gen_outfilename(self):
        """Name the output after the input file's base name."""
        base = split_filename(self.inputs.in_file)[1]
        return '%s_peaks.Bdouble' % base
| |
# Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from osc_lib.tests import utils as osc_utils
from saharaclient.api import cluster_templates as api_ct
from saharaclient.api import node_group_templates as api_ngt
from saharaclient.osc.v1 import cluster_templates as osc_ct
from saharaclient.tests.unit.osc.v1 import fakes
# Canned cluster-template payload shared by every test case below; the tests
# build both their mock return values and their expected output from it.
CT_INFO = {
    "description": "Cluster template for tests",
    "use_autoconfig": True,
    "is_default": False,
    "node_groups": [
        {
            "count": 2,
            "id": "d29631fc-0fad-434b-80aa-7a3e9526f57c",
            "name": "fakeng",
            "plugin_name": 'fake',
            "hadoop_version": '0.1'
        }
    ],
    "hadoop_version": "0.1",
    "is_public": False,
    "plugin_name": "fake",
    "id": "0647061f-ab98-4c89-84e0-30738ea55750",
    "anti_affinity": [],
    "name": "template",
    "is_protected": False,
    "domain_name": 'domain.org.'
}
class TestClusterTemplates(fakes.TestDataProcessing):
    """Base class wiring fresh cluster/node-group template mocks per test."""

    def setUp(self):
        super(TestClusterTemplates, self).setUp()
        dp_client = self.app.client_manager.data_processing
        self.ct_mock = dp_client.cluster_templates
        self.ngt_mock = dp_client.node_group_templates
        for managed_mock in (self.ct_mock, self.ngt_mock):
            managed_mock.reset_mock()
        self.app.api_version['data_processing'] = '1'
class TestCreateClusterTemplate(TestClusterTemplates):
    """Tests for the ``cluster template create`` OSC command."""
    # TODO(apavlov): check for creation with --json

    def setUp(self):
        super(TestCreateClusterTemplate, self).setUp()
        self.ct_mock.create.return_value = api_ct.ClusterTemplate(
            None, CT_INFO)
        # Node-group name -> template resolution goes through find_unique.
        self.ngt_mock.find_unique.return_value = api_ngt.NodeGroupTemplate(
            None, CT_INFO['node_groups'][0])
        self.app.api_version['data_processing'] = '1.1'
        # Command to test
        self.cmd = osc_ct.CreateClusterTemplate(self.app, None)

    def test_ct_create_minimum_options(self):
        """Only name and node-groups given: all other fields get defaults."""
        arglist = ['--name', 'template', '--node-groups', 'fakeng:2']
        verifylist = [('name', 'template'),
                      ('node_groups', ['fakeng:2'])]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        # Check that correct arguments were passed
        self.ct_mock.create.assert_called_once_with(
            description=None, hadoop_version='0.1', is_protected=False,
            is_public=False, name='template', node_groups=[
                {'count': 2, 'name': 'fakeng',
                 'node_group_template_id':
                     'd29631fc-0fad-434b-80aa-7a3e9526f57c'}],
            plugin_name='fake', use_autoconfig=False, shares=None,
            cluster_configs=None, domain_name=None)

    def test_ct_create_all_options(self):
        """Every flag supplied: verify API call, columns and row data."""
        arglist = ['--name', 'template', '--node-groups', 'fakeng:2',
                   '--anti-affinity', 'datanode',
                   '--description', 'descr',
                   '--autoconfig', '--public', '--protected',
                   '--domain-name', 'domain.org.']
        verifylist = [('name', 'template'),
                      ('node_groups', ['fakeng:2']),
                      ('description', 'descr'), ('autoconfig', True),
                      ('public', True), ('protected', True),
                      ('domain_name', 'domain.org.')]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        # Check that correct arguments were passed
        self.ct_mock.create.assert_called_once_with(
            description='descr', hadoop_version='0.1', is_protected=True,
            is_public=True, name='template', node_groups=[
                {'count': 2, 'name': 'fakeng',
                 'node_group_template_id':
                     'd29631fc-0fad-434b-80aa-7a3e9526f57c'}],
            plugin_name='fake', use_autoconfig=True, shares=None,
            cluster_configs=None, domain_name='domain.org.')
        # Check that columns are correct
        expected_columns = ('Anti affinity', 'Description',
                            'Domain name', 'Id', 'Is default',
                            'Is protected', 'Is public', 'Name', 'Node groups',
                            'Plugin name', 'Plugin version', 'Use autoconfig')
        self.assertEqual(expected_columns, columns)
        # Check that data is correct
        expected_data = ('', 'Cluster template for tests', 'domain.org.',
                         '0647061f-ab98-4c89-84e0-30738ea55750', False, False,
                         False, 'template', 'fakeng:2', 'fake', '0.1', True)
        self.assertEqual(expected_data, data)
class TestListClusterTemplates(TestClusterTemplates):
    """Tests for the ``cluster template list`` OSC command."""

    def setUp(self):
        super(TestListClusterTemplates, self).setUp()
        self.ct_mock.list.return_value = [api_ct.ClusterTemplate(
            None, CT_INFO)]
        # Command to test
        self.cmd = osc_ct.ListClusterTemplates(self.app, None)

    def test_ct_list_no_options(self):
        """Default listing shows the short column set."""
        arglist = []
        verifylist = []
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        # Check that columns are correct
        expected_columns = ['Name', 'Id', 'Plugin name', 'Plugin version']
        self.assertEqual(expected_columns, columns)
        # Check that data is correct
        expected_data = [('template', '0647061f-ab98-4c89-84e0-30738ea55750',
                          'fake', '0.1')]
        self.assertEqual(expected_data, list(data))

    def test_ct_list_long(self):
        """--long adds node groups and description columns."""
        arglist = ['--long']
        verifylist = [('long', True)]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        # Check that columns are correct
        expected_columns = ['Name', 'Id', 'Plugin name', 'Plugin version',
                            'Node groups', 'Description']
        self.assertEqual(expected_columns, columns)
        # Check that data is correct
        expected_data = [('template', '0647061f-ab98-4c89-84e0-30738ea55750',
                          'fake', '0.1', 'fakeng:2',
                          'Cluster template for tests')]
        self.assertEqual(expected_data, list(data))

    def test_ct_list_extra_search_opts(self):
        """Search filters are parsed and the short column set is kept."""
        arglist = ['--plugin', 'fake', '--plugin-version', '0.1', '--name',
                   'templ']
        verifylist = [('plugin', 'fake'), ('plugin_version', '0.1'),
                      ('name', 'templ')]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        # Check that columns are correct
        expected_columns = ['Name', 'Id', 'Plugin name', 'Plugin version']
        self.assertEqual(expected_columns, columns)
        # Check that data is correct
        expected_data = [('template', '0647061f-ab98-4c89-84e0-30738ea55750',
                          'fake', '0.1')]
        self.assertEqual(expected_data, list(data))
class TestShowClusterTemplate(TestClusterTemplates):
    """Tests for the ``cluster template show`` OSC command."""

    def setUp(self):
        super(TestShowClusterTemplate, self).setUp()
        self.ct_mock.find_unique.return_value = api_ct.ClusterTemplate(
            None, CT_INFO)
        # Command to test
        self.cmd = osc_ct.ShowClusterTemplate(self.app, None)

    def test_ct_show(self):
        """Show resolves the template by name and formats all fields."""
        arglist = ['template']
        verifylist = [('cluster_template', 'template')]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        # Check that correct arguments were passed
        self.ct_mock.find_unique.assert_called_once_with(name='template')
        # Check that columns are correct
        expected_columns = ('Anti affinity', 'Description',
                            'Domain name', 'Id', 'Is default',
                            'Is protected', 'Is public', 'Name', 'Node groups',
                            'Plugin name', 'Plugin version', 'Use autoconfig')
        self.assertEqual(expected_columns, columns)
        # Check that data is correct
        expected_data = (
            '', 'Cluster template for tests', 'domain.org.',
            '0647061f-ab98-4c89-84e0-30738ea55750', False, False, False,
            'template', 'fakeng:2', 'fake', '0.1', True)
        self.assertEqual(expected_data, data)
class TestDeleteClusterTemplate(TestClusterTemplates):
    """Checks that deletion resolves a template name to its id."""

    def setUp(self):
        super(TestDeleteClusterTemplate, self).setUp()
        template = api_ct.ClusterTemplate(None, CT_INFO)
        self.ct_mock.find_unique.return_value = template
        # Command under test.
        self.cmd = osc_ct.DeleteClusterTemplate(self.app, None)

    def test_ct_delete(self):
        args = ['template']
        expected_parse = [('cluster_template', ['template'])]
        parsed_args = self.check_parser(self.cmd, args, expected_parse)
        self.cmd.take_action(parsed_args)
        # The delete call must receive the id looked up via find_unique.
        self.ct_mock.delete.assert_called_once_with(
            '0647061f-ab98-4c89-84e0-30738ea55750')
class TestUpdateClusterTemplate(TestClusterTemplates):
    """Tests for the ``cluster template update`` OSC command."""
    # TODO(apavlov): check for update with --json

    def setUp(self):
        super(TestUpdateClusterTemplate, self).setUp()
        self.ct_mock.update.return_value = api_ct.ClusterTemplate(
            None, CT_INFO)
        self.ct_mock.find_unique.return_value = api_ct.ClusterTemplate(
            None, CT_INFO)
        self.ngt_mock.find_unique.return_value = api_ngt.NodeGroupTemplate(
            None, CT_INFO['node_groups'][0])
        # Command to test
        self.cmd = osc_ct.UpdateClusterTemplate(self.app, None)

    def test_ct_update_no_options(self):
        """The positional template argument is mandatory."""
        arglist = []
        verifylist = []
        self.assertRaises(osc_utils.ParserException, self.check_parser,
                          self.cmd, arglist, verifylist)

    def test_ct_update_nothing_updated(self):
        """With no flags, update is called with the id only."""
        arglist = ['template']
        verifylist = [('cluster_template', 'template')]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        self.ct_mock.update.assert_called_once_with(
            '0647061f-ab98-4c89-84e0-30738ea55750')

    def test_ct_update_all_options(self):
        """Every updatable flag supplied: verify API call, columns, data."""
        arglist = ['template', '--name', 'template', '--node-groups',
                   'fakeng:2', '--anti-affinity', 'datanode',
                   '--description', 'descr', '--autoconfig-enable',
                   '--public', '--protected', '--domain-name', 'domain.org.']
        verifylist = [('cluster_template', 'template'), ('name', 'template'),
                      ('node_groups', ['fakeng:2']),
                      ('description', 'descr'), ('use_autoconfig', True),
                      ('is_public', True), ('is_protected', True),
                      ('domain_name', 'domain.org.')]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        columns, data = self.cmd.take_action(parsed_args)
        # Check that correct arguments were passed
        self.ct_mock.update.assert_called_once_with(
            '0647061f-ab98-4c89-84e0-30738ea55750', description='descr',
            hadoop_version='0.1', is_protected=True, is_public=True,
            name='template',
            node_groups=[
                {'count': 2, 'name': 'fakeng',
                 'node_group_template_id':
                     'd29631fc-0fad-434b-80aa-7a3e9526f57c'}],
            plugin_name='fake', use_autoconfig=True, domain_name='domain.org.')
        # Check that columns are correct
        expected_columns = ('Anti affinity', 'Description',
                            'Domain name', 'Id', 'Is default',
                            'Is protected', 'Is public', 'Name', 'Node groups',
                            'Plugin name', 'Plugin version', 'Use autoconfig')
        self.assertEqual(expected_columns, columns)
        # Check that data is correct
        expected_data = ('', 'Cluster template for tests', 'domain.org.',
                         '0647061f-ab98-4c89-84e0-30738ea55750', False, False,
                         False, 'template', 'fakeng:2', 'fake', '0.1', True)
        self.assertEqual(expected_data, data)

    def test_ct_update_private_unprotected(self):
        """--private/--unprotected map to is_public/is_protected False."""
        arglist = ['template', '--private', '--unprotected']
        verifylist = [('cluster_template', 'template'),
                      ('is_protected', False), ('is_public', False)]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        self.cmd.take_action(parsed_args)
        self.ct_mock.update.assert_called_once_with(
            '0647061f-ab98-4c89-84e0-30738ea55750', is_protected=False,
            is_public=False)
| |
import mmd as MMD
from model_mmd import MMD_GAN, tf, np
from utils import variable_summaries
from cholesky import me_loss
from ops import batch_norm, conv2d, deconv2d, linear, lrelu
from glob import glob
import os
import time
class ME_GAN(MMD_GAN):
def __init__(self, sess, config, is_crop=True,
             batch_size=64, output_size=64,
             z_dim=100,
             gfc_dim=1024, dfc_dim=1024, c_dim=3, dataset_name='default',
             checkpoint_dir=None, sample_dir=None, log_dir=None, data_dir=None):
    """
    Args:
        sess: TensorFlow session
        config: experiment configuration object (kernels, suffixes, paths)
        batch_size: The size of batch. Should be specified before training.
        output_size: (optional) The resolution in pixels of the images. [64]
        z_dim: (optional) Dimension of dim for Z. [100]
        gfc_dim: (optional) Dimension of gen units for fully connected layer. [1024]
        dfc_dim: (optional) Dimension of discrim units for fully connected layer. [1024]
        c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [3]
    """
    # Zero-filled image batch; shape matches a batch of output images.
    self.asi = [np.zeros([batch_size, output_size, output_size, c_dim])]
    # BUG FIX: this super() call previously named the undefined class
    # ``me_DCGAN``, so constructing ME_GAN raised NameError. The enclosing
    # class is ME_GAN.
    super(ME_GAN, self).__init__(sess=sess, config=config, is_crop=is_crop,
        batch_size=batch_size, output_size=output_size, z_dim=z_dim,
        gfc_dim=gfc_dim, dfc_dim=dfc_dim,
        c_dim=c_dim, dataset_name=dataset_name, checkpoint_dir=checkpoint_dir,
        sample_dir=sample_dir, log_dir=log_dir, data_dir=data_dir)
def test_location_initializer(self):
    """Build the initial batch of ME test-location images from the dataset.

    Returns:
        np.ndarray: the first ``batch_size`` dataset entries as float32.

    # NOTE(review): for the glob branch data_X holds filenames, so the
    # float32 conversion presumably relies on callers never hitting it
    # with path data — confirm against the dataset loaders.
    """
    if 'lsun' in self.config.dataset:
        # generator = self.gen_train_samples_from_lmdb()
        data_X = self.additional_sample_images
    # BUG FIX: the branches below started a new ``if`` chain, so for lsun
    # datasets the assignment above was always clobbered by the final
    # ``else`` glob branch (which yields file paths, not image arrays).
    elif self.config.dataset == 'mnist':
        data_X, data_y = self.load_mnist()
    elif self.config.dataset == 'cifar10':
        data_X, data_y = self.load_cifar10()
    elif (self.config.dataset == 'GaussianMix'):
        data_X, _, __ = self.load_GaussianMix()
    else:
        data_X = glob(os.path.join("./data", self.config.dataset, "*.jpg"))
    real = np.asarray(data_X[:self.batch_size], dtype=np.float32)
    return real
    # sample_z = np.random.uniform(-1, 1, size=(self.sample_size , self.z_dim))
    # fake = self.sess.run(self.sampler, feed_dict={self.z: sample_z})
    # p = np.random.binomial(1, .5, size=(self.batch_size, 1, 1, 1))
    # return p * real + (1 - p) * fake
def set_loss(self, G, images):
    """Build the mean-embedding (ME) loss graph for generated vs. real images.

    Args:
        G: generator output tensor (batch of generated images).
        images: tensor of real training images.

    Side effects: sets self.optim_name, self.optim_loss, self.me_test_images,
    self.me_test_locations, and (via add_gradient_penalty) the g/d losses.
    """
    if self.config.kernel == '':
        # No kernel configured: raw ME loss directly on G and images.
        me = lambda gg, ii: me_loss(
            gg, ii, self.df_dim, self.batch_size,
            with_inv=(self.config.gradient_penalty == 0)
        )
        self.optim_name = 'me loss'
        with tf.variable_scope('loss'):
            self.optim_loss, Z = me(G, images)
    else:
        # Randomly pick which batch indices serve as test locations.
        im_id = tf.constant(np.random.choice(np.arange(self.batch_size), self.config.test_locations))
        if 'optme' in self.config.model:
            # Test locations are trainable variables, initialized from data
            # and randomly mixed with generator samples via coin-flip p.
            with tf.variable_scope('discriminator'):
                self.me_test_images = tf.get_variable(
                    'd_me_test_images',
                    # [self.batch_size, self.output_size, self.output_size, self.c_dim],
                    initializer=self.test_location_initializer()
                )
            p = tf.cast(tf.reshape(tf.multinomial([[.5, .5]], self.batch_size),
                [self.batch_size, 1, 1, 1]), tf.float32)
            self.me_test_images = p * self.me_test_images + (1 - p) * self.sampler
            meti = tf.clip_by_value(tf.gather(self.me_test_images, im_id), 0, 1)
            bloc = int(np.floor(np.sqrt(self.config.test_locations)))
            tf.summary.image("train/me test image", self.imageRearrange(meti, bloc))
        else:
            # Test locations are fed at run time.
            self.me_test_images = tf.placeholder(
                tf.float32,
                [self.batch_size, self.output_size, self.output_size, self.c_dim],
                name='me_test_images'
            )
        if self.config.dc_discriminator:
            # Embed test images through the (reused) discriminator.
            metl = self.discriminator(self.me_test_images, reuse=True)
        else:
            # Use the flattened raw pixels as the embedding.
            metl = tf.reshape(self.me_test_images, [self.batch_size, -1])
        self.me_test_locations = tf.gather(metl, im_id)
        assert self.config.kernel in ['Euclidean', 'mix_rq', 'mix_rbf'], \
            "Kernel '%s' not supported" % self.config.kernel
        # Resolve e.g. MMD._mix_rq_kernel by name from the config.
        kernel = getattr(MMD, '_%s_kernel' % self.config.kernel)
        k_test = lambda gg: kernel(gg, self.me_test_locations, K_XY_only=True)
        self.optim_name = self.config.kernel + ' kernel mean embedding loss'
        with tf.variable_scope('loss'):
            self.optim_loss, Z = me_loss(
                k_test(G), k_test(images),
                self.df_dim, self.batch_size,
                with_inv=('vn' in self.config.suffix),
                with_Z=True
            )
        if 'full_gp' in self.config.suffix:
            # Penalize gradients of the full kernel witness (parent impl).
            super(ME_GAN, self).add_gradient_penalty(kernel, G, images)
        else:
            # Penalize gradients only at the sampled test locations.
            self.add_gradient_penalty(k_test, G, images, Z)
def add_gradient_penalty(self, k_test, fake_data, real_data, Z):
    """Add a gradient penalty on the witness function and set g/d losses.

    Interpolates between real and fake batches, evaluates the kernel
    witness ``k_test(x_hat) @ Z`` there, and penalizes the mean squared
    deviation of the gradient norm from 1.

    Args:
        k_test: callable mapping a batch to kernel evaluations at the
            ME test locations.
        fake_data: generator output batch.
        real_data: real image batch.
        Z: witness weights returned by me_loss.

    Side effects: sets self.g_loss, self.d_loss, optionally self.gp,
    appends to self.optim_name, and emits TF summaries.
    """
    alpha = tf.random_uniform(shape=[self.batch_size, 1], minval=0., maxval=1.)
    # The config suffix restricts where along the real-fake line the
    # penalty is evaluated.
    if 'mid' in self.config.suffix:
        # Sample only the middle of the interpolation line [.4, .6].
        alpha = .4 + .2 * alpha
    elif 'edges' in self.config.suffix:
        # Sample near both endpoints, choosing the side by coin flip qq.
        qq = tf.cast(tf.reshape(tf.multinomial([[.5, .5]], self.batch_size),
            [self.batch_size, 1]), tf.float32)
        alpha = .1 * alpha * qq + (1. - .1 * alpha) * (1. - qq)
    elif 'edge' in self.config.suffix:
        # Sample very close to the fake endpoint [.99, 1.0].
        alpha = .99 + .01 * alpha
    x_hat = (1. - alpha) * real_data + alpha * fake_data
    witness = tf.matmul(k_test(x_hat), Z)
    gradients = tf.gradients(witness, [x_hat])[0]
    # Penalize deviation of the gradient norm from 1.
    penalty = tf.reduce_mean(tf.square(tf.norm(gradients, axis=1) - 1.0))
    if self.config.gradient_penalty > 0:
        # Penalty weight stored as a TF variable so it can be decayed.
        self.gp = tf.get_variable('gradient_penalty', dtype=tf.float32,
            initializer=self.config.gradient_penalty)
        self.g_loss = self.optim_loss
        self.d_loss = -self.optim_loss + penalty * self.gp
        self.optim_name += ' gp %.1f' % self.config.gradient_penalty
    else:
        self.g_loss = self.optim_loss
        self.d_loss = -self.optim_loss
    variable_summaries([(gradients, 'dx_gradients')])
    tf.summary.scalar(self.optim_name + ' G', self.g_loss)
    tf.summary.scalar(self.optim_name + ' D', self.d_loss)
    tf.summary.scalar('dx_penalty', penalty)
# def discriminator(self, image, y=None, reuse=False):
# with tf.variable_scope("discriminator") as scope:
# if reuse:
# scope.reuse_variables()
#
## if True: #np.mod(s, 16) == 0:
### h0 = self.d_bn0(image)
### h0 = h0 + lrelu(conv2d(h0, self.c_dim, name='d_h0_conv', d_h=1, d_w=1))
### h1 = self.d_bn1(h0, train=True)
### h1 = h1 + lrelu(conv2d(h1, self.c_dim, name='d_h1_conv', d_h=1, d_w=1))
### h2 = self.d_bn2(h1, train=True)
### h2 = h2 + lrelu(conv2d(h2, self.c_dim, name='d_h2_conv', d_h=1, d_w=1))
### h3 = self.d_bn3(h2, train=True)
### h3 = h3 + lrelu(conv2d(h3, self.c_dim, name='d_h3_conv', d_h=1, d_w=1))
## return linear(tf.reshape(image, [self.batch_size, -1]), self.df_dim, 'd_h4_lin')
#
# s = self.df_dim
# ch = np.ceil(self.output_size/16) ** 2
# s0, s1, s2, s3 = max(1, int(s/(ch*8))), max(1, int(s/(ch*4))), \
# max(1, int(s/(ch*2))), max(1, int(s/ch))
# h0 = lrelu(self.d_bn0(conv2d(image, s0, name='d_h0_conv')))
# h1 = lrelu(self.d_bn1(conv2d(h0, s1, name='d_h1_conv')))
# h2 = lrelu(self.d_bn2(conv2d(h1, s2, name='d_h2_conv')))
# h3 = lrelu(self.d_bn3(conv2d(h2, s3, name='d_h3_conv')))
## h4 = linear(tf.reshape(h3, [self.batch_size, -1]), self.df_dim, 'd_h3_lin')
# return tf.reshape(h3, [self.batch_size, -1])
def train_step(self, config, batch_images=None):
    """Run one optimization step (generator or discriminator, alternating).

    :param config: training configuration (batch_size, dsteps, decay_rate, ...)
    :param batch_images: optional real-image batch fed to ``self.images``
    :return: (g_loss, d_loss) from the session run
        NOTE(review): if ``self.config.use_kernel`` is false no session run
        happens and ``g_loss``/``d_loss`` are unbound at the return — confirm
        use_kernel is always set in supported configs.
    """
    # Fresh latent batch for this step.
    batch_z = np.random.uniform(
        -1, 1, [config.batch_size, self.z_dim]).astype(np.float32)
    # Summaries: dense early (every 50 steps for the first 1000), then every
    # 1000 steps, and always retried after a summary-write failure.
    write_summary = ((np.mod(self.counter, 50) == 0) and (self.counter < 1000)) \
        or (np.mod(self.counter, 1000) == 0) or (self.err_counter > 0)
    if self.config.use_kernel:
        feed_dict = {self.lr: self.current_lr, self.z: batch_z}
        if batch_images is not None:
            feed_dict.update({self.images: batch_images})
        eval_ops = [self.global_step, self.g_loss, self.d_loss]
        if (self.config.kernel != '') and ('optme' not in self.config.model) and ('lsun' not in self.config.dataset):
            feed_dict.update({self.me_test_images: self.additional_sample_images})
        if self.config.is_demo:
            summary_str, step, g_loss, d_loss = self.sess.run(
                [self.TrainSummary] + eval_ops,
                feed_dict=feed_dict
            )
        else:
            # d_counter == 0 means it is the generator's turn this step.
            if self.d_counter == 0:
                if write_summary:
                    _, summary_str, step, g_loss, d_loss = self.sess.run(
                        [self.g_grads, self.TrainSummary] + eval_ops,
                        feed_dict=feed_dict
                    )
                else:
                    _, step, g_loss, d_loss = self.sess.run(
                        [self.g_grads] + eval_ops,
                        feed_dict=feed_dict
                    )
            else:
                _, step, g_loss, d_loss = self.sess.run(
                    [self.d_grads] + eval_ops, feed_dict=feed_dict
                )
    if self.d_counter == 0:
        if write_summary:
            try:
                self.writer.add_summary(summary_str, step)
                self.err_counter = 0
            except Exception as e:
                # Summary writing is best-effort; a failure schedules a retry
                # on the next step via err_counter.
                print('Step %d summary exception. ' % self.counter, e)
                self.err_counter += 1
        print("Epoch: [%2d] time: %4.4f, %s, G: %.8f, D: %.8f"
              % (self.counter, time.time() - self.start_time,
                 self.optim_name, g_loss, d_loss))
        # Decay the learning rate five times over the run.
        if (np.mod(self.counter, self.config.max_iteration//5) == 0):
            self.current_lr *= self.config.decay_rate
            print('current learning rate: %f' % self.current_lr)
            if ('decay_gp' in self.config.suffix) and (self.config.gradient_penalty > 0):
                # NOTE(review): if self.gp is a tf.Variable, `*=` rebinds it
                # to a new multiply op each decay (graph growth) rather than
                # assigning in place — confirm intended in TF1 usage.
                self.gp *= self.config.decay_rate
                # Bug fix: log message previously said 'gradeint penalty'.
                print('current gradient penalty: %f' % self.sess.run(self.gp))
    if self.counter == 1:
        print('current learning rate: %f' % self.current_lr)
    # Alternate generator/discriminator turns; extra discriminator steps are
    # taken near the start and every 100th step.
    if self.d_grads is not None:
        d_steps = self.config.dsteps
        if ((self.counter % 100 == 0) or (self.counter < 20)):
            d_steps = self.config.start_dsteps
        self.d_counter = (self.d_counter + 1) % (d_steps + 1)
    # The global counter only advances when a full G+D cycle has finished.
    self.counter += (self.d_counter == 0)
    return g_loss, d_loss
| |
from collections import Counter
import json
import base64
import random
import re
from django.core.files.storage import get_storage_class
from django.db import models
from django.db.models import (
DateTimeField, TextField, CharField, ForeignKey, IntegerField,
BooleanField, F, ManyToManyField, OneToOneField, FloatField,
FileField
)
from django.utils import timezone
from django.db import transaction
from uuid import uuid4
import sqlparse
from django.utils.safestring import mark_safe
from silk.utils.profile_parser import parse_profile
from silk.config import SilkyConfig
silk_storage = get_storage_class(SilkyConfig().SILKY_STORAGE_CLASS)()
# Separated out so it can be used in tests without models
def _time_taken(start_time, end_time):
d = end_time - start_time
return d.seconds * 1000 + d.microseconds / 1000
def time_taken(self):
    # Module-level helper intended to be used as a model method (takes `self`);
    # delegates to _time_taken so the arithmetic is testable without models.
    return _time_taken(self.start_time, self.end_time)
class CaseInsensitiveDictionary(dict):
    """Dict that lower-cases keys on every write, giving case-insensitive
    lookups — used for HTTP header mappings.

    NOTE: only __getitem__/__setitem__ fold case; inherited methods such as
    ``get`` see the stored (lower-cased) keys, so callers use lower-case keys.
    """

    def __getitem__(self, key):
        return super(CaseInsensitiveDictionary, self).__getitem__(key.lower())

    def __setitem__(self, key, value):
        super(CaseInsensitiveDictionary, self).__setitem__(key.lower(), value)

    def update(self, other=None, **kwargs):
        # Bug fix: `other` defaults to None but the original unconditionally
        # called other.items(), so update(**kwargs) alone raised AttributeError.
        if other is not None:
            for k, v in other.items():
                self[k] = v
        for k, v in kwargs.items():
            self[k] = v

    def __init__(self, d):
        super(CaseInsensitiveDictionary, self).__init__()
        for k, v in d.items():
            self[k] = v
class Request(models.Model):
    """A single recorded HTTP request: path, body, headers, timing and
    profiling metadata."""
    id = CharField(max_length=36, default=uuid4, primary_key=True)
    path = CharField(max_length=190, db_index=True)
    query_params = TextField(blank=True, default='')
    raw_body = TextField(blank=True, default='')
    body = TextField(blank=True, default='')
    method = CharField(max_length=10)
    start_time = DateTimeField(default=timezone.now, db_index=True)
    view_name = CharField(
        max_length=190, db_index=True, blank=True,
        default='', null=True
    )
    end_time = DateTimeField(null=True, blank=True)
    time_taken = FloatField(blank=True, null=True)
    encoded_headers = TextField(blank=True, default='')  # stores json
    meta_time = FloatField(null=True, blank=True)
    meta_num_queries = IntegerField(null=True, blank=True)
    meta_time_spent_queries = FloatField(null=True, blank=True)
    pyprofile = TextField(blank=True, default='')
    prof_file = FileField(max_length=300, blank=True, storage=silk_storage)

    # Useful method to create shortened copies of strings without losing start and end context
    # Used to ensure path and view_name don't exceed 190 characters
    def _shorten(self, string):
        """Truncate *string* to 190 characters, keeping both ends visible."""
        return '%s...%s' % (string[:94], string[len(string) - 93:])

    @property
    def total_meta_time(self):
        """Silky's own recording overhead plus query-recording time (ms)."""
        return (self.meta_time or 0) + (self.meta_time_spent_queries or 0)

    @property
    def profile_table(self):
        """Yield rows of the parsed cProfile output, rewriting parseable
        source locations into links to the profile detail view.

        Rows whose location does not match ``<path>.py:<line>`` (builtins,
        generated code, unexpected formats) are yielded unchanged.
        """
        # Raw string: the old pattern relied on the invalid string escapes
        # '\.' and '\:'. Compiled once instead of once per row.
        location_re = re.compile(r'(?P<src>.*\.py):(?P<num>[0-9]+).*')
        for n, columns in enumerate(parse_profile(self.pyprofile)):
            location = columns[-1]
            if n and '{' not in location and '<' not in location:
                m = location_re.search(location)
                if m is None:
                    # Unparseable location; the old code crashed here on
                    # m.groupdict().
                    yield columns
                    continue
                group = m.groupdict()
                src = group['src']
                num = group['num']
                name = 'c%d' % n
                fmt = '<a name={name} href="?pos={n}&file_path={src}&line_num={num}#{name}">{location}</a>'
                # Explicit arguments replace the fragile
                # fmt.format(**dict(group, **locals())) hack.
                rep = fmt.format(name=name, n=n, src=src, num=num,
                                 location=location)
                yield columns[:-1] + [mark_safe(rep)]
            else:
                yield columns

    # defined in atomic transaction within SQLQuery save()/delete() as well
    # as in bulk_create of SQLQueryManager
    # TODO: This is probably a bad way to do this, .count() will prob do?
    num_sql_queries = IntegerField(default=0)  # TODO replace with count()

    @property
    def time_spent_on_sql_queries(self):
        """
        TODO: Perhaps there is a nicer way to do this with Django aggregates?
        My initial thought was to perform:
        SQLQuery.objects.filter.aggregate(Sum(F('end_time')) - Sum(F('start_time')))
        However this feature isnt available yet, however there has been talk
        for use of F objects within aggregates for four years
        here: https://code.djangoproject.com/ticket/14030. It looks
        like this will go in soon at which point this should be changed.
        """
        return sum(x.time_taken for x in SQLQuery.objects.filter(request=self))

    @property
    def headers(self):
        """Request headers as a case-insensitive mapping (decoded from JSON)."""
        if self.encoded_headers:
            raw = json.loads(self.encoded_headers)
        else:
            raw = {}
        return CaseInsensitiveDictionary(raw)

    @property
    def content_type(self):
        """Value of the Content-Type header, or None."""
        return self.headers.get('content-type', None)

    @classmethod
    def garbage_collect(cls, force=False):
        """ Remove Request/Responses when we are at the SILKY_MAX_RECORDED_REQUESTS limit
        Note that multiple in-flight requests may call this at once causing a
        double collection """
        check_percent = SilkyConfig().SILKY_MAX_RECORDED_REQUESTS_CHECK_PERCENT
        check_percent /= 100.0
        # Probabilistic trigger: only a fraction of saves actually collect.
        if check_percent < random.random() and not force:
            return
        target_count = SilkyConfig().SILKY_MAX_RECORDED_REQUESTS
        # Since garbage collection is probabilistic, the target count should
        # be lowered to account for requests before the next garbage collection
        if check_percent != 0:
            target_count -= int(1 / check_percent)
        # Make sure we can delete everything if needed by settings
        if target_count <= 0:
            cls.objects.all().delete()
            return
        try:
            time_cutoff = cls.objects.order_by(
                '-start_time'
            ).values_list(
                'start_time',
                flat=True
            )[target_count]
        except IndexError:
            # Fewer rows than the target: nothing to collect.
            return
        cls.objects.filter(start_time__lte=time_cutoff).delete()

    def save(self, *args, **kwargs):
        # sometimes django requests return the body as 'None'
        if self.raw_body is None:
            self.raw_body = ''
        if self.body is None:
            self.body = ''
        if self.end_time and self.start_time:
            interval = self.end_time - self.start_time
            self.time_taken = interval.total_seconds() * 1000
        # We can't save if either path or view_name exceed 190 characters
        if self.path and len(self.path) > 190:
            self.path = self._shorten(self.path)
        if self.view_name and len(self.view_name) > 190:
            self.view_name = self._shorten(self.view_name)
        super(Request, self).save(*args, **kwargs)
        Request.garbage_collect(force=False)
class Response(models.Model):
    """The HTTP response paired one-to-one with a recorded silk Request."""
    id = CharField(max_length=36, default=uuid4, primary_key=True)
    request = OneToOneField(
        Request, related_name='response', db_index=True,
        on_delete=models.CASCADE,
    )
    status_code = IntegerField()
    raw_body = TextField(blank=True, default='')
    body = TextField(blank=True, default='')
    encoded_headers = TextField(blank=True, default='')

    @property
    def headers(self):
        """Response headers as a case-insensitive mapping (decoded from JSON)."""
        decoded = json.loads(self.encoded_headers) if self.encoded_headers else {}
        return CaseInsensitiveDictionary(decoded)

    @property
    def content_type(self):
        """Value of the Content-Type header, or None."""
        return self.headers.get('content-type', None)

    @property
    def raw_body_decoded(self):
        """The raw body decoded from its base64 transport encoding."""
        return base64.b64decode(self.raw_body)
class SQLQueryManager(models.Manager):
    def bulk_create(self, *args, **kwargs):
        """Bulk-insert SQLQuery rows while keeping each parent Request's
        ``num_sql_queries`` counter consistent: bulk_create bypasses the
        model's save(), so the increments have to happen here."""
        objs = args[0] if args else kwargs.get('objs')
        with transaction.atomic():
            per_request = Counter([q.request_id for q in objs])
            # TODO: Not that there is ever more than one request (but there
            # could be eventually), but perhaps there is a cleaner way of
            # applying the increments than iterating and saving individually,
            # e.g. a bulk update with differing increments.
            for req in Request.objects.filter(pk__in=per_request.keys()):
                # F() keeps the increment atomic on the database side.
                req.num_sql_queries = F('num_sql_queries') + per_request[req.pk]
                req.save()
        return super(SQLQueryManager, self).bulk_create(*args, **kwargs)
class SQLQuery(models.Model):
    """A single SQL query captured during a request or profiling block."""
    query = TextField()
    start_time = DateTimeField(null=True, blank=True, default=timezone.now)
    end_time = DateTimeField(null=True, blank=True)
    time_taken = FloatField(blank=True, null=True)
    request = ForeignKey(
        Request, related_name='queries', null=True,
        blank=True, db_index=True, on_delete=models.CASCADE,
    )
    traceback = TextField()
    objects = SQLQueryManager()

    @property
    def traceback_ln_only(self):
        """Every other traceback line: the location lines without source text."""
        return '\n'.join(self.traceback.split('\n')[::2])

    @property
    def formatted_query(self):
        """The query reindented with upper-cased keywords, for display."""
        return sqlparse.format(self.query, reindent=True, keyword_case='upper')

    # TODO: Surely a better way to handle this? May return false positives
    @property
    def num_joins(self):
        return self.query.lower().count('join ')

    @property
    def tables_involved(self):
        """
        A rudimentary way to work out tables involved in a query.
        TODO: Can probably parse the SQL using sqlparse etc and pull out table
        info that way?
        """
        components = [x.strip() for x in self.query.split()]
        tables = []
        for idx, component in enumerate(components):
            # TODO: If django uses aliases on column names they will be falsely
            # identified as tables...
            if component.lower() == 'from' or component.lower() == 'join' or component.lower() == 'as':
                try:
                    _next = components[idx + 1]
                    if not _next.startswith('('):  # Subquery
                        stripped = _next.strip().strip(',')
                        if stripped:
                            tables.append(stripped)
                except IndexError:  # Reach the end
                    pass
        return tables

    @transaction.atomic()
    def save(self, *args, **kwargs):
        if self.end_time and self.start_time:
            interval = self.end_time - self.start_time
            self.time_taken = interval.total_seconds() * 1000
        if not self.pk:
            if self.request:
                self.request.num_sql_queries += 1
                self.request.save(update_fields=['num_sql_queries'])
        super(SQLQuery, self).save(*args, **kwargs)

    @transaction.atomic()
    def delete(self, *args, **kwargs):
        # Bug fix: `request` is a nullable FK; mirror the guard used in
        # save() so deleting an orphaned query no longer raises
        # AttributeError on None.
        if self.request:
            self.request.num_sql_queries -= 1
            self.request.save()
        super(SQLQuery, self).delete(*args, **kwargs)
class BaseProfile(models.Model):
    """Abstract base for profiling records: name, timing and owning request."""
    name = CharField(max_length=300, blank=True, default='')
    start_time = DateTimeField(default=timezone.now)
    end_time = DateTimeField(null=True, blank=True)
    request = ForeignKey(
        Request, null=True, blank=True, db_index=True,
        on_delete=models.CASCADE,
    )
    time_taken = FloatField(blank=True, null=True)

    class Meta:
        abstract = True

    def save(self, *args, **kwargs):
        # Derive time_taken (milliseconds) from the recorded interval,
        # matching the computation in Request.save().
        if self.end_time and self.start_time:
            interval = self.end_time - self.start_time
            self.time_taken = interval.total_seconds() * 1000
        super(BaseProfile, self).save(*args, **kwargs)
class Profile(BaseProfile):
    """A recorded profile block: either a decorated function or a context
    manager, with its location and associated queries."""
    file_path = CharField(max_length=300, blank=True, default='')
    line_num = IntegerField(null=True, blank=True)
    end_line_num = IntegerField(null=True, blank=True)
    func_name = CharField(max_length=300, blank=True, default='')
    exception_raised = BooleanField(default=False)
    queries = ManyToManyField(SQLQuery, related_name='profiles', db_index=True)
    dynamic = BooleanField(default=False)

    @property
    def is_function_profile(self):
        """True when this profile wraps a function (func_name is set)."""
        return self.func_name is not None

    @property
    def is_context_profile(self):
        """True when this profile came from a `with` block (no func_name)."""
        return self.func_name is None

    @property
    def time_spent_on_sql_queries(self):
        """Total milliseconds spent in the queries attached to this profile."""
        total = 0
        for sql_query in self.queries.all():
            total += sql_query.time_taken
        return total
| |
"""
Spatial geometric objects.
:Classes:
:`Geometry`: base class for all geometries
:`Point`: (x, y, z) point
:`Point2`: pair of `Point` instances that define a line, or box, etc.
:`PPoint`: multiple `Point` instances
:`Mesh`: mesh surface made up of triangular faces defined by vertices
.. note::
Regression tests provide usage examples:
`geometry tests <https://github.com/GeosoftInc/gxpy/blob/master/geosoft/gxpy/tests/test_geometry.py>`_
"""
import numpy as np
from collections.abc import Sequence
import geosoft
import geosoft.gxapi as gxapi
from . import coordinate_system as gxcs
from . import vv as gxvv
__version__ = geosoft.__version__
def _t(s):
    # Translation hook for user-facing strings.
    # NOTE(review): resolves geosoft.gxpy.system lazily at call time; only
    # `geosoft` is imported here — confirm the submodule is importable via it.
    return geosoft.gxpy.system.translate(s)
def _geo_cs(g, geo_class, coordinate_system, **kwargs):
if hasattr(g, 'coordinate_system') and g.coordinate_system == coordinate_system:
return g
return geo_class(g, coordinate_system, **kwargs)
def first_coordinate_system(geo_objects):
    """
    Return the first found known coordinate system in the list

    :param geo_objects: objects as iterable
    :return: valid coordinate system or `None` if none found

    .. versionadded:: 9.3.1
    """
    # Lazily scan for the first object that both has a coordinate system
    # and whose coordinate system is known.
    known = (o.coordinate_system for o in geo_objects
             if hasattr(o, 'coordinate_system')
             and gxcs.is_known(o.coordinate_system))
    return next(known, None)
class GeometryException(geosoft.GXRuntimeError):
    """
    Exceptions from :mod:`geosoft.gxpy.geometry`.
    """
    # Marker subclass only; no additional behaviour beyond GXRuntimeError.
    pass
def extent_union(g1, g2):
    """
    Return the spatial union of two spatial objects.

    :param g1: extent (p0 < p1), returned extent will be in this coordinate system
    :param g2: second object
    :return: `Point2` instance in the coordinate system of g1

    .. versionadded:: 9.3.1
    """

    def ext(g):
        # Normalize any geometry or point-like data to a Point2 extent.
        if g is None or isinstance(g, Point2):
            return g
        if isinstance(g, Geometry):
            return g.extent
        return Point2(g).extent

    g1 = ext(g1)
    g2 = ext(g2)
    if g1 is None:
        return g2
    if g2 is None:
        return g1
    g2p0x, g2p0y, g2p0z = g2.p0.xyz
    g2p1x, g2p1y, g2p1z = g2.p1.xyz
    if g1.coordinate_system != g2.coordinate_system:
        # Different systems: reproject the 8 corners of g2's box into g1's
        # coordinate system and union with that reprojected extent.
        corners = np.array([(g2p0x, g2p0y, g2p0z),
                            (g2p0x, g2p1y, g2p0z),
                            (g2p1x, g2p1y, g2p0z),
                            (g2p1x, g2p0y, g2p0z),
                            (g2p0x, g2p0y, g2p1z),
                            (g2p0x, g2p1y, g2p1z),
                            (g2p1x, g2p1y, g2p1z),
                            (g2p1x, g2p0y, g2p1z)], dtype=np.float64)
        ex = PPoint(PPoint(corners, g2.coordinate_system), g1.coordinate_system).extent
        return extent_union(g1, ex)
    g1p0x, g1p0y, g1p0z = g1.p0.xyz
    g1p1x, g1p1y, g1p1z = g1.p1.xyz
    # If g2 lies entirely inside g1, the union is g1 unchanged.
    if g2p0x >= g1p0x and g2p0y >= g1p0y and g2p0z >= g1p0z and\
            g2p1x <= g1p1x and g2p1y <= g1p1y and g2p1z <= g1p1z:
        return g1
    min_x = g1p0x if g1p0x < g2p0x else g2p0x
    min_y = g1p0y if g1p0y < g2p0y else g2p0y
    min_z = g1p0z if g1p0z < g2p0z else g2p0z
    max_x = g1p1x if g1p1x > g2p1x else g2p1x
    max_y = g1p1y if g1p1y > g2p1y else g2p1y
    # Bug fix: this previously read `g1p1x if g1p1z > g2p1z else g2p1z`,
    # returning g1's maximum X as the union's maximum Z whenever g1's z
    # was the larger.
    max_z = g1p1z if g1p1z > g2p1z else g2p1z
    return Point2(((min_x, min_y, min_z), (max_x, max_y, max_z)), g1.coordinate_system)
class Geometry:
    """
    Geometry base class for all geometries and spatial objects in Geosoft.

    :param coordinate_system: `geosoft.gxpy.coordinate_system.Coordinate_system` instance.
    :param name: instance name string
    :param gxobj: optional gxapi instance that can satisfy get_ipj() and/or get_extent()

    :Properties:

        :`Geometry.name`: name for the geometry
        :`Geometry.coordinate_system`: spatial coordinate system of the x, y, z locations
        :`Geometry.extent`: spatial extent as a `Point2`
        :`Geometry.extent_xyz`: (min_x, min_y, min_z, max_x, max_y, max_z)
        :`Geometry.extent_xy`: (min_x, min_y, max_x, max_y)
        :`Geometry.dimension`: (dx, dy, dz) dimension
        :`Geometry.dimension_xy`: (dx, dy) dimension
        :`Geometry.centroid`: center point as a `Point`
        :`Geometry.centroid_xyz`: (x, y, z) location of the object center
        :`Geometry.centroid_xy`: (x, y) center

    .. versionadded:: 9.2
    """

    def __enter__(self):
        return self

    def __exit__(self, xtype, xvalue, xtraceback):
        # No resources to release; context-manager support is for API symmetry.
        pass

    def __repr__(self):
        return "{}({})".format(self.__class__, self.__dict__)

    def __init__(self, coordinate_system=None, name=None, gxobj=None):
        if name is None:
            name = '_geometry_'
        self._cs = coordinate_system
        self._name = name
        self._gxobj = gxobj

    def __eq__(self, other):
        # Equal when coordinate systems and associated gxapi objects match.
        # NOTE(review): assumes `other` is also a Geometry (accesses
        # other.gxobj); comparing against an arbitrary object raises
        # AttributeError. Defining __eq__ without __hash__ also makes
        # instances unhashable — confirm both are acceptable to callers.
        if self.coordinate_system != other.coordinate_system:
            return False
        if self._gxobj != other.gxobj:
            return False
        return True

    @property
    def coordinate_system(self):
        """`geosoft.gxpy.coordinate_system.Coordinate_system` instance or None. Can be set."""
        # Lazily promote whatever was stored (e.g. a name or IPJ) to a
        # Coordinate_system instance, preferring the gxobj's own IPJ if the
        # associated gxapi object can supply one.
        if self._cs and not isinstance(self._cs, gxcs.Coordinate_system):
            self._cs = gxcs.Coordinate_system(self._cs)
        if self._gxobj and hasattr(self._gxobj, 'get_ipj'):
            ipj = gxapi.GXIPJ.create()
            self._gxobj.get_ipj(ipj)
            self._cs = gxcs.Coordinate_system(ipj)
        return self._cs

    @coordinate_system.setter
    def coordinate_system(self, cs):
        # Push the new system down to the gxapi object when it supports it.
        if cs and self._gxobj and hasattr(self._gxobj, 'set_ipj'):
            if not isinstance(cs, gxcs.Coordinate_system):
                cs = gxcs.Coordinate_system(cs)
            self._gxobj.set_ipj(cs.gxipj)
        self._cs = cs

    @property
    def gxobj(self):
        """An associated gxapi object, or None."""
        return self._gxobj

    @property
    def name(self):
        """Spatial object name, can be set."""
        return self._name

    @name.setter
    def name(self, name):
        self._name = name

    @property
    def extent(self):
        """ Object extent as a `Point2` instance."""
        # Base implementation delegates to the gxapi object when possible;
        # subclasses with their own geometry override this.
        if self._gxobj and hasattr(self._gxobj, 'get_extents'):
            rx0 = gxapi.float_ref()
            ry0 = gxapi.float_ref()
            rz0 = gxapi.float_ref()
            rx1 = gxapi.float_ref()
            ry1 = gxapi.float_ref()
            rz1 = gxapi.float_ref()
            self._gxobj.get_extents(rx0, ry0, rz0, rx1, ry1, rz1)
            cs = self.coordinate_system
            return Point2(((rx0.value, ry0.value, rz0.value), (rx1.value, ry1.value, rz1.value)), cs)
        else:
            return None

    @property
    def extent_xyz(self):
        """Object extent as a tuple (xmin, ymin, zmin, xmax, ymax, zmax)."""
        e = self.extent
        if e is None:
            return None, None, None, None, None, None
        return e[0].x, e[0].y, e[0].z, e[1].x, e[1].y, e[1].z

    @property
    def extent_xy(self):
        """ Horizontal extent as a tuple (min_x, min_y, max_x, max_y)."""
        e = self.extent
        if e is None:
            return None, None, None, None
        return e[0].x, e[0].y, e[1].x, e[1].y

    @property
    def extent_minimum(self):
        """Minimum geometry extent as `Point` instance."""
        if self.extent is None:
            return None
        return self.extent[0]

    @property
    def extent_maximum(self):
        """Maximum geometry extent as `Point` instance."""
        if self.extent is None:
            return None
        return self.extent[1]

    @property
    def extent_minimum_xyz(self):
        """Minimum geometry extent as tuple (x, y, z)."""
        e = self.extent
        if e is None:
            return None, None, None
        p = e[0]
        return p.x, p.y, p.z

    @property
    def extent_maximum_xyz(self):
        """Maximum geometry extent as tuple (x, y, z)."""
        e = self.extent
        if e is None:
            return None, None, None
        p = e[1]
        return p.x, p.y, p.z

    @property
    def extent_minimum_xy(self):
        """Minimum horizontal extent as tuple (min_x, min_y)."""
        e = self.extent
        if e is None:
            return None, None
        p = e[0]
        return p.x, p.y

    @property
    def extent_maximum_xy(self):
        """Maximum horizontal extent as tuple (max_x, max_y)."""
        e = self.extent
        if e is None:
            return None, None
        p = e[1]
        return p.x, p.y

    @property
    def centroid(self):
        """Object centroid as a `Point` instance."""
        e = self.extent
        if e is None:
            return None
        cx = (e[0].x + e[1].x) * 0.5
        cy = (e[0].y + e[1].y) * 0.5
        cz = (e[0].z + e[1].z) * 0.5
        return Point((cx, cy, cz), e.coordinate_system)

    @property
    def dimension(self):
        """Object dimensions as tuple (dx, dy, dz)"""
        e = self.extent
        if e is None:
            return None, None, None
        dx = abs(e[1].x - e[0].x)
        dy = abs(e[1].y - e[0].y)
        dz = abs(e[1].z - e[0].z)
        return dx, dy, dz

    @property
    def centroid_xy(self):
        """Horizontal centroid as a tuple (x, y)."""
        c = self.centroid
        if c is None:
            return None, None
        return c.x, c.y

    @property
    def centroid_xyz(self):
        """Horizontal centroid as a tuple (x, y, z)."""
        c = self.centroid
        if c is None:
            return None, None, None
        return c.x, c.y, c.z

    @property
    def dimension_xy(self):
        """Horizontal dimension as a tuple (dx, dy)."""
        dx, dy, _ = self.dimension
        if dx is None:
            return None, None
        return dx, dy
class Point(Geometry, Sequence):
    """
    Spatial location (x,y,z). Basic instance arithmetic and equality testing is supported.

    :param p: point in one of the following forms:

        `Point` instance, returns a copy
        (x, y [,z]) implied z is as defined by z=
        k makes a point (k, k, k)

    :param coordinate_system: coordinate system or None
    :param z: implied z if len(p) is 2.
    :param kwargs: passed to base class `Geometry`

    Iterates on [x, y, z]
    Operators supported: = + - * /

    .. versionadded:: 9.2

    .. versionchanged:: 9.3.1 added coordinate_system parameter
    """

    def __str__(self):
        return "{}({}, {}, {})".format(self.name, self.x, self.y, self.z)

    def __init__(self, p, coordinate_system=None, name=None, z=0., **kwargs):
        if name is None:
            name = '_point_'
        # The redundant unconditional super().__init__ call that preceded the
        # isinstance branches has been removed; each branch initializes the
        # base class exactly once with the resolved coordinate system.
        if isinstance(p, Point):
            if coordinate_system is None:
                coordinate_system = p.coordinate_system
            super().__init__(coordinate_system=coordinate_system, name=name, **kwargs)
            if coordinate_system != p.coordinate_system:
                # Reproject the source point into the requested system.
                self.p = gxcs.Coordinate_translate(p.coordinate_system, coordinate_system).convert(p.p)
            else:
                self.p = p.p.copy()
        else:
            super().__init__(coordinate_system=coordinate_system, name=name, **kwargs)
            if isinstance(p, np.ndarray):
                if len(p) > 2:
                    self.p = p[:3].copy()
                else:
                    self.p = np.empty(3)
                    self.p[:2] = p
                    self.p[2] = z
            elif hasattr(p, '__len__'):
                lp = len(p)
                if lp == 1:
                    v = float(p[0])
                    # Bug fix: was dtype=np.float, a deprecated alias removed
                    # in NumPy 1.20 (raises AttributeError there).
                    self.p = np.array((v, v, v), dtype=float)
                else:
                    self.p = np.empty(3)
                    if lp == 2:
                        self.p[0] = float(p[0]) if p[0] is not None else np.nan
                        self.p[1] = float(p[1]) if p[1] is not None else np.nan
                        self.p[2] = z
                    else:
                        self.p[0] = float(p[0]) if p[0] is not None else np.nan
                        self.p[1] = float(p[1]) if p[1] is not None else np.nan
                        self.p[2] = float(p[2]) if p[2] is not None else np.nan
            else:
                # A scalar k makes the point (k, k, k).
                p = float(p)
                self.p = np.array((p, p, p))
        self._next = 0

    def __len__(self):
        # NOTE(review): len() is 1 (one point) while iteration yields the
        # three ordinates — confirm this asymmetry is intended.
        return 1

    def __iter__(self):
        return self

    def __next__(self):
        if self._next >= 3:
            self._next = 0
            raise StopIteration
        else:
            item = self._next
            self._next += 1
            return self.p[item]

    def __getitem__(self, item):
        return self.p[item]

    def __add__(self, p):
        if not isinstance(p, Point):
            p = Point(p)
        else:
            p = _geo_cs(p, Point, self.coordinate_system)
        return Point(self.p + p.p, self.coordinate_system)

    def __sub__(self, p):
        if not isinstance(p, Point):
            p = Point(p)
        else:
            p = _geo_cs(p, Point, self.coordinate_system)
        return Point(self.p - p.p, self.coordinate_system)

    def __neg__(self):
        return Point(-self.p, coordinate_system=self.coordinate_system)

    def __mul__(self, p):
        if not isinstance(p, Point):
            p = Point(p)
        else:
            p = _geo_cs(p, Point, self.coordinate_system)
        return Point(self.p * p.p, self.coordinate_system)

    def __truediv__(self, p):
        if not isinstance(p, Point):
            p = Point(p)
        else:
            p = _geo_cs(p, Point, self.coordinate_system)
        return Point(self.p / p.p, self.coordinate_system)

    def __eq__(self, other):
        # Base-class equality (coordinate system, gxobj) plus ordinates.
        if not super(Point, self).__eq__(other):
            return False
        return np.array_equal(self.p, other.p)

    @property
    def x(self):
        """ x value, can be set"""
        return self.p[0]

    @x.setter
    def x(self, value):
        self.p[0] = float(value)

    @property
    def y(self):
        """ y value, can be set"""
        return self.p[1]

    @y.setter
    def y(self, value):
        self.p[1] = float(value)

    @property
    def z(self):
        """ z value, can be set"""
        return self.p[2]

    @z.setter
    def z(self, value):
        self.p[2] = float(value)

    @property
    def xy(self):
        """ (x, y), can be set"""
        return self.p[0], self.p[1]

    @xy.setter
    def xy(self, xy):
        self.p[0] = float(xy[0])
        self.p[1] = float(xy[1])

    @property
    def xyz(self):
        """ (x, y, z), can be set"""
        return self.p[0], self.p[1], self.p[2]

    @xyz.setter
    def xyz(self, xyz):
        self.p[0] = float(xyz[0])
        self.p[1] = float(xyz[1])
        self.p[2] = float(xyz[2])

    @property
    def extent(self):
        """A point's extent is the degenerate `Point2` (self, self)."""
        return Point2((self, self))

    @property
    def pp(self):
        """Point as a numpy array shaped (1, 3)"""
        return self.p.reshape((1, 3))

    def copy(self):
        """Return a copy"""
        return Point(self)
class Point2(Geometry, Sequence):
    """
    Two points, for a line, or a rectangle, or a cube. Basic instance arithmetic and equality testing is supported.

    :param p: Points in one of the following forms:

        `Point2` makes a copy in the required coordinate system
        (`Point`, `Point`)
        (x, y [, z]) two points at the same location
        ((x, y [, z]), (x, y [, z]))
        (x0, y0, x1, y1) implied z is 0
        (x0, y0, z0, x1, y1, z1)

    :param coordinate_system: coordinate system or None
    :param z: implied z value when only (x, y) is passed
    :param kwargs: passed to base class `Geometry`

    Iterates on two points [p0, p1].
    Operators supported: = + - * /
    Second operand may be a `Point2` or a `Point`.

    .. versionadded:: 9.2

    .. versionchanged:: 9.3.1 added coordinate_system parameter
    """

    def __str__(self):
        return "{}[({}, {}, {}) ({}, {}, {})]".format(self.name, self.p0.x, self.p0.y, self.p0.z,
                                                      self.p1.x, self.p1.y, self.p1.z)

    def __init__(self, p, coordinate_system=None, name=None, z=0, **kwargs):
        if name is None:
            name = '_point2_'
        super().__init__(coordinate_system=coordinate_system, name=name, **kwargs)
        # Branch on the many accepted input shapes (see class docstring).
        if isinstance(p, Point):
            if coordinate_system is None:
                coordinate_system = p.coordinate_system
            self.p0 = self.p1 = Point(p, coordinate_system=coordinate_system)
        elif isinstance(p, Point2):
            if coordinate_system is None:
                coordinate_system = p.coordinate_system
            self.p0 = Point(p.p0, coordinate_system=coordinate_system)
            self.p1 = Point(p.p1, coordinate_system=coordinate_system)
        else:
            if not hasattr(p, '__iter__'):
                # scalar k: both corners at (k, k, k)
                self.p0 = self.p1 = Point(p, coordinate_system, z=z)
            elif len(p) == 2:
                if coordinate_system is None:
                    coordinate_system = first_coordinate_system((p[0], p[1]))
                if hasattr(p[0], '__iter__'):
                    # two point-like items
                    self.p0 = Point(p[0], coordinate_system, z=z)
                    self.p1 = Point(p[1], coordinate_system, z=z)
                else:
                    # a single (x, y): both corners at the same location
                    self.p0 = Point(p, coordinate_system, z=z)
                    self.p1 = Point(self.p0)
            elif len(p) == 3:
                self.p0 = self.p1 = Point((p[0], p[1], p[2]), coordinate_system, z=z)
            elif len(p) == 4:
                self.p0 = Point((p[0], p[1]), coordinate_system, z=z)
                self.p1 = Point((p[2], p[3]), coordinate_system, z=z)
            elif len(p) == 6:
                self.p0 = Point((p[0], p[1], p[2]), coordinate_system, z=z)
                self.p1 = Point((p[3], p[4], p[5]), coordinate_system, z=z)
            else:
                raise GeometryException(_t('Invalid points: {}').format(p))
        self.coordinate_system = coordinate_system
        self._next = 0

    def __len__(self):
        return 2

    def __iter__(self):
        return self

    def __next__(self):
        if self._next >= 2:
            self._next = 0
            raise StopIteration
        else:
            if self._next:
                p = self.p1
            else:
                p = self.p0
            self._next += 1
            return p

    def __getitem__(self, item):
        if item == 0:
            return self.p0
        elif item == 1:
            return self.p1
        else:
            raise IndexError

    def __eq__(self, other):
        # Equal if the point pairs match in either order (p0/p1 swapped
        # still compares equal).
        if not super(Point2, self).__eq__(other):
            return False
        return (self.p0 == other.p0) and (self.p1 == other.p1) or (self.p0 == other.p1) and (self.p1 == other.p0)

    def __add__(self, p):
        if isinstance(p, Point2):
            p = _geo_cs(p, Point2, self.coordinate_system)
            return Point2((self.p0 + p.p0, self.p1 + p.p1), coordinate_system=self.coordinate_system)
        if not isinstance(p, Point):
            p = Point(p)
        else:
            p = _geo_cs(p, Point, self.coordinate_system)
        return Point2((self.p0 + p, self.p1 + p), coordinate_system=self.coordinate_system)

    def __sub__(self, p):
        if isinstance(p, Point2):
            p = _geo_cs(p, Point2, self.coordinate_system)
            return Point2((self.p0 - p.p0, self.p1 - p.p1), coordinate_system=self.coordinate_system)
        if not isinstance(p, Point):
            p = Point(p)
        else:
            p = _geo_cs(p, Point, self.coordinate_system)
        return Point2((self.p0 - p, self.p1 - p), coordinate_system=self.coordinate_system)

    def __neg__(self):
        return Point2((-self.p0, -self.p1), coordinate_system=self.coordinate_system)

    def __mul__(self, p):
        if isinstance(p, Point2):
            p = _geo_cs(p, Point2, self.coordinate_system)
            return Point2((self.p0 * p.p0, self.p1 * p.p1), coordinate_system=self.coordinate_system)
        if isinstance(p, Point):
            p = _geo_cs(p, Point, self.coordinate_system)
        else:
            p = Point(p)
        return Point2((self.p0 * p, self.p1 * p), coordinate_system=self.coordinate_system)

    def __truediv__(self, p):
        if isinstance(p, Point2):
            p = _geo_cs(p, Point2, self.coordinate_system)
            return Point2((self.p0 / p.p0, self.p1 / p.p1), coordinate_system=self.coordinate_system)
        if isinstance(p, Point):
            p = _geo_cs(p, Point, self.coordinate_system)
        else:
            p = Point(p)
        return Point2((self.p0 / p, self.p1 / p), coordinate_system=self.coordinate_system)

    @property
    def x2(self):
        """(x0, x1), can be set"""
        return self.p0.x, self.p1.x

    @x2.setter
    def x2(self, value):
        self.p0.x = value[0]
        self.p1.x = value[1]

    @property
    def y2(self):
        """ (y0, y1), can be set"""
        return self.p0.y, self.p1.y

    @y2.setter
    def y2(self, value):
        self.p0.y = value[0]
        self.p1.y = value[1]

    @property
    def z2(self):
        """ (z0, z1), can be set"""
        return self.p0.z, self.p1.z

    @z2.setter
    def z2(self, value):
        self.p0.z = value[0]
        self.p1.z = value[1]

    @property
    def extent(self):
        """Extent as a `Point2` with ordered corners (minimum point, maximum point)."""
        p1 = Point((min(self.p0.x, self.p1.x), min(self.p0.y, self.p1.y), min(self.p0.z, self.p1.z)),
                   self.coordinate_system)
        p2 = Point((max(self.p0.x, self.p1.x), max(self.p0.y, self.p1.y), max(self.p0.z, self.p1.z)),
                   self.coordinate_system)
        return Point2((p1, p2), self.coordinate_system)

    def copy(self):
        """Return a copy"""
        return Point2(self)

    @property
    def pp(self):
        """Point2 as a numpy array shaped (2, 3)"""
        pp = np.empty((2, 3), dtype=np.float64)
        pp[0] = self.p0.p
        pp[1] = self.p1.p
        return pp
class PPoint(Geometry, Sequence):
"""
Poly-Point class. Basic instance arithmetic and equality testing is supported.
:param xyz: array-like: (p1, p2, ...), ((x, y), ...), ((x, y, z), ...) or (vv_x, vv_y, [vv_z]).
vv data is resampled to match the first vv.
:param coordinate_system: coordinate system or `None`
:param z: constant z value for (x, y) data, ignored for (x, y, z) data
:param kwargs: passed to base class `Geometry`
Operators supported: = + - * /
.. versionadded:: 9.2
.. versionchanged:: 9.3.1 added coordinate_system parameter
"""
def __str__(self):
return "{}({} points)".format(self.name, len(self))
def __init__(self, xyz, coordinate_system=None, z=0.0, name=None, **kwargs):
if name is None:
name = '_ppoint_'
super().__init__(coordinate_system=coordinate_system, name=name, **kwargs)
def blankpp(length):
pp = np.empty(length * 3, dtype=np.float).reshape((length, 3))
pp.fill(np.nan)
pp[:, 2] = z
return pp
def np_setup(npxyz):
pp = blankpp(npxyz.shape[0])
pp[:, 0] = npxyz[:, 0]
pp[:, 1] = npxyz[:, 1]
if npxyz.shape[1] > 2:
pp[:, 2] = npxyz[:, 2]
else:
pp[:, 2] = z
return pp
def vv_setup():
pp = blankpp(xyz[0].length)
pp[:, 0] = xyz[0].get_data()[0][:]
xyz[1].refid(xyz[0].fid, pp.shape[0])
pp[:, 1] = xyz[1].get_data()[0][:]
if len(xyz) > 2:
xyz[2].refid(xyz[0].fid, pp.shape[0])
pp[:, 2] = xyz[2].np
else:
pp[:, 2] = z
return pp
def point_setup(_xyz):
pp = blankpp(len(_xyz))
i = 0
if isinstance(_xyz, Point):
_xyz = (_xyz,)
for pt in _xyz:
if isinstance(pt, Point):
pp[i, :] = _geo_cs(pt, Point, coordinate_system, z=z).p
else:
try:
pp[i, :] = pt[:3]
except:
pp[i, :] = _geo_cs(pt, Point, coordinate_system, z=z).p
i += 1
return pp
if isinstance(xyz, np.ndarray):
self.pp = np_setup(xyz)
elif isinstance(xyz[0], gxvv.GXvv):
self.pp = vv_setup()
else:
if coordinate_system is None:
coordinate_system = first_coordinate_system(xyz)
self.pp = point_setup(xyz)
self.coordinate_system = coordinate_system
self._next = 0
@classmethod
def from_list(cls, xyzlist, z=0.0):
"""
.. deprecated:: 9.3 `PPoint` can create directly from a list
"""
return cls(xyzlist, z=z)
def __len__(self):
return self.pp.shape[0]
def __iter__(self):
return self
def __next__(self):
if self._next >= self.pp.shape[0]:
self._next = 0
raise StopIteration
else:
self._next += 1
return self.__getitem__(self._next - 1)
    def __getitem__(self, item):
        # Return point `item` as a `Point` carrying this coordinate system.
        return Point(self.pp[item], self.coordinate_system)
def __add__(self, p):
if isinstance(p, PPoint):
p = _geo_cs(p, PPoint, self.coordinate_system)
return PPoint(self.pp + p.pp)
if isinstance(p, Point):
p = _geo_cs(p, Point, self.coordinate_system)
return PPoint(self.pp + p.p)
try:
p = Point(p, self.coordinate_system)
return PPoint(self.pp + p.p)
except TypeError:
p = PPoint(p, self.coordinate_system)
return PPoint(self.pp + p.pp)
def __sub__(self, p):
if isinstance(p, PPoint):
p = _geo_cs(p, PPoint, self.coordinate_system)
return PPoint(self.pp - p.pp)
if isinstance(p, Point):
p = _geo_cs(p, Point, self.coordinate_system)
return PPoint(self.pp - p.p)
return PPoint(self.pp - Point(p).p)
def __neg__(self):
return PPoint(self.pp * -1.0)
def __mul__(self, p):
if isinstance(p, PPoint):
p = _geo_cs(p, PPoint, self.coordinate_system)
return PPoint(self.pp * p.pp)
if isinstance(p, Point):
p = _geo_cs(p, Point, self.coordinate_system)
return PPoint(self.pp * p.p)
return PPoint(self.pp * Point(p).p)
def __truediv__(self, p):
if isinstance(p, PPoint):
p = _geo_cs(p, PPoint, self.coordinate_system)
return PPoint(self.pp / p.pp)
if isinstance(p, Point):
p = _geo_cs(p, Point, self.coordinate_system)
return PPoint(self.pp / p.p)
return PPoint(self.pp / Point(p).p)
def __eq__(self, other):
if not super(PPoint, self).__eq__(other):
return False
return np.array_equal(self.pp, other.pp)
    @classmethod
    def merge(cls, pp_list):
        """
        Create a `PPoint` from a list of `Point`, 'Point2` or `PPoint` instances or point arrays.
        :param pp_list: list of `Point`, 'Point2` or `PPoint` instances or point arrays.
        :return: `PPoint` instance that contains all points
        .. versionadded:: 9.4
        """
        # count points, get first coordinate system
        npt = 0
        cs = None
        for pp in pp_list:
            npt += len(pp)
            if cs is None and isinstance(pp, Geometry):
                cs = pp.coordinate_system
        # copy each item's points into one pre-allocated array
        npp = np.zeros((npt, 3))
        i = 0
        for pp in pp_list:
            if not isinstance(pp, Geometry):
                pp = PPoint(pp, coordinate_system=cs)
            if pp.coordinate_system != cs:
                # NOTE(review): reprojection here relies on the PPoint
                # constructor converting items to `cs` — confirm this holds
                # for all Geometry inputs.
                pp = PPoint(pp, coordinate_system=cs)
            npp[i:(i+len(pp))] = pp.pp
            i += len(pp)
        return PPoint(npp, coordinate_system=cs)
    @property
    def length(self):
        """Number of points (same as ``len(self)``)."""
        return self.__len__()
    @property
    def x(self):
        """ x array slice (view into the underlying array), can be set"""
        return self.pp[:, 0]
    @x.setter
    def x(self, v):
        # numpy assignment: `v` may be a scalar or a length-n array
        self.pp[:, 0] = v
    @property
    def y(self):
        """ y array slice (view into the underlying array), can be set"""
        return self.pp[:, 1]
    @y.setter
    def y(self, v):
        # numpy assignment: `v` may be a scalar or a length-n array
        self.pp[:, 1] = v
    @property
    def z(self):
        """ z array slice (view into the underlying array), can be set"""
        return self.pp[:, 2]
    @z.setter
    def z(self, v):
        # numpy assignment: `v` may be a scalar or a length-n array
        self.pp[:, 2] = v
    @property
    def xy(self):
        """ (x, y) array slice (view into the underlying array), can be set"""
        return self.pp[:, 0:2]
    @xy.setter
    def xy(self, v):
        # numpy assignment: `v` must broadcast to shape (n, 2)
        self.pp[:, 0:2] = v
    @property
    def xyz(self):
        """ xyz point array — the underlying (n, 3) array itself, not a copy"""
        return self.pp
@property
def extent(self):
"""
Volume extent as `Point2` for (min, max).
.. versionadded:: 9.2
"""
p1 = Point((np.nanmin(self.x), np.nanmin(self.y), np.nanmin(self.z)), self.coordinate_system)
p2 = Point((np.nanmax(self.x), np.nanmax(self.y), np.nanmax(self.z)), self.coordinate_system)
return Point2((p1, p2))
def make_xyz_vv(self):
"""
Return x, y and z as a set of :class:`geosoft.gxpy.vv.GXvv`.
:returns: (xvv, yvv, zvv)
.. versionadded:: 9.2
"""
return gxvv.GXvv(self.x), gxvv.GXvv(self.y), gxvv.GXvv(self.z)
    def copy(self):
        """Return a copy (a new `PPoint` built from this one's points)."""
        return PPoint(self)
class Mesh(Geometry, Sequence):
    """
    Mesh - set of triangular faces, which are indexes into verticies.
    :param mesh: (faces, verticies) that define a trangulated mesh surface. See below.
    :param coordinate_system: coordinate system or `None`
    :param kwargs: passed to base class `Geometry`
    A mesh is a set of triangles, where each triangle has three indexes into a set of verticies.
    Verticies are defined by a set of (x, y, z) locations. A Mesh instance can be constructed from
    two arrays in the form (faces, verticies), or from two sets of `geosoft.gxpy.vv.GXvv` instances
    in the form ((f1vv, f2vv, f3vv), (xvv, yvv, zvv)). In array form, each array is shaped (-1, 3),
    with faces being an integer array that references vertexes in the float vertex array.
    Operators supported: = + -, where '+' can be used to combine two meshes or add a constant offset.
    Iterating yields triangular faces as `PPoint` instances.
    :Example:
    .. code::
        import numpy as np
        import geosoft.gxpy.geometry as gxgm
        import geosoft.gxpy.vv as gxvv
        # create from lists
        faces = [[0, 1, 2],
                 [0, 2, 3],
                 [3, 2, 4]]
        verticies = [[0, 0, 0],
                     [5, 0, 0],
                     [5, 5, 0],
                     [0, 3, 5],
                     [2.5, 2, 10]]
        mesh = gxgm.Mesh((faces, verticies))
        # create from numpy arrays
        faces = np.array(faces, dtype=np.int32)
        verticies = np.array(verticies, dtype=np.float64)
        mesh = gxgm.Mesh((faces, verticies))
        # create from vv
        f1vv, f2vv, f3vv = gxvv.vvset_from_np(faces)
        xvv, yvv, zvv = gxvv.vvset_from_np(verticies)
        mesh = gxgm.Mesh(((f1vv, f2vv, f3vv), (xvv, yvv, zvv)))
    .. versionadded:: 9.3.1
    """
    def __str__(self):
        # readable summary: '<name>(<n> faces)'
        return "{}({} faces)".format(self.name, len(self))
    def __init__(self, mesh, coordinate_system=None, **kwargs):
        # Copy-construct from another Mesh, reprojecting verticies if a
        # different coordinate system was requested.
        if isinstance(mesh, Mesh):
            if coordinate_system and coordinate_system != mesh.coordinate_system:
                t = gxcs.Coordinate_translate(mesh.coordinate_system, coordinate_system)
                verticies = t.convert(mesh.verticies)
            else:
                verticies = mesh.verticies.copy()
            faces = mesh.faces.copy()
        else:
            # (faces, verticies) as lists, numpy arrays or GXvv triplets
            faces, verticies = mesh
            if isinstance(faces, list):
                faces = np.array(faces)
            if isinstance(verticies, list):
                verticies = np.array(verticies)
            if not isinstance(faces, np.ndarray):
                # (f1vv, f2vv, f3vv) GXvv triplet -> (n, 3) int array
                f1, f2, f3 = faces
                faces = np.empty((len(f1), 3), dtype=np.int32)
                faces[:, 0] = f1.np
                faces[:, 1] = f2.np
                faces[:, 2] = f3.np
            else:
                faces = faces.copy()
            if not isinstance(verticies, np.ndarray):
                # (xvv, yvv, zvv) GXvv triplet -> (n, 3) float array
                vx, vy, vz = verticies
                verticies = np.empty((len(vx), 3), dtype=np.float64)
                verticies[:, 0] = vx.np
                verticies[:, 1] = vy.np
                verticies[:, 2] = vz.np
            else:
                verticies = verticies.copy()
        # validate faces/verticies: every face index must resolve
        try:
            verticies[faces]
        except IndexError:
            raise GeometryException(_t('Verticies do not support all face indicies'))
        if 'name' not in kwargs:
            kwargs['name'] = '_mesh_'
        super().__init__(coordinate_system=coordinate_system, **kwargs)
        self._faces = faces
        self._verticies = verticies
        self._next = 0
    def __len__(self):
        # number of triangular faces
        return len(self._faces)
    def __iter__(self):
        return self
    def __next__(self):
        if self._next >= len(self._faces):
            # rewind so the instance can be iterated again
            self._next = 0
            raise StopIteration
        else:
            item = self._next
            self._next += 1
            return self.__getitem__(item)
    def __getitem__(self, item):
        # face `item` as a 3-point PPoint in this coordinate system
        return PPoint(self._verticies[self._faces[item]], self.coordinate_system)
    def __add__(self, m):
        if isinstance(m, Mesh):
            # combine two meshes: append m's faces with indicies offset past
            # this mesh's verticies, reprojecting m's verticies if needed
            f2 = np.append(self._faces, m.faces + len(self._verticies), axis=0)
            if self.coordinate_system == m.coordinate_system:
                v2 = m.verticies
            else:
                v2 = gxcs.Coordinate_translate(m.coordinate_system, self.coordinate_system).convert(m.verticies)
            v2 = np.append(self._verticies, v2, axis=0)
            return Mesh((f2, v2), self.coordinate_system)
        # otherwise treat m as a constant (dx, dy, dz) or scalar offset
        if hasattr(m, '__iter__'):
            dx = m[0]
            dy = m[1]
            dz = m[2]
        else:
            dx = dy = dz = float(m)
        m = Mesh(self)
        m._verticies[:, 0] += dx
        m._verticies[:, 1] += dy
        m._verticies[:, 2] += dz
        return m
    def __sub__(self, m):
        # subtract a constant (dx, dy, dz) or scalar offset
        if hasattr(m, '__iter__'):
            dx = m[0]
            dy = m[1]
            dz = m[2]
        else:
            dx = dy = dz = float(m)
        m = Mesh(self)
        m._verticies[:, 0] -= dx
        m._verticies[:, 1] -= dy
        m._verticies[:, 2] -= dz
        return m
    def __eq__(self, other):
        if not super(Mesh, self).__eq__(other):
            return False
        if not np.array_equal(self._faces, other.faces):
            return False
        # compare referenced verticies only, so unreferenced verticies
        # do not affect equality
        if not np.array_equal(self._verticies[self._faces], other.verticies[other.faces]):
            return False
        return True
    @property
    def faces(self):
        """Faces as an integer numpy array, shape (n_faces, 3)."""
        return self._faces
    @property
    def verticies(self):
        """Verticies as a float numpy array, shape (n_verticies, 3)."""
        return self._verticies
    @property
    def pp(self):
        """Verticies as a numpy array shaped (n_verticies, 3)."""
        return self.verticies
    @property
    def length(self):
        """Number of faces"""
        return self.__len__()
    @property
    def extent(self):
        """
        Volume extent as `Point2`, computed from referenced verticies only.
        .. versionadded:: 9.3.1
        """
        v = self._verticies[self._faces].reshape((-1, 3))
        vx = v[:, 0]
        vy = v[:, 1]
        vz = v[:, 2]
        p1 = Point((np.nanmin(vx), np.nanmin(vy), np.nanmin(vz)), self.coordinate_system)
        p2 = Point((np.nanmax(vx), np.nanmax(vy), np.nanmax(vz)), self.coordinate_system)
        return Point2((p1, p2))
    def point_array(self, unique=True):
        """
        Return numpy array of face corner locations.
        :param unique: `True` to limit to unique points, otherwise returns all points
                       by unwinding each face. If unique the order will not be related to the faces.
        .. versionadded:: 9.3.1
        """
        if unique:
            return self._verticies[np.unique(self._faces.flatten())].reshape(-1, 3)
        return self._verticies[self._faces].reshape(-1, 3)
    def faces_vv(self):
        """Return faces in `geosoft.gxpy.vv.GXvv` tuple (f1vv, f2vv, f3vv)."""
        return gxvv.GXvv(self._faces[:, 0], dtype=np.int32),\
               gxvv.GXvv(self._faces[:, 1], dtype=np.int32),\
               gxvv.GXvv(self._faces[:, 2], dtype=np.int32)
    def faces_vv_fast(self):
        """Return faces in list (f1vv, f2vv, f3vv)."""
        # returns raw numpy column views, avoiding GXvv construction
        return [self.faces[:, 0], self.faces[:, 1], self.faces[:, 2]]
    def verticies_vv(self):
        """Return verticies in `geosoft.gxpy.vv.GXvv` tuple (xvv, yvv, zvv)."""
        return gxvv.GXvv(self._verticies[:, 0], dtype=np.float64),\
               gxvv.GXvv(self._verticies[:, 1], dtype=np.float64),\
               gxvv.GXvv(self._verticies[:, 2], dtype=np.float64)
    def verticies_vv_fast(self):
        """Return verticies in list (xvv, yvv, zvv)."""
        # returns raw numpy column views, avoiding GXvv construction
        return [self._verticies[:, 0], self._verticies[:, 1], self._verticies[:, 2]]
    def copy(self):
        """Return a copy"""
        return Mesh(self)
| |
import re
from collections import defaultdict
from datetime import timedelta
from email.headerregistry import Address
from typing import Any, Dict, Iterable, List, Optional, Tuple
import html2text
import lxml.html
import pytz
from bs4 import BeautifulSoup
from django.conf import settings
from django.contrib.auth import get_backends
from django.utils.timezone import now as timezone_now
from django.utils.translation import override as override_language
from django.utils.translation import ugettext as _
from lxml.cssselect import CSSSelector
from confirmation.models import one_click_unsubscribe_link
from zerver.decorator import statsd_increment
from zerver.lib.markdown.fenced_code import FENCE_RE
from zerver.lib.message import bulk_access_messages
from zerver.lib.queue import queue_json_publish
from zerver.lib.send_email import FromAddress, send_future_email
from zerver.lib.types import DisplayRecipientT
from zerver.lib.url_encoding import (
huddle_narrow_url,
personal_narrow_url,
stream_narrow_url,
topic_narrow_url,
)
from zerver.models import (
Message,
Recipient,
Stream,
UserMessage,
UserProfile,
get_context_for_message,
get_display_recipient,
get_user_profile_by_id,
receives_offline_email_notifications,
)
def relative_to_full_url(base_url: str, content: str) -> str:
    """Rewrite URLs in rendered-message HTML for email delivery.

    Relative URLs are made absolute under *base_url*, and inline image
    previews are removed (the mail client's image request cannot be
    authenticated, so they would not render).
    """
    # Convert relative URLs to absolute URLs.
    fragment = lxml.html.fromstring(content)
    # We handle narrow URLs separately because of two reasons:
    # 1: 'lxml' seems to be having an issue in dealing with URLs that begin
    # `#` due to which it doesn't add a `/` before joining the base_url to
    # the relative URL.
    # 2: We also need to update the title attribute in the narrow links which
    # is not possible with `make_links_absolute()`.
    for link_info in fragment.iterlinks():
        elem, attrib, link, pos = link_info
        match = re.match("/?#narrow/", link)
        if match is not None:
            link = re.sub(r"^/?#narrow/", base_url + "/#narrow/", link)
            elem.set(attrib, link)
            # Only manually linked narrow URLs have title attribute set.
            if elem.get('title') is not None:
                elem.set('title', link)
    # Inline images can't be displayed in the emails as the request
    # from the mail server can't be authenticated because it has no
    # user_profile object linked to it. So we scrub the inline image
    # container.
    inline_image_containers = fragment.find_class("message_inline_image")
    for container in inline_image_containers:
        container.drop_tree()
    # The previous block handles most inline images, but for messages
    # where the entire Markdown input was just the URL of an image
    # (i.e. the entire body is a message_inline_image object), the
    # entire message body will be that image element; here, we need a
    # more drastic edit to the content.
    if fragment.get('class') == 'message_inline_image':
        content_template = '<p><a href="%s" target="_blank" title="%s">%s</a></p>'
        image_link = fragment.find('a').get('href')
        image_title = fragment.find('a').get('title')
        new_content = (content_template % (image_link, image_title, image_link))
        fragment = lxml.html.fromstring(new_content)
    fragment.make_links_absolute(base_url)
    content = lxml.html.tostring(fragment).decode("utf-8")
    return content
def fix_emojis(content: str, base_url: str, emojiset: str) -> str:
    """Replace emoji <span> elements in rendered HTML with <img> tags that
    reference the emoji PNG images under *base_url*, so emoji render in
    email clients."""
    def make_emoji_img_elem(emoji_span_elem: CSSSelector) -> Dict[str, Any]:
        # Convert the emoji spans to img tags.
        classes = emoji_span_elem.get('class')
        match = re.search(r'emoji-(?P<emoji_code>\S+)', classes)
        # re.search is capable of returning None,
        # but since the parent function should only be called with a valid css element
        # we assert that it does not.
        assert match is not None
        emoji_code = match.group('emoji_code')
        emoji_name = emoji_span_elem.get('title')
        alt_code = emoji_span_elem.text
        image_url = base_url + f'/static/generated/emoji/images-{emojiset}-64/{emoji_code}.png'
        img_elem = lxml.html.fromstring(
            f'<img alt="{alt_code}" src="{image_url}" title="{emoji_name}">')
        img_elem.set('style', 'height: 20px;')
        # preserve any trailing text that followed the original span
        img_elem.tail = emoji_span_elem.tail
        return img_elem
    fragment = lxml.html.fromstring(content)
    # Unicode emoji: swap each span for an <img>.
    for elem in fragment.cssselect('span.emoji'):
        parent = elem.getparent()
        img_elem = make_emoji_img_elem(elem)
        parent.replace(elem, img_elem)
    # Realm (custom) emoji are already <img>-like elements with an emoji
    # class; just strip the class and pin the display height.
    for realm_emoji in fragment.cssselect('.emoji'):
        del realm_emoji.attrib['class']
        realm_emoji.set('style', 'height: 20px;')
    content = lxml.html.tostring(fragment).decode('utf-8')
    return content
def fix_spoilers_in_html(content: str, language: str) -> str:
    """Replace spoiler blocks in rendered HTML with a localized hint.

    Email clients cannot toggle spoilers, so the hidden content is dropped
    entirely and the spoiler header is annotated with "(<hint>)".
    """
    with override_language(language):
        spoiler_title: str = _("Open Zulip to see the spoiler content")
    fragment = lxml.html.fromstring(content)
    spoilers = fragment.find_class("spoiler-block")
    for spoiler in spoilers:
        header = spoiler.find_class("spoiler-header")[0]
        spoiler_content = spoiler.find_class("spoiler-content")[0]
        header_content = header.find("p")
        if header_content is None:
            # Create a new element to append the spoiler hint to.
            header_content = lxml.html.fromstring("<p></p>")
            header.append(header_content)
        else:
            # Add a space. It's simpler to append a new span element than
            # inserting text after the last node ends since neither .text
            # nor .tail do the right thing for us.
            header_content.append(lxml.html.fromstring("<span> </span>"))
        # Bug fix: the closing </span> tag was previously missing its '>'.
        span_elem = lxml.html.fromstring(
            f'<span class="spoiler-title" title="{spoiler_title}">({spoiler_title})</span>')
        header_content.append(span_elem)
        header.drop_tag()
        # Drop the hidden content; it must not appear in the email.
        spoiler_content.drop_tree()
    content = lxml.html.tostring(fragment).decode("utf-8")
    return content
def fix_spoilers_in_text(content: str, language: str) -> str:
    """Replace spoiler blocks in plain-text content with a localized hint.

    Lines between a ```spoiler fence and its matching closing fence are
    dropped; the fences themselves are kept and the opening fence is
    followed by "(<hint>)".
    """
    with override_language(language):
        spoiler_title: str = _("Open Zulip to see the spoiler content")
    lines = content.split('\n')
    output = []
    open_fence = None
    for line in lines:
        m = FENCE_RE.match(line)
        if m:
            fence = m.group('fence')
            lang = m.group('lang')
            if lang == 'spoiler':
                # opening spoiler fence: keep it and add the hint
                open_fence = fence
                output.append(line)
                output.append(f"({spoiler_title})")
            elif fence == open_fence:
                # matching closing fence ends the spoiler block
                open_fence = None
                output.append(line)
            elif not open_fence:
                # Bug fix: unrelated fence lines outside a spoiler block
                # were previously dropped from the output.
                output.append(line)
        elif not open_fence:
            output.append(line)
    return '\n'.join(output)
def build_message_list(user_profile: UserProfile, messages: List[Message]) -> List[Dict[str, Any]]:
    """
    Builds the message list object for the missed message email template.
    The messages are collapsed into per-recipient and per-sender blocks, like
    our web interface
    """
    messages_to_render: List[Dict[str, Any]] = []
    def sender_string(message: Message) -> str:
        # Show sender names only for stream/huddle messages, not 1:1 PMs.
        if message.recipient.type in (Recipient.STREAM, Recipient.HUDDLE):
            return message.sender.full_name
        else:
            return ''
    def fix_plaintext_image_urls(content: str) -> str:
        # Replace image URLs in plaintext content of the form
        #     [image name](image url)
        # with a simple hyperlink.
        return re.sub(r"\[(\S*)\]\((\S*)\)", r"\2", content)
    def append_sender_to_message(message_plain: str, message_html: str, sender: str) -> Tuple[str, str]:
        # Prefix "<sender>: " to both the plain-text and HTML renderings.
        message_plain = f"{sender}: {message_plain}"
        message_soup = BeautifulSoup(message_html, "html.parser")
        sender_name_soup = BeautifulSoup(f"<b>{sender}</b>: ", "html.parser")
        first_tag = message_soup.find()
        if first_tag.name == "p":
            # insert inside the first paragraph so it flows with the text
            first_tag.insert(0, sender_name_soup)
        else:
            message_soup.insert(0, sender_name_soup)
        return message_plain, str(message_soup)
    def build_message_payload(message: Message, sender: Optional[str]=None) -> Dict[str, str]:
        # Render one message as {'plain': ..., 'html': ...} for the template.
        plain = message.content
        plain = fix_plaintext_image_urls(plain)
        # There's a small chance of colliding with non-Zulip URLs containing
        # "/user_uploads/", but we don't have much information about the
        # structure of the URL to leverage. We can't use `relative_to_full_url()`
        # function here because it uses a stricter regex which will not work for
        # plain text.
        plain = re.sub(
            r"/user_uploads/(\S*)",
            user_profile.realm.uri + r"/user_uploads/\1", plain)
        plain = fix_spoilers_in_text(plain, user_profile.default_language)
        assert message.rendered_content is not None
        html = message.rendered_content
        html = relative_to_full_url(user_profile.realm.uri, html)
        html = fix_emojis(html, user_profile.realm.uri, user_profile.emojiset)
        html = fix_spoilers_in_html(html, user_profile.default_language)
        if sender:
            plain, html = append_sender_to_message(plain, html, sender)
        return {'plain': plain, 'html': html}
    def build_sender_payload(message: Message) -> Dict[str, Any]:
        # Start a new per-sender block containing this message.
        sender = sender_string(message)
        return {'sender': sender,
                'content': [build_message_payload(message, sender)]}
    def message_header(user_profile: UserProfile, message: Message) -> Dict[str, Any]:
        # Build the recipient header (plain + HTML) shown above each block.
        if message.recipient.type == Recipient.PERSONAL:
            narrow_link = get_narrow_url(user_profile, message)
            header = f"You and {message.sender.full_name}"
            header_html = f"<a style='color: #ffffff;' href='{narrow_link}'>{header}</a>"
        elif message.recipient.type == Recipient.HUDDLE:
            display_recipient = get_display_recipient(message.recipient)
            assert not isinstance(display_recipient, str)
            narrow_link = get_narrow_url(user_profile, message,
                                         display_recipient=display_recipient)
            other_recipients = [r['full_name'] for r in display_recipient
                                if r['id'] != user_profile.id]
            header = "You and {}".format(", ".join(other_recipients))
            header_html = f"<a style='color: #ffffff;' href='{narrow_link}'>{header}</a>"
        else:
            stream = Stream.objects.only('id', 'name').get(id=message.recipient.type_id)
            narrow_link = get_narrow_url(user_profile, message, stream=stream)
            header = f"{stream.name} > {message.topic_name()}"
            stream_link = stream_narrow_url(user_profile.realm, stream)
            header_html = f"<a href='{stream_link}'>{stream.name}</a> > <a href='{narrow_link}'>{message.topic_name()}</a>"
        return {"plain": header,
                "html": header_html,
                "stream_message": message.recipient.type_name() == "stream"}
    # # Collapse message list to
    # [
    #    {
    #       "header": {
    #                      "plain":"header",
    #                      "html":"htmlheader"
    #                 }
    #       "senders":[
    #          {
    #             "sender":"sender_name",
    #             "content":[
    #                {
    #                   "plain":"content",
    #                   "html":"htmlcontent"
    #                }
    #                {
    #                   "plain":"content",
    #                   "html":"htmlcontent"
    #                }
    #             ]
    #          }
    #       ]
    #    },
    # ]
    messages.sort(key=lambda message: message.date_sent)
    for message in messages:
        header = message_header(user_profile, message)
        # If we want to collapse into the previous recipient block
        if len(messages_to_render) > 0 and messages_to_render[-1]['header'] == header:
            sender = sender_string(message)
            sender_block = messages_to_render[-1]['senders']
            # Same message sender, collapse again
            if sender_block[-1]['sender'] == sender:
                sender_block[-1]['content'].append(build_message_payload(message))
            else:
                # Start a new sender block
                sender_block.append(build_sender_payload(message))
        else:
            # New recipient and sender block
            recipient_block = {'header': header,
                               'senders': [build_sender_payload(message)]}
            messages_to_render.append(recipient_block)
    return messages_to_render
def get_narrow_url(user_profile: UserProfile, message: Message,
                   display_recipient: Optional[DisplayRecipientT]=None,
                   stream: Optional[Stream]=None) -> str:
    """The display_recipient and stream arguments are optional. If not
    provided, we'll compute them from the message; they exist as a
    performance optimization for cases where the caller needs those
    data too.

    Returns the webapp narrow URL for the message's conversation
    (PM, huddle or stream topic).
    """
    if message.recipient.type == Recipient.PERSONAL:
        assert stream is None
        assert display_recipient is None
        return personal_narrow_url(
            realm=user_profile.realm,
            sender=message.sender,
        )
    elif message.recipient.type == Recipient.HUDDLE:
        assert stream is None
        if display_recipient is None:
            display_recipient = get_display_recipient(message.recipient)
        assert display_recipient is not None
        assert not isinstance(display_recipient, str)
        # narrow to the other participants, excluding the reader
        other_user_ids = [r['id'] for r in display_recipient
                          if r['id'] != user_profile.id]
        return huddle_narrow_url(
            realm=user_profile.realm,
            other_user_ids=other_user_ids,
        )
    else:
        assert display_recipient is None
        if stream is None:
            stream = Stream.objects.only('id', 'name').get(id=message.recipient.type_id)
        return topic_narrow_url(user_profile.realm, stream, message.topic_name())
def message_content_allowed_in_missedmessage_emails(user_profile: UserProfile) -> bool:
    """Whether both the realm and the user permit including message content
    in missed-message email notifications."""
    realm_allows = user_profile.realm.message_content_allowed_in_email_notifications
    user_allows = user_profile.message_content_in_email_notifications
    return realm_allows and user_allows
@statsd_increment("missed_message_reminders")
def do_send_missedmessage_events_reply_in_zulip(user_profile: UserProfile,
                                                missed_messages: List[Dict[str, Any]],
                                                message_count: int) -> None:
    """
    Send a reminder email to a user if she's missed some PMs by being offline.
    The email will have its reply to address set to a limited used email
    address that will send a zulip message to the correct recipient. This
    allows the user to respond to missed PMs, huddles, and @-mentions directly
    from the email.
    `user_profile` is the user to send the reminder to
    `missed_messages` is a list of dictionaries to Message objects and other data
    for a group of messages that share a recipient (and topic)
    """
    from zerver.context_processors import common_context
    # Disabled missedmessage emails internally
    if not user_profile.enable_offline_email_notifications:
        return
    # All messages in one email must share a single conversation.
    recipients = {(msg['message'].recipient_id, msg['message'].topic_name()) for msg in missed_messages}
    if len(recipients) != 1:
        raise ValueError(
            f'All missed_messages must have the same recipient and topic {recipients!r}',
        )
    # This link is no longer a part of the email, but keeping the code in case
    # we find a clean way to add it back in the future
    unsubscribe_link = one_click_unsubscribe_link(user_profile, "missed_messages")
    context = common_context(user_profile)
    context.update({
        'name': user_profile.full_name,
        'message_count': message_count,
        'unsubscribe_link': unsubscribe_link,
        'realm_name_in_notifications': user_profile.realm_name_in_notifications,
    })
    # Record what triggered each message's inclusion (mention, stream
    # notification, ...) so the template can phrase the email correctly.
    triggers = list(message['trigger'] for message in missed_messages)
    unique_triggers = set(triggers)
    context.update({
        'mention': 'mentioned' in unique_triggers or 'wildcard_mentioned' in unique_triggers,
        'stream_email_notify': 'stream_email_notify' in unique_triggers,
        'mention_count': triggers.count('mentioned') + triggers.count("wildcard_mentioned"),
    })
    # If this setting (email mirroring integration) is enabled, only then
    # can users reply to email to send message to Zulip. Thus, one must
    # ensure to display warning in the template.
    if settings.EMAIL_GATEWAY_PATTERN:
        context.update({
            'reply_to_zulip': True,
        })
    else:
        context.update({
            'reply_to_zulip': False,
        })
    from zerver.lib.email_mirror import create_missed_message_address
    reply_to_address = create_missed_message_address(user_profile, missed_messages[0]['message'])
    if reply_to_address == FromAddress.NOREPLY:
        reply_to_name = ""
    else:
        reply_to_name = "Zulip"
    narrow_url = get_narrow_url(user_profile, missed_messages[0]['message'])
    context.update({
        'narrow_url': narrow_url,
    })
    senders = list({m['message'].sender for m in missed_messages})
    # Fill in conversation-type-specific context (huddle / PM / stream).
    if (missed_messages[0]['message'].recipient.type == Recipient.HUDDLE):
        display_recipient = get_display_recipient(missed_messages[0]['message'].recipient)
        # Make sure that this is a list of strings, not a string.
        assert not isinstance(display_recipient, str)
        other_recipients = [r['full_name'] for r in display_recipient
                            if r['id'] != user_profile.id]
        context.update({'group_pm': True})
        if len(other_recipients) == 2:
            huddle_display_name = " and ".join(other_recipients)
            context.update({'huddle_display_name': huddle_display_name})
        elif len(other_recipients) == 3:
            huddle_display_name = f"{other_recipients[0]}, {other_recipients[1]}, and {other_recipients[2]}"
            context.update({'huddle_display_name': huddle_display_name})
        else:
            huddle_display_name = "{}, and {} others".format(
                ', '.join(other_recipients[:2]), len(other_recipients) - 2)
            context.update({'huddle_display_name': huddle_display_name})
    elif (missed_messages[0]['message'].recipient.type == Recipient.PERSONAL):
        context.update({'private_message': True})
    elif (context['mention'] or context['stream_email_notify']):
        # Keep only the senders who actually mentioned the user
        if context['mention']:
            senders = list({m['message'].sender for m in missed_messages
                            if m['trigger'] == 'mentioned' or
                            m['trigger'] == 'wildcard_mentioned'})
        message = missed_messages[0]['message']
        stream = Stream.objects.only('id', 'name').get(id=message.recipient.type_id)
        stream_header = f"{stream.name} > {message.topic_name()}"
        context.update({
            'stream_header': stream_header,
        })
    else:
        raise AssertionError("Invalid messages!")
    # If message content is disabled, then flush all information we pass to email.
    if not message_content_allowed_in_missedmessage_emails(user_profile):
        realm = user_profile.realm
        context.update({
            'reply_to_zulip': False,
            'messages': [],
            'sender_str': "",
            'realm_str': realm.name,
            'huddle_display_name': "",
            'show_message_content': False,
            'message_content_disabled_by_user': not user_profile.message_content_in_email_notifications,
            'message_content_disabled_by_realm': not realm.message_content_allowed_in_email_notifications,
        })
    else:
        context.update({
            'messages': build_message_list(user_profile, list(m['message'] for m in missed_messages)),
            'sender_str': ", ".join(sender.full_name for sender in senders),
            'realm_str': user_profile.realm.name,
            'show_message_content': True,
        })
    with override_language(user_profile.default_language):
        from_name: str = _("Zulip missed messages")
    from_address = FromAddress.NOREPLY
    if len(senders) == 1 and settings.SEND_MISSED_MESSAGE_EMAILS_AS_USER:
        # If this setting is enabled, you can reply to the Zulip
        # missed message emails directly back to the original sender.
        # However, one must ensure the Zulip server is in the SPF
        # record for the domain, or there will be spam/deliverability
        # problems.
        #
        # Also, this setting is not really compatible with
        # EMAIL_ADDRESS_VISIBILITY_ADMINS.
        sender = senders[0]
        from_name, from_address = (sender.full_name, sender.email)
        context.update({
            'reply_to_zulip': False,
        })
    email_dict = {
        'template_prefix': 'zerver/emails/missed_message',
        'to_user_ids': [user_profile.id],
        'from_name': from_name,
        'from_address': from_address,
        'reply_to_email': str(Address(display_name=reply_to_name, addr_spec=reply_to_address)),
        'context': context}
    queue_json_publish("email_senders", email_dict)
    # Record that we reminded the user, to support notification throttling.
    user_profile.last_reminder = timezone_now()
    user_profile.save(update_fields=['last_reminder'])
def handle_missedmessage_emails(user_profile_id: int,
                                missed_email_events: Iterable[Dict[str, Any]]) -> None:
    """Group a user's missed-message events into per-conversation buckets
    and send one missed-message email per bucket."""
    # map message_id -> trigger that caused the notification
    message_ids = {event.get('message_id'): event.get('trigger') for event in missed_email_events}
    user_profile = get_user_profile_by_id(user_profile_id)
    if not receives_offline_email_notifications(user_profile):
        return
    # Note: This query structure automatically filters out any
    # messages that were permanently deleted, since those would now be
    # in the ArchivedMessage table, not the Message table.
    messages = Message.objects.filter(usermessage__user_profile_id=user_profile,
                                      id__in=message_ids,
                                      usermessage__flags=~UserMessage.flags.read)
    # Cancel missed-message emails for deleted messages
    messages = [um for um in messages if um.content != "(deleted)"]
    if not messages:
        return
    # We bucket messages by tuples that identify similar messages.
    # For streams it's recipient_id and topic.
    # For PMs it's recipient id and sender.
    messages_by_bucket: Dict[Tuple[int, str], List[Message]] = defaultdict(list)
    for msg in messages:
        if msg.recipient.type == Recipient.PERSONAL:
            # For PM's group using (recipient, sender).
            messages_by_bucket[(msg.recipient_id, msg.sender_id)].append(msg)
        else:
            messages_by_bucket[(msg.recipient_id, msg.topic_name())].append(msg)
    # Remember the original count per bucket before context messages are
    # added below, so the email reports the true number of missed messages.
    message_count_by_bucket = {
        bucket_tup: len(msgs)
        for bucket_tup, msgs in messages_by_bucket.items()
    }
    for msg_list in messages_by_bucket.values():
        # Add surrounding conversation context (that the user may read)
        # around the earliest missed message in each stream bucket.
        msg = min(msg_list, key=lambda msg: msg.date_sent)
        if msg.is_stream_message():
            context_messages = get_context_for_message(msg)
            filtered_context_messages = bulk_access_messages(user_profile, context_messages)
            msg_list.extend(filtered_context_messages)
    # Sort emails by least recently-active discussion.
    bucket_tups: List[Tuple[Tuple[int, str], int]] = []
    for bucket_tup, msg_list in messages_by_bucket.items():
        max_message_id = max(msg_list, key=lambda msg: msg.id).id
        bucket_tups.append((bucket_tup, max_message_id))
    bucket_tups = sorted(bucket_tups, key=lambda x: x[1])
    # Send an email per bucket.
    for bucket_tup, ignored_max_id in bucket_tups:
        unique_messages = {}
        for m in messages_by_bucket[bucket_tup]:
            # keyed by id to de-duplicate context messages
            unique_messages[m.id] = dict(
                message=m,
                trigger=message_ids.get(m.id),
            )
        do_send_missedmessage_events_reply_in_zulip(
            user_profile,
            list(unique_messages.values()),
            message_count_by_bucket[bucket_tup],
        )
def log_digest_event(msg: str) -> None:
    """Append a line to the digest log file, with timestamps in UTC."""
    import logging
    import time
    # render all log timestamps in UTC
    logging.Formatter.converter = time.gmtime
    # NOTE(review): basicConfig only configures the root logger on its first
    # call; subsequent calls are no-ops.
    logging.basicConfig(filename=settings.DIGEST_LOG_PATH, level=logging.INFO)
    logging.info(msg)
def followup_day2_email_delay(user: UserProfile) -> timedelta:
    """Compute how long after signup the day-2 followup email should be sent.

    Weekend-aware: Friday signups wait until Monday and Thursday signups
    only until Friday; everyone else waits the default two days.
    """
    user_tz = user.timezone if user.timezone != '' else 'UTC'
    signup_day = user.date_joined.astimezone(pytz.timezone(user_tz)).isoweekday()
    # isoweekday: Friday == 5 -> 3 days (Monday); Thursday == 4 -> 1 day (Friday)
    days_to_delay = {5: 3, 4: 1}.get(signup_day, 2)
    # The delay should be 1 hour before the above calculated delay as
    # our goal is to maximize the chance that this email is near the top
    # of the user's inbox when the user sits down to deal with their inbox,
    # or comes in while they are dealing with their inbox.
    return timedelta(days=days_to_delay, hours=-1)
def enqueue_welcome_emails(user: UserProfile, realm_creation: bool=False) -> None:
    """Schedule the day-1 welcome email (and, for the user's first account
    with this delivery email, the delayed day-2 followup email)."""
    from zerver.context_processors import common_context
    if settings.WELCOME_EMAIL_SENDER is not None:
        # line break to avoid triggering lint rule
        from_name = settings.WELCOME_EMAIL_SENDER['name']
        from_address = settings.WELCOME_EMAIL_SENDER['email']
    else:
        from_name = None
        from_address = FromAddress.support_placeholder
    # other accounts that share this delivery email (case-insensitive)
    other_account_count = UserProfile.objects.filter(
        delivery_email__iexact=user.delivery_email).exclude(id=user.id).count()
    unsubscribe_link = one_click_unsubscribe_link(user, "welcome")
    context = common_context(user)
    context.update({
        'unsubscribe_link': unsubscribe_link,
        'keyboard_shortcuts_link': user.realm.uri + '/help/keyboard-shortcuts',
        'realm_name': user.realm.name,
        'realm_creation': realm_creation,
        'email': user.delivery_email,
        'is_realm_admin': user.role == UserProfile.ROLE_REALM_ADMINISTRATOR,
    })
    if user.is_realm_admin:
        context['getting_started_link'] = (user.realm.uri +
                                           '/help/getting-your-organization-started-with-zulip')
    else:
        context['getting_started_link'] = "https://zulip.com"
    # Imported here to avoid import cycles.
    from zproject.backends import ZulipLDAPAuthBackend, email_belongs_to_ldap
    if email_belongs_to_ldap(user.realm, user.delivery_email):
        context["ldap"] = True
        for backend in get_backends():
            # If the user is doing authentication via LDAP, Note that
            # we exclude ZulipLDAPUserPopulator here, since that
            # isn't used for authentication.
            if isinstance(backend, ZulipLDAPAuthBackend):
                context["ldap_username"] = backend.django_to_ldap_username(user.delivery_email)
                break
    send_future_email(
        "zerver/emails/followup_day1", user.realm, to_user_ids=[user.id], from_name=from_name,
        from_address=from_address, context=context)
    if other_account_count == 0:
        # only the first account with this email gets the day-2 followup
        send_future_email(
            "zerver/emails/followup_day2", user.realm, to_user_ids=[user.id], from_name=from_name,
            from_address=from_address, context=context, delay=followup_day2_email_delay(user))
def convert_html_to_markdown(html: str) -> str:
    """Convert an HTML fragment to Markdown via html2text, fixing image links.

    We want images to get linked and inline previewed, but html2text will
    turn them into links of the form ``,
    which is ugly.  Run a regex over the resulting description, turning
    links of the form `` into
    `[image.png](http://foo.com/image.png)`.
    """
    parser = html2text.HTML2Text()
    markdown = parser.handle(html).strip()
    # Raw strings are the idiomatic way to write regex patterns/replacements;
    # these are byte-for-byte the same patterns as the doubled-backslash form.
    return re.sub(r"!\[\]\((\S*)/(\S*)\?(\S*)\)",
                  r"[\2](\1/\2)", markdown)
| |
from ..encoding import wif_to_secret_exponent
from ..convention import tx_fee
from .Spendable import Spendable
from .Tx import Tx
from .TxOut import TxOut, standard_tx_out_script
from .pay_to import build_hash160_lookup
class SecretExponentMissing(Exception):
    """Raised when a transaction input could not be signed because no
    matching secret exponent (private key) was available."""
class LazySecretExponentDB(object):
    """
    Lazy, caching lookup from hash160 keys to secret-exponent tuples.

    The pycoin pure-python conversion of secret exponents into public
    pairs is slow, so WIFs are only converted on demand, one at a time,
    and every conversion is stored in the shared cache dictionary so a
    large list of WIFs is never processed more than once.
    """

    def __init__(self, wif_iterable, secret_exponent_db_cache):
        self.wif_iterable = iter(wif_iterable)
        self.secret_exponent_db_cache = secret_exponent_db_cache

    def get(self, v):
        """Return the cached entry for v, converting WIFs lazily until found.

        Returns None when v is not in the cache and all WIFs are exhausted.
        """
        cache = self.secret_exponent_db_cache
        if v in cache:
            return cache[v]
        for wif in self.wif_iterable:
            # Convert one WIF, fold its lookup entries into the cache, and
            # stop as soon as the requested key appears.
            exponent = wif_to_secret_exponent(wif)
            cache.update(build_hash160_lookup([exponent]))
            if v in cache:
                return cache[v]
        # Iterable exhausted: make future calls skip the loop entirely.
        self.wif_iterable = []
        return None
def create_tx(spendables, payables, fee="standard", lock_time=0, version=1):
    """
    This function provides the easiest way to create an unsigned transaction.

    All coin values are in satoshis.

    spendables:
        a list of Spendable objects, which act as inputs. These can
        be either a Spendable or a Spendable.as_text or a Spendable.as_dict
        if you prefer a non-object-based input (which might be easier for
        airgapped transactions, for example).
    payables:
        a list where each entry is a bitcoin address, or a tuple of
        (bitcoin address, coin_value). If the coin_value is missing or
        zero, this address is thrown into the "split pool". Funds not
        explicitly claimed by the fee or a bitcoin address are shared as
        equally as possible among the split pool. [Minor point: if the
        amount to be split does not divide evenly, some of the earlier
        bitcoin addresses will get an extra satoshi.]
    fee:
        a value, or "standard" for it to be calculated.
    version:
        the version to use in the transaction. Normally 1.
    lock_time:
        the lock_time to use in the transaction. Normally 0.

    Returns the unsigned Tx transaction. Note that unspents are set, so the
    transaction can be immediately signed.

    Example:

    tx = create_tx(
        spendables_for_address("1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH"),
        ["1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP"],
        fee=0)

    This will move all available reported funds from 1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH
    to 1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP, with no transaction fees (which means it might
    take a while to confirm, possibly never).
    """

    def _fix_spendable(s):
        # Normalize the three accepted spendable forms (Spendable object,
        # text serialization, or dict) into a Spendable object.
        if isinstance(s, Spendable):
            return s
        if not hasattr(s, "keys"):
            return Spendable.from_text(s)
        return Spendable.from_dict(s)

    spendables = [_fix_spendable(s) for s in spendables]
    txs_in = [spendable.tx_in() for spendable in spendables]

    txs_out = []
    for payable in payables:
        # A payable is either (address, coin_value) or a bare address string;
        # bare addresses join the split pool with a coin_value of 0.
        if len(payable) == 2:
            bitcoin_address, coin_value = payable
        else:
            bitcoin_address = payable
            coin_value = 0
        script = standard_tx_out_script(bitcoin_address)
        txs_out.append(TxOut(coin_value, script))

    tx = Tx(version=version, txs_in=txs_in, txs_out=txs_out, lock_time=lock_time)
    # Attach the unspents so the returned transaction can be signed directly.
    tx.set_unspents(spendables)
    # Share any unclaimed funds among the zero-valued outputs (split pool).
    distribute_from_split_pool(tx, fee)
    return tx
def distribute_from_split_pool(tx, fee):
    """
    This function looks at TxOut items of a transaction tx and
    and puts TxOut items with a coin value of zero into a "split pool".
    Funds not explicitly claimed by the fee or other TxOut items are
    shared as equally as possible among the split pool. If the amount
    to be split does not divide evenly, some of the earlier TxOut items
    will get an extra satoshi.

    tx:
        the transaction
    fee:
        the reserved fee set aside

    Returns the number of outputs that were in the split pool.
    """
    # Resolve a symbolic fee.  Caveats (unchanged from the original): the tx
    # is not fully built out yet, so it will end up larger than implied here,
    # and recommended_fee_for_tx tends to estimate high.
    if fee == 'standard':
        fee = tx_fee.recommended_fee_for_tx(tx)

    # Outputs with a zero coin value form the split pool.
    pool = [tx_out for tx_out in tx.txs_out if tx_out.coin_value == 0]
    if pool:
        available = sum(spendable.coin_value for spendable in tx.txs_in_as_spendable())
        claimed = fee + sum(tx_out.coin_value for tx_out in tx.txs_out)
        leftover = available - claimed
        if leftover < 0:
            raise ValueError("insufficient inputs for outputs")
        share, remainder = divmod(leftover, len(pool))
        if share < 1:
            raise ValueError("not enough to pay nonzero amounts to at least one of the unspecified outputs")
        # Earlier pool entries absorb the indivisible remainder, one satoshi each.
        for position, tx_out in enumerate(pool):
            tx_out.coin_value = share + (1 if position < remainder else 0)
    return len(pool)
def sign_tx(tx, wifs=None, secret_exponent_db=None, **kwargs):
    """
    This function provides a convenience method to sign a transaction.

    The transaction must have "unspents" set by, for example,
    calling tx.unspents_from_db.

    wifs:
        the list of WIFs required to sign this transaction.
    secret_exponent_db:
        an optional dictionary (or any object with a .get method) that contains
        a bitcoin address => (secret_exponent, public_pair, is_compressed)
        tuple. This will be built automatically lazily with the list of WIFs.
        You can pass in an empty dictionary and as WIFs are processed, they
        will be cached here. If you have multiple transactions to sign, each with
        the same WIF list, passing a cache dictionary in may speed things up a bit.

    Returns the signed Tx transaction, or raises an exception.

    At least one of "wifs" and "secret_exponent_db" must be included for there
    to be any hope of signing the transaction.

    Example:

    sign_tx(wifs=["KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qYjgd9M7rFU73sVHnoWn"])
    """
    # Fix: the previous signature used mutable defaults (wifs=[],
    # secret_exponent_db={}); the shared default dict silently accumulated
    # derived key material across unrelated calls.  None-sentinels give each
    # call fresh containers while keeping the interface identical for callers.
    if wifs is None:
        wifs = []
    if secret_exponent_db is None:
        secret_exponent_db = {}
    tx.sign(LazySecretExponentDB(wifs, secret_exponent_db), **kwargs)
def create_signed_tx(spendables, payables, wifs=None, fee="standard",
                     lock_time=0, version=1, secret_exponent_db=None, **kwargs):
    """
    This function provides an easy way to create and sign a transaction.

    All coin values are in satoshis.

    spendables, payables, fee, lock_time, version are as in create_tx, above.
    wifs, secret_exponent_db are as in sign_tx, above.

    Returns the signed Tx transaction, or raises an exception.

    At least one of "wifs" and "secret_exponent_db" must be included for there
    to be any hope of signing the transaction.

    Example:

    tx = create_signed_tx(
        spendables_for_address("1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH"),
        ["1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP"],
        wifs=["KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qYjgd9M7rFU73sVHnoWn"],
        fee=0)

    This will move all available reported funds from 1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH
    to 1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP, with no transaction fees (which means it might
    take a while to confirm, possibly never).
    """
    # Fix: mutable default arguments ([], {}) replaced by None-sentinels so a
    # shared default dict cannot cache key material across unrelated calls.
    if wifs is None:
        wifs = []
    if secret_exponent_db is None:
        secret_exponent_db = {}
    tx = create_tx(spendables, payables, fee=fee, lock_time=lock_time, version=version)
    sign_tx(tx, wifs=wifs, secret_exponent_db=secret_exponent_db, **kwargs)
    # Verify every input was actually signed.  (The loop variable was
    # previously named tx_out despite iterating over inputs, and was unused.)
    for idx in range(len(tx.txs_in)):
        if not tx.is_signature_ok(idx):
            raise SecretExponentMissing("failed to sign spendable for %s" %
                                        tx.unspents[idx].bitcoin_address())
    return tx
| |
"""
Model storage.
@author: Gautham Ganapathy
@organization: LEMS (http://neuroml.org/lems/, https://github.com/organizations/LEMS)
@contact: gautham@lisphacker.org
"""
import os
from os.path import dirname
import sys
from lems.base.base import LEMSBase
from lems.base.map import Map
from lems.parser.LEMS import LEMSFileParser
from lems.base.util import merge_maps, merge_lists
from lems.model.component import Constant,ComponentType,Component,FatComponent
from lems.base.errors import ModelError
from lems.base.errors import SimBuildError
from lems.model.fundamental import Dimension,Unit,Include
# from lems.model.component import Constant,ComponentType,Component,FatComponent
from lems.model.simulation import Run,Record,EventRecord,DataDisplay,DataWriter,EventWriter
from lems.model.structure import With,EventConnection,ChildInstance,MultiInstantiate
import xml.dom.minidom as minidom
import logging
class Model(LEMSBase):
    """
    Stores a model.
    """

    # NOTE(review): configuring the root logger at class-definition (import)
    # time is a global side effect; confirm before relocating it.
    logging.basicConfig(level=logging.INFO)

    # Version of the LEMS specification this library targets.
    target_lems_version = '0.7.3'
    # Branch of the LEMS repository from which the schema URL is built.
    branch = 'development'
    # URL of the LEMS XSD schema matching target_lems_version.
    schema_location = 'https://raw.githubusercontent.com/LEMS/LEMS/{0}/Schemas/LEMS/LEMS_v{1}.xsd'.format(branch, target_lems_version)
    #schema_location = '/home/padraig/LEMS/Schemas/LEMS/LEMS_v%s.xsd'%target_lems_version

    # When True, methods print verbose tracing output.
    debug = False
    def __init__(self, include_includes=True, fail_on_missing_includes=True):
        """
        Constructor.

        @param include_includes: Whether files referenced by <Include>
            elements should be parsed into this model.
        @type include_includes: bool

        @param fail_on_missing_includes: Whether a missing included file
            raises an exception (True) or is skipped (False).
        @type fail_on_missing_includes: bool
        """
        self.targets = list()
        """ List of targets to be run on startup.
        @type: list(str) """

        self.includes = Map()
        """ Dictionary of includes defined in the model.
        @type: dict(str -> lems.model.fundamental.Include """

        self.dimensions = Map()
        """ Dictionary of dimensions defined in the model.
        @type: dict(str -> lems.model.fundamental.Dimension """

        self.units = Map()
        """ Map of units defined in the model.
        @type: dict(str -> lems.model.fundamental.Unit """

        self.component_types = Map()
        """ Map of component types defined in the model.
        @type: dict(str -> lems.model.component.ComponentType) """

        self.components = Map()
        """ Map of root components defined in the model.
        @type: dict(str -> lems.model.component.Component) """

        self.fat_components = Map()
        """ Map of root fattened components defined in the model.
        @type: dict(str -> lems.model.component.FatComponent) """

        self.constants = Map()
        """ Map of constants in this component type.
        @type: dict(str -> lems.model.component.Constant) """

        self.include_directories = []
        """ List of include directories to search for included LEMS files.
        @type: list(str) """

        self.included_files = []
        """ List of files already included.
        @type: list(str) """

        self.description = None
        """ Short description of contents of LEMS file
        @type: str """

        self.include_includes = include_includes
        """ Whether to include LEMS definitions in <Include> elements
        @type: boolean """

        self.fail_on_missing_includes = fail_on_missing_includes
        """ Whether to raise an Exception when a file in an <Include> element is not found
        @type: boolean """
    def add_target(self, target):
        """
        Adds a simulation target to the model.

        @param target: Name of the component to be added as a
        simulation target.
        @type target: str
        """
        self.targets.append(target)
    def add_include(self, include):
        """
        Adds an include to the model, keyed by its file name.

        @param include: Include to be added.
        @type include: lems.model.fundamental.Include
        """
        self.includes[include.file] = include
    def add_dimension(self, dimension):
        """
        Adds a dimension to the model, keyed by its name.

        @param dimension: Dimension to be added.
        @type dimension: lems.model.fundamental.Dimension
        """
        self.dimensions[dimension.name] = dimension
    def add_unit(self, unit):
        """
        Adds a unit to the model, keyed by its symbol.

        @param unit: Unit to be added.
        @type unit: lems.model.fundamental.Unit
        """
        self.units[unit.symbol] = unit
    def add_component_type(self, component_type):
        """
        Adds a component type to the model, keyed by its (sanitized) name.

        @param component_type: Component type to be added.
        @type component_type: lems.model.component.ComponentType
        """
        name = component_type.name
        # To handle colons in names in LEMS: replace them so the name is a
        # usable identifier/key.  The component type is renamed in place.
        if ':' in name:
            name = name.replace(':', '_')
            component_type.name = name

        self.component_types[name] = component_type
    def add_component(self, component):
        """
        Adds a component to the model, keyed by its id.

        @param component: Component to be added.
        @type component: lems.model.component.Component
        """
        self.components[component.id] = component
    def add_fat_component(self, fat_component):
        """
        Adds a fattened component to the model, keyed by its id.

        @param fat_component: Fattened component to be added.
        @type fat_component: lems.model.component.FatComponent
        """
        self.fat_components[fat_component.id] = fat_component
    def add_constant(self, constant):
        """
        Adds a constant to the model, keyed by its name.

        @param constant: Constant to be added.
        @type constant: lems.model.component.Constant
        """
        self.constants[constant.name] = constant
    def add(self, child):
        """
        Adds a typed child object to the model, dispatching on its type.

        Note: the isinstance order matters here — subclasses must be tested
        before their base classes.

        @param child: Child object to be added.

        @raise ModelError: if the child is of an unsupported type.
        """
        if isinstance(child, Include):
            self.add_include(child)
        elif isinstance(child, Dimension):
            self.add_dimension(child)
        elif isinstance(child, Unit):
            self.add_unit(child)
        elif isinstance(child, ComponentType):
            self.add_component_type(child)
        elif isinstance(child, Component):
            self.add_component(child)
        elif isinstance(child, FatComponent):
            self.add_fat_component(child)
        elif isinstance(child, Constant):
            self.add_constant(child)
        else:
            raise ModelError('Unsupported child element')
# def add_include_directory(self, path):
# """
# Adds a directory to the include file search path.
#
# @param path: Directory to be added.
# @type path: str
# """
#
# self.include_directories.append(path)
# def include_file(self, path, include_dirs = []):
# """
# Includes a file into the current model.
#
# @param path: Path to the file to be included.
# @type path: str
#
# @param include_dirs: Optional alternate include search path.
# @type include_dirs: list(str)
# """
# if self.include_includes:
# if self.debug: print("------------------ Including a file: %s"%path)
# inc_dirs = include_dirs if include_dirs else self.include_dirs
#
# parser = LEMSFileParser(self, inc_dirs, self.include_includes)
# if os.access(path, os.F_OK):
# if not path in self.included_files:
# parser.parse(open(path).read())
# self.included_files.append(path)
# return
# else:
# if self.debug: print("Already included: %s"%path)
# return
# else:
# for inc_dir in inc_dirs:
# new_path = (inc_dir + '/' + path)
# if os.access(new_path, os.F_OK):
# if not new_path in self.included_files:
# parser.parse(open(new_path).read())
# self.included_files.append(new_path)
# return
# else:
# if self.debug: print("Already included: %s"%path)
# return
# msg = 'Unable to open ' + path
# if self.fail_on_missing_includes:
# raise Exception(msg)
# elif self.debug:
# print(msg)
    def import_from_file(self, filepath):
        """
        Import a model from a file.

        The file's own directory is appended to the include search path so
        that relative <Include> references resolve.

        @param filepath: File to be imported.
        @type filepath: str
        """
        inc_dirs = self.include_directories[:]
        inc_dirs.append(dirname(filepath))

        parser = LEMSFileParser(self, inc_dirs, self.include_includes)
        with open(filepath) as f:
            parser.parse(f.read())
# def export_to_dom(self):
# """
# Exports this model to a DOM.
# """
# namespaces = 'xmlns="http://www.neuroml.org/lems/%s" ' + \
# 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ' + \
# 'xsi:schemaLocation="http://www.neuroml.org/lems/%s %s"'
#
# namespaces = namespaces%(self.target_lems_version,self.target_lems_version,self.schema_location)
#
# xmlstr = '<Lems %s>'%namespaces
#
# for include in self.includes:
# xmlstr += include.toxml()
#
# for target in self.targets:
# xmlstr += '<Target component="{0}"/>'.format(target)
#
# for dimension in self.dimensions:
# xmlstr += dimension.toxml()
#
# for unit in self.units:
# xmlstr += unit.toxml()
#
# for constant in self.constants:
# xmlstr += constant.toxml()
#
# for component_type in self.component_types:
# xmlstr += component_type.toxml()
#
# for component in self.components:
# xmlstr += component.toxml()
#
# xmlstr += '</Lems>'
#
# xmldom = minidom.parseString(xmlstr)
# return xmldom
#
# def export_to_file(self, filepath, level_prefix = ' '):
# """
# Exports this model to a file.
#
# @param filepath: File to be exported to.
# @type filepath: str
# """
# xmldom = self.export_to_dom()
# xmlstr = xmldom.toprettyxml(level_prefix, '\n',)
#
#
# f = open(filepath, 'w')
# f.write(xmlstr)
# f.close()
#
def resolve(self):
"""
Resolves references in this model.
"""
model = self.copy()
for ct in model.component_types:
model.resolve_component_type(ct)
for c in model.components:
if c.id not in model.fat_components:
model.add(model.fatten_component(c))
for c in ct.constants:
c2 = c.copy()
c2.numeric_value = model.get_numeric_value(c2.value, c2.dimension)
model.add(c2)
return model
def resolve_component_type(self, component_type):
"""
Resolves references in the specified component type.
@param component_type: Component type to be resolved.
@type component_type: lems.model.component.ComponentType
"""
# Resolve component type from base types if present.
if component_type.extends:
try:
base_ct = self.component_types[component_type.extends]
except:
raise ModelError("Component type '{0}' trying to extend unknown component type '{1}'",
component_type.name, component_type.extends)
self.resolve_component_type(base_ct)
self.merge_component_types(component_type, base_ct)
component_type.types = set.union(component_type.types, base_ct.types)
component_type.extends = None
    def merge_component_types(self, ct, base_ct):
        """
        Merge various maps in the given component type from a base
        component type.  Existing entries in `ct` win over the base's
        (merge_maps only fills in missing keys).

        @param ct: Component type to be resolved.
        @type ct: lems.model.component.ComponentType

        @param base_ct: Component type to be resolved.
        @type base_ct: lems.model.component.ComponentType
        """
        #merge_maps(ct.parameters, base_ct.parameters)
        # Parameters need special handling: a derived type may "fix" a base
        # parameter, in which case the fixed value and base dimension apply.
        for parameter in base_ct.parameters:
            if parameter.name in ct.parameters:
                p = ct.parameters[parameter.name]
                basep = base_ct.parameters[parameter.name]
                if p.fixed:
                    p.value = p.fixed_value
                    p.dimension = basep.dimension
            else:
                ct.parameters[parameter.name] = base_ct.parameters[parameter.name]

        merge_maps(ct.properties, base_ct.properties)
        merge_maps(ct.derived_parameters, base_ct.derived_parameters)
        merge_maps(ct.index_parameters, base_ct.index_parameters)
        merge_maps(ct.constants, base_ct.constants)
        merge_maps(ct.exposures, base_ct.exposures)
        merge_maps(ct.requirements, base_ct.requirements)
        merge_maps(ct.component_requirements, base_ct.component_requirements)
        merge_maps(ct.instance_requirements, base_ct.instance_requirements)
        merge_maps(ct.children, base_ct.children)
        merge_maps(ct.texts, base_ct.texts)
        merge_maps(ct.links, base_ct.links)
        merge_maps(ct.paths, base_ct.paths)
        merge_maps(ct.event_ports, base_ct.event_ports)
        merge_maps(ct.component_references, base_ct.component_references)
        merge_maps(ct.attachments, base_ct.attachments)

        merge_maps(ct.dynamics.state_variables, base_ct.dynamics.state_variables)
        merge_maps(ct.dynamics.derived_variables, base_ct.dynamics.derived_variables)
        merge_maps(ct.dynamics.conditional_derived_variables, base_ct.dynamics.conditional_derived_variables)
        merge_maps(ct.dynamics.time_derivatives, base_ct.dynamics.time_derivatives)

        #merge_lists(ct.dynamics.event_handlers, base_ct.dynamics.event_handlers)

        merge_maps(ct.dynamics.kinetic_schemes, base_ct.dynamics.kinetic_schemes)

        merge_lists(ct.structure.event_connections, base_ct.structure.event_connections)
        merge_lists(ct.structure.child_instances, base_ct.structure.child_instances)
        merge_lists(ct.structure.multi_instantiates, base_ct.structure.multi_instantiates)

        merge_maps(ct.simulation.runs, base_ct.simulation.runs)
        merge_maps(ct.simulation.records, base_ct.simulation.records)
        merge_maps(ct.simulation.event_records, base_ct.simulation.event_records)
        merge_maps(ct.simulation.data_displays, base_ct.simulation.data_displays)
        merge_maps(ct.simulation.data_writers, base_ct.simulation.data_writers)
        merge_maps(ct.simulation.event_writers, base_ct.simulation.event_writers)
    def fatten_component(self, c):
        """
        Fatten a component but resolving all references into the corresponding component type.

        @param c: Lean component to be fattened.
        @type c: lems.model.component.Component

        @return: Fattened component.
        @rtype: lems.model.component.FatComponent

        @raise ModelError: if the component's type is unknown, or a
        parameter/link/path/reference required by the type is not supplied.
        """
        if self.debug: print("Fattening %s"%c.id)

        try:
            ct = self.component_types[c.type]
        except:
            raise ModelError("Unable to resolve type '{0}' for component '{1}'; existing: {2}",
                             c.type, c.id, self.component_types.keys())

        fc = FatComponent(c.id, c.type)
        if c.parent_id: fc.set_parent_id(c.parent_id)

        ### Resolve parameters
        for parameter in ct.parameters:
            if self.debug: print("Checking: %s"%parameter)
            if parameter.name in c.parameters:
                p = parameter.copy()
                p.value = c.parameters[parameter.name]
                p.numeric_value = self.get_numeric_value(p.value, p.dimension)
                fc.add_parameter(p)
            elif parameter.fixed:
                # Fixed parameters take their value from the type itself.
                p = parameter.copy()
                p.numeric_value = self.get_numeric_value(p.value, p.dimension)
                fc.add_parameter(p)
            else:
                raise ModelError("Parameter '{0}' not initialized for component '{1}'",
                                 parameter.name, c.id)

        ### Resolve properties
        for property in ct.properties:
            property2 = property.copy()
            fc.add(property2)

        ### Resolve derived_parameters
        for derived_parameter in ct.derived_parameters:
            derived_parameter2 = derived_parameter.copy()
            fc.add(derived_parameter2)

        ### Resolve index_parameters (unsupported: the raise makes the copy
        ### below unreachable; it is kept as a placeholder for future support)
        for index_parameter in ct.index_parameters:
            raise ModelError("IndexParameter not yet implemented in PyLEMS!")
            index_parameter2 = index_parameter.copy()
            fc.add(index_parameter2)

        ### Resolve constants
        for constant in ct.constants:
            constant2 = constant.copy()
            constant2.numeric_value = self.get_numeric_value(constant2.value, constant2.dimension)
            fc.add(constant2)

        ### Resolve texts
        for text in ct.texts:
            t = text.copy()
            t.value = c.parameters[text.name] if text.name in c.parameters else ''
            fc.add(t)

        ### Resolve links
        for link in ct.links:
            if link.name in c.parameters:
                l = link.copy()
                l.value = c.parameters[link.name]
                fc.add(l)
            else:
                raise ModelError("Link parameter '{0}' not initialized for component '{1}'",
                                 link.name, c.id)

        ### Resolve paths
        for path in ct.paths:
            if path.name in c.parameters:
                p = path.copy()
                p.value = c.parameters[path.name]
                fc.add(p)
            else:
                raise ModelError("Path parameter '{0}' not initialized for component '{1}'",
                                 path.name, c.id)

        if len(ct.component_requirements)>0:
            raise ModelError("ComponentRequirement not yet implemented in PyLEMS!")
        if len(ct.instance_requirements)>0:
            raise ModelError("InstanceRequirement not yet implemented in PyLEMS!")

        ### Resolve component references.
        for cref in ct.component_references:
            if cref.local:
                raise ModelError("Attribute local on ComponentReference not yet implemented in PyLEMS!")
            if cref.name in c.parameters:
                cref2 = cref.copy()
                cid = c.parameters[cref.name]
                # Fatten the referenced component first if needed (recursive).
                if cid not in self.fat_components:
                    self.add(self.fatten_component(self.components[cid]))
                cref2.referenced_component = self.fat_components[cid]
                fc.add(cref2)
            else:
                raise ModelError("Component reference '{0}' not initialized for component '{1}'",
                                 cref.name, c.id)

        merge_maps(fc.exposures, ct.exposures)
        merge_maps(fc.requirements, ct.requirements)
        merge_maps(fc.component_requirements, ct.component_requirements)
        merge_maps(fc.instance_requirements, ct.instance_requirements)
        merge_maps(fc.children, ct.children)
        merge_maps(fc.texts, ct.texts)
        merge_maps(fc.links, ct.links)
        merge_maps(fc.paths, ct.paths)
        merge_maps(fc.event_ports, ct.event_ports)
        merge_maps(fc.attachments, ct.attachments)

        fc.dynamics = ct.dynamics.copy()
        # NOTE(review): when regimes are present the main dynamics are
        # cleared here — presumably regime dynamics take over; confirm.
        if len(fc.dynamics.regimes) != 0:
            fc.dynamics.clear()

        self.resolve_structure(fc, ct)
        self.resolve_simulation(fc, ct)

        fc.types = ct.types

        ### Resolve children
        for child in c.children:
            fc.add(self.fatten_component(child))

        return fc
    def get_parent_component(self, fc):
        """
        Find the component whose id matches fc.parent_id and which has fc
        among its (grand)children, searching two levels deep.

        TODO: Replace with more efficient way to do this...

        @return: The parent component, or None if not found.
        """
        if self.debug: print("Looking for parent of %s (%s)"%(fc.id, fc.parent_id))
        parent_comp = None
        for comp in self.components.values():
            if self.debug: print(" - Checking "+comp.id)
            for child in comp.children:
                if parent_comp == None:
                    # Level 1: fc is a direct child of a root component.
                    if child.id == fc.id and comp.id == fc.parent_id:
                        if self.debug: print("1) It is "+comp.id)
                        parent_comp = comp
                    else:
                        # Level 2: fc is a grandchild; its parent is `child`.
                        for child2 in child.children:
                            if self.debug: print(" - Checking child: %s, %s"%(child.id,child2.id))
                            if parent_comp == None and child2.id == fc.id and child.id == fc.parent_id:
                                if self.debug: print("2) It is "+child.id)
                                parent_comp = child
                                break
                            else:
                                if self.debug: print("No..." )
        return parent_comp
    def resolve_structure(self, fc, ct):
        """
        Resolve structure specifications: copies the component type's
        <With>, <EventConnection>, <ChildInstance> and <MultiInstantiate>
        declarations into the fat component, substituting paths, texts and
        component references with their resolved values.

        @param fc: Fat component being built.
        @type fc: lems.model.component.FatComponent

        @param ct: Component type supplying the structure declarations.
        @type ct: lems.model.component.ComponentType

        @raise ModelError: if any structure element cannot be resolved.
        """
        if self.debug: print("++++++++ Resolving structure of (%s) with %s"%(fc, ct))

        for w in ct.structure.withs:
            try:
                # 'parent'/'this' are symbolic instances; anything else is a
                # path parameter on the component.
                if w.instance == 'parent' or w.instance == 'this':
                    w2 = With(w.instance, w.as_)
                else:
                    w2 = With(fc.paths[w.instance].value,
                              w.as_)
            except:
                raise ModelError("Unable to resolve With parameters for "
                                 "'{0}' in component '{1}'",
                                 w.as_, fc.id)
            fc.structure.add(w2)

        if len(ct.structure.tunnels) > 0:
            raise ModelError("Tunnel is not yet supported in PyLEMS!");

        for fe in ct.structure.for_eachs:
            fc.structure.add_for_each(fe)

        for ev in ct.structure.event_connections:
            try:
                from_inst = fc.structure.withs[ev.from_].instance
                to_inst = fc.structure.withs[ev.to].instance
                if self.debug: print("EC..: "+from_inst+" to "+to_inst+ " in "+str(fc.paths))
                if len(fc.texts) > 0 or len(fc.paths) > 0:
                    # Ports named in texts are substituted; otherwise None.
                    source_port = fc.texts[ev.source_port].value if ev.source_port and len(ev.source_port)>0 and ev.source_port in fc.texts else None
                    target_port = fc.texts[ev.target_port].value if ev.target_port and len(ev.target_port)>0 and ev.target_port in fc.texts else None
                    if self.debug: print("sp: %s"%source_port)
                    if self.debug: print("tp: %s"%target_port)
                    receiver = None

                    # TODO: Get more efficient way to find parent comp
                    if '../' in ev.receiver:
                        # A '../attr' receiver refers to an attribute on the
                        # parent component; scan the tree for fc's parent.
                        receiver_id = None
                        parent_attr = ev.receiver[3:]
                        if self.debug: print("Finding %s in the parent of: %s (%i)"%(parent_attr, fc, id(fc)))
                        for comp in self.components.values():
                            if self.debug: print(" - Checking %s (%i)" %(comp.id,id(comp)))
                            for child in comp.children:
                                if self.debug: print(" - Checking %s (%i)" %(child.id,id(child)))
                                for child2 in child.children:
                                    if child2.id == fc.id and child2.type == fc.type and child.id == fc.parent_id:
                                        if self.debug: print(" - Got it?: %s (%i), child: %s"%(child.id, id(child), child2))
                                        receiver_id = child.parameters[parent_attr]
                                        if self.debug: print("Got it: "+receiver_id)
                                        break

                        if receiver_id is not None:
                            for comp in self.fat_components:
                                if comp.id == receiver_id:
                                    receiver = comp
                                    if self.debug: print("receiver is: %s"%receiver)

                    if self.debug: print("rec1: %s"%receiver)
                    if not receiver:
                        receiver = fc.component_references[ev.receiver].referenced_component if ev.receiver else None
                    receiver_container = fc.texts[ev.receiver_container].value if (fc.texts and ev.receiver_container) else ''

                    if self.debug: print("rec2: %s"%receiver)
                    if len(receiver_container)==0:
                        # TODO: remove this hard coded check!
                        receiver_container = 'synapses'
                else:
                    # No texts/paths on this component: pass the raw port
                    # names through unresolved.
                    #if from_inst == 'parent':
                    #par = fc.component_references[ev.receiver]
                    if self.debug:
                        print("+++++++++++++++++++")
                        print(ev.toxml())
                        print(ev.source_port)
                        print(fc)
                    source_port = ev.source_port
                    target_port = ev.target_port
                    receiver = None
                    receiver_container = None
                ev2 = EventConnection(from_inst,
                                      to_inst,
                                      source_port,
                                      target_port,
                                      receiver,
                                      receiver_container)
                if self.debug:
                    print("Created EC: "+ev2.toxml())
                    print(receiver)
                    print(receiver_container)
            except:
                logging.exception("Something awful happened!")
                raise ModelError("Unable to resolve event connection parameters in component '{0}'",fc)
            fc.structure.add(ev2)

        for ch in ct.structure.child_instances:
            try:
                if self.debug: print(ch.toxml())
                if '../' in ch.component:
                    # '../ref' resolves via the parent component's parameters.
                    parent = self.get_parent_component(fc)
                    if self.debug: print("Parent: %s"%parent)
                    comp_ref = ch.component[3:]
                    if self.debug: print("comp_ref: %s"%comp_ref)
                    comp_id = parent.parameters[comp_ref]
                    comp = self.fat_components[comp_id]
                    ch2 = ChildInstance(ch.component, comp)
                else:
                    ref_comp = fc.component_references[ch.component].referenced_component
                    ch2 = ChildInstance(ch.component, ref_comp)
            except Exception as e:
                if self.debug: print(e)
                raise ModelError("Unable to resolve child instance parameters for "
                                 "'{0}' in component '{1}'",
                                 ch.component, fc.id)
            fc.structure.add(ch2)

        for mi in ct.structure.multi_instantiates:
            try:
                if mi.component:
                    mi2 = MultiInstantiate(component=fc.component_references[mi.component].referenced_component,
                                           number=int(fc.parameters[mi.number].numeric_value))
                else:
                    mi2 = MultiInstantiate(component_type=fc.component_references[mi.component_type].referenced_component,
                                           number=int(fc.parameters[mi.number].numeric_value))
            except:
                raise ModelError("Unable to resolve multi-instantiate parameters for "
                                 "'{0}' in component '{1}'",
                                 mi.component, fc)
            fc.structure.add(mi2)
    def resolve_simulation(self, fc, ct):
        """
        Resolve simulation specifications: copies the component type's Run,
        Record, EventRecord, DataDisplay, DataWriter and EventWriter
        declarations into the fat component, substituting parameters, paths
        and texts with their resolved values.

        @param fc: Fat component being built.
        @type fc: lems.model.component.FatComponent

        @param ct: Component type supplying the simulation declarations.
        @type ct: lems.model.component.ComponentType

        @raise ModelError: if any simulation element cannot be resolved.
        """
        for run in ct.simulation.runs:
            try:
                run2 = Run(fc.component_references[run.component].referenced_component,
                           run.variable,
                           fc.parameters[run.increment].numeric_value,
                           fc.parameters[run.total].numeric_value)
            except:
                raise ModelError("Unable to resolve simulation run parameters in component '{0}'",
                                 fc.id)
            fc.simulation.add(run2)

        for record in ct.simulation.records:
            try:
                # Scale defaults to 1, color to black, when unspecified.
                record2 = Record(fc.paths[record.quantity].value,
                                 fc.parameters[record.scale].numeric_value if record.scale else 1,
                                 fc.texts[record.color].value if record.color else '#000000')
            except:
                raise ModelError("Unable to resolve simulation record parameters in component '{0}'",
                                 fc.id)
            fc.simulation.add(record2)

        for event_record in ct.simulation.event_records:
            try:
                event_record2 = EventRecord(fc.paths[event_record.quantity].value,
                                            fc.texts[event_record.eventPort].value)
            except:
                raise ModelError("Unable to resolve simulation event_record parameters in component '{0}'",
                                 fc.id)
            fc.simulation.add(event_record2)

        for dd in ct.simulation.data_displays:
            try:
                dd2 = DataDisplay(fc.texts[dd.title].value,
                                  '')
                if 'timeScale' in fc.parameters:
                    dd2.timeScale = fc.parameters['timeScale'].numeric_value
            except:
                raise ModelError("Unable to resolve simulation display parameters in component '{0}'",
                                 fc.id)
            fc.simulation.add(dd2)

        for dw in ct.simulation.data_writers:
            try:
                path = ''
                if fc.texts[dw.path] and fc.texts[dw.path].value:
                    path = fc.texts[dw.path].value
                dw2 = DataWriter(path,
                                 fc.texts[dw.file_name].value)
            except:
                raise ModelError("Unable to resolve simulation writer parameters in component '{0}'",
                                 fc.id)
            fc.simulation.add(dw2)

        for ew in ct.simulation.event_writers:
            try:
                path = ''
                if fc.texts[ew.path] and fc.texts[ew.path].value:
                    path = fc.texts[ew.path].value
                ew2 = EventWriter(path,
                                  fc.texts[ew.file_name].value,
                                  fc.texts[ew.format].value)
            except:
                raise ModelError("Unable to resolve simulation writer parameters in component '{0}'",
                                 fc.id)
            fc.simulation.add(ew2)
def get_numeric_value(self, value_str, dimension = None):
"""
Get the numeric value for a parameter value specification.
@param value_str: Value string
@type value_str: str
@param dimension: Dimension of the value
@type dimension: str
"""
n = None
i = len(value_str)
while n is None:
try:
part = value_str[0:i]
nn = float(part)
n = nn
s = value_str[i:]
except ValueError:
i = i-1
number = n
sym = s
numeric_value = None
if sym == '':
numeric_value = number
else:
if sym in self.units:
unit = self.units[sym]
if dimension:
if dimension != unit.dimension and dimension != '*':
raise SimBuildError("Unit symbol '{0}' cannot "
"be used for dimension '{1}'",
sym, dimension)
else:
dimension = unit.dimension
numeric_value = (number * (10 ** unit.power) * unit.scale) + unit.offset
else:
raise SimBuildError("Unknown unit symbol '{0}'. Known: {1}",
sym, self.units)
#print("Have converted %s to value: %s, dimension %s"%(value_str, numeric_value, dimension))
return numeric_value
| |
# coding=utf-8
# Copyright 2022 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hyperparameters defining different problems.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.layers import modalities
from tensor2tensor.utils import registry
import tensorflow.compat.v1 as tf
# TODO(rsepassi): Merge these problems with their data generators. Currently
# they only implement the hparams.
class AudioTimitProblem(problem.Problem):
  """Base class for TIMIT problems."""

  def example_reading_spec(self):
    """Feature spec for parsed TFRecords; no label remapping (None)."""
    features = {
        "inputs": tf.VarLenFeature(tf.int64),
        "audio/sample_count": tf.FixedLenFeature((), tf.int64),
        "audio/sample_width": tf.FixedLenFeature((), tf.int64),
        "targets": tf.VarLenFeature(tf.int64),
    }
    return features, None

  def preprocess_example(self, example, mode, hparams):
    """Reshape the flat audio tensor to [samples, width, 1 channel]."""
    example = super(AudioTimitProblem, self).preprocess_example(
        example, mode, hparams)
    num_samples = tf.to_int32(example.pop("audio/sample_count"))
    width = tf.to_int32(example.pop("audio/sample_width"))
    example["inputs"] = tf.reshape(example["inputs"], [num_samples, width, 1])
    return example
@registry.register_problem
class AudioTimitCharactersTune(AudioTimitProblem):
  """TIMIT to characters."""

  def feature_encoders(self, _):
    """Raw-text inputs; byte-level targets (no vocab file needed)."""
    return {
        "inputs": text_encoder.TextEncoder(),
        "targets": text_encoder.ByteTextEncoder(),
    }

  def hparams(self, defaults, model_hparams):
    """Speech-recognition inputs, 256-way (byte) symbol targets."""
    defaults.modality = {
        "inputs": modalities.ModalityType.SPEECH_RECOGNITION,
        "targets": modalities.ModalityType.SYMBOL,
    }
    defaults.vocab_size = {"inputs": None, "targets": 256}
@registry.register_problem
class AudioTimitTokens8kTune(AudioTimitProblem):
  """TIMIT to tokens."""

  @property
  def target_vocab_size(self):
    return 2**13  # 8192

  def feature_encoders(self, data_dir):
    """Raw-text inputs; subword targets from the shared endefr vocab file."""
    vocab_path = os.path.join(
        data_dir, "vocab.endefr.%d" % self.target_vocab_size)
    return {
        "inputs": text_encoder.TextEncoder(),
        "targets": text_encoder.SubwordTextEncoder(vocab_path),
    }

  def hparams(self, defaults, model_hparams):
    """Speech-recognition inputs, subword symbol targets."""
    defaults.modality = {
        "inputs": modalities.ModalityType.SPEECH_RECOGNITION,
        "targets": modalities.ModalityType.SYMBOL,
    }
    defaults.vocab_size = {
        "inputs": None,
        "targets": self.get_feature_encoders()["targets"].vocab_size,
    }
    defaults.batch_size_multiplier = 256
    defaults.loss_multiplier = 2.0
    defaults.input_space_id = 13
    defaults.target_space_id = 3
@registry.register_problem
class AudioTimitTokens8kTest(AudioTimitTokens8kTune):
  """TIMIT to tokens."""
  # Registered separately so the test split can be selected by problem name;
  # hparams and feature encoders are inherited unchanged from the Tune class.
  pass
@registry.register_problem
class ParsingEnglishPtb8k(problem.Problem):
  """Parsing."""

  @property
  def target_vocab_size(self):
    return 2**13  # 8192

  def feature_encoders(self, data_dir):
    """One shared subword vocabulary for both inputs and targets."""
    vocab_path = os.path.join(
        data_dir, "vocab.endefr.%d" % self.target_vocab_size)
    shared_encoder = text_encoder.SubwordTextEncoder(vocab_path)
    return {"inputs": shared_encoder, "targets": shared_encoder}

  def hparams(self, defaults, model_hparams):
    """Symbol modalities on both sides with the shared vocab size."""
    encoders = self.get_feature_encoders()
    defaults.modality = {
        "inputs": modalities.ModalityType.SYMBOL,
        "targets": modalities.ModalityType.SYMBOL,
    }
    defaults.vocab_size = {
        "inputs": encoders["inputs"].vocab_size,
        "targets": encoders["targets"].vocab_size,
    }
    defaults.batch_size_multiplier = 256
    defaults.loss_multiplier = 2.0
    defaults.input_space_id = 3
    defaults.target_space_id = 15
@registry.register_problem
class ParsingEnglishPtb16k(problem.Problem):
  """Parsing."""

  @property
  def vocab_prefix(self):
    return "wsj"

  @property
  def inputs_target_vocab_size(self):
    return 2**9  # 512

  @property
  def targets_target_vocab_size(self):
    return 2**14  # 16384

  def feature_encoders(self, data_dir):
    """Separate source/target subword vocabularies under the wsj prefix."""
    def _subword_encoder(role, size):
      # e.g. "wsj_source.vocab.512" / "wsj_target.vocab.16384"
      filename = self.vocab_prefix + "_%s.vocab.%d" % (role, size)
      return text_encoder.SubwordTextEncoder(os.path.join(data_dir, filename))
    return {
        "inputs": _subword_encoder("source", self.inputs_target_vocab_size),
        "targets": _subword_encoder("target", self.targets_target_vocab_size),
    }

  def hparams(self, defaults, model_hparams):
    """Symbol modalities on both sides with per-side vocab sizes."""
    encoders = self.get_feature_encoders()
    defaults.modality = {
        "inputs": modalities.ModalityType.SYMBOL,
        "targets": modalities.ModalityType.SYMBOL,
    }
    defaults.vocab_size = {
        "inputs": encoders["inputs"].vocab_size,
        "targets": encoders["targets"].vocab_size,
    }
    defaults.input_space_id = 3
    defaults.target_space_id = 15
class TestProblem(problem.Problem):
  """Test problem."""

  def __init__(self, input_vocab_size, target_vocab_size):
    """Store vocab sizes; disable data generation/vocab flags (False, False)."""
    super(TestProblem, self).__init__(False, False)
    self.input_vocab_size = input_vocab_size
    self.target_vocab_size = target_vocab_size

  def hparams(self, defaults, model_hparams):
    """Symbol modalities on both sides using the stored vocab sizes."""
    defaults.modality = {
        "inputs": modalities.ModalityType.SYMBOL,
        "targets": modalities.ModalityType.SYMBOL,
    }
    defaults.vocab_size = {
        "inputs": self.input_vocab_size,
        "targets": self.target_vocab_size,
    }
def test_problem_hparams(input_vocab_size=None,
                         target_vocab_size=None,
                         model_hparams=None):
  """Problem hparams for testing model bodies."""
  return TestProblem(input_vocab_size, target_vocab_size).get_hparams(
      model_hparams)
| |
__author__ = 'Jonas Eberle <jonas.eberle@eberle-mail.de>'
import subprocess
import os
import osr
import ogr
import numpy as np
from osgeo import gdal, gdalconst, gdalnumeric, gdal_array
import shutil
from pyEOM import log
LOGGER = log.LOGGER
def TestMODISHDFProcessor():
    """Smoke test: extract, clip and quality-mask two MOD13Q1 tiles."""
    geom = 'POLYGON((7.807615733070551 26.259757466002124,7.434080576820532 25.607681923751194,8.510740733070508 25.09140328282509,9.082029795570381 25.884760600666922,7.807615733070551 26.259757466002124))'
    from pyEOM.datasets.predefined.MODIS import MOD13Q1
    dataset = MOD13Q1.Dataset()
    bands = dataset.getBands()
    # MODISHDFProcessor(file, bands, rastertype, publishPath): the original
    # call omitted the two required trailing arguments and raised TypeError.
    # A non-'CMG' rastertype keeps the sinusoidal SRS; publish locally.
    raster = MODISHDFProcessor(None, bands, None, '.')
    files = ['MOD13Q1.A2001001.h18v06.005.2008270055643.hdf', 'MOD13Q1.A2001001.h18v07.005.2008270012522.hdf']
    for file in files:
        raster.extractBands(file)
    epsg = None #modis sinusoidal
    # clipPolygon(geom, format, epsg, resample) -- the format argument was
    # previously missing, so epsg was consumed as the output format.
    raster.clipPolygon(geom, 'GTiff', epsg, None)
    raster.processQuality('PR', '0')
class MODISHDFProcessor(object):
    """Extract, mosaic, clip and quality-mask bands from MODIS HDF4-EOS files.

    Raster work is delegated to the command line wrappers in the GDAL class;
    intermediate and final products are written below ``publishPath + '/data'``.
    """
    # Class-level defaults; overwritten per instance in __init__/setFile.
    file = None
    filename = None
    fileparts = []
    fileext = []
    dirname = None
    bands = []
    gdal = None
    bandfiles = dict()

    def __init__(self, file, bands, rastertype, publishPath):
        """Prepare output directories and the WGS84/MODIS-sinusoidal SRSs.

        @param file: optional HDF file to start with (see setFile)
        @param bands: band configuration dict (name, nodata, imagetype, ...)
        @param rastertype: 'CMG' for climate-modeling-grid products, which
            are already in geographic (WGS84) coordinates
        @param publishPath: base directory for generated data
        """
        if file is not None: self.setFile(file)
        self.bands = bands
        self.publishPath = publishPath
        if not os.path.exists(publishPath+'/data'): os.makedirs(publishPath+'/data')
        if not os.path.exists(publishPath+'/data/tmp'): os.makedirs(publishPath+'/data/tmp')
        self.gdal = GDAL()
        self.wgs84 = osr.SpatialReference()
        self.wgs84.ImportFromEPSG(4326)
        self.modis_sinu = osr.SpatialReference()
        self.modis_sinu.ImportFromProj4("+proj=sinu +R=6371007.181 +nadgrids=@null +wktext")
        if rastertype == 'CMG':
            self.modis_sinu = self.wgs84 #for MODIS CMG files

    def setFile(self, file):
        """Set the current HDF file and derive name/parts/extension/dir."""
        self.file = file
        self.filename = os.path.basename(file)
        self.fileparts = self.filename.split('.')
        self.fileext = os.path.splitext(self.filename)
        self.dirname = os.path.dirname(file)

    def extractBands(self, file=None):
        """Extract each configured band from the HDF file into its own raster.

        Appends the produced filename to bands[key]['files']; raises on any
        gdal_translate failure.
        """
        if file is not None: self.setFile(file)
        for key, band in self.bands.items():
            dataset = 'HDF4_EOS:EOS_GRID:"'+self.file+'":"'+band['name']+'"'
            output = self.dirname+'/'+self.fileext[0]+'.'+key+self.fileext[1]
            output = self.gdal.gdal_translate(dataset, output, 'HDF4Image')
            if isinstance(output, basestring):
                if 'files' not in self.bands[key]: self.bands[key]['files'] = []
                self.bands[key]['files'].append(output)
            else:
                LOGGER.error('Error in extractBands')
                # The raise ends this method; the old `sys.exit(1)` after it
                # was unreachable (and `sys` was never imported in this file).
                raise Exception('Error in extractBands')

    def clipPoint(self, geom):
        """Sample every band at a WKT point (WGS84); return {key: value}."""
        point = ogr.CreateGeometryFromWkt(geom)
        values = dict()
        for key, band in self.bands.items():
            value = self.gdal.gdallocationinfo(band['files'][0], point.GetX(), point.GetY())
            values[key] = value
        return values

    def clipPolygon(self, geom, format, epsg=None, resample=None):
        """Clip every band to the WKT polygon *geom* (given in WGS84).

        The polygon is transformed into the target SRS (MODIS sinusoidal when
        *epsg* is None), written as polygon.kml, and used as a gdalwarp
        cutline. Returns {band key: clipped filename}.
        """
        poly = ogr.CreateGeometryFromWkt(geom)
        srs = self.wgs84
        if epsg is None:
            srs = self.modis_sinu
            tx = osr.CoordinateTransformation(self.wgs84, self.modis_sinu)
            poly.Transform(tx)
            geom = poly.ExportToWkt()
        elif epsg != 4326:
            # ImportFromEPSG returns an OGRERR code, not the SRS object, so
            # it must not be assigned over srs (the old code set srs to 0).
            srs = osr.SpatialReference()
            srs.ImportFromEPSG(epsg)
            tx = osr.CoordinateTransformation(self.wgs84, srs)
            poly.Transform(tx)
            geom = poly.ExportToWkt()
        self.exportWktToShape(poly, srs)
        for key, band in self.bands.items():
            # Mosaic multiple tiles into one raster before clipping.
            if len(band['files']) > 1:
                merge = self.publishPath+'/data/tmp/'+self.fileparts[0]+'.'+self.fileparts[1]+'.'+key+'.merge'+self.fileext[1]
                merge = self.gdal.gdal_merge(band['files'], merge, format="HDF4Image", nodata=band['nodata'])
            else:
                merge = band['files'][0]
            # Clip the (merged) raster to the KML cutline written above.
            outfile = self.publishPath+'/data/tmp/'+self.fileparts[0]+'.'+self.fileparts[1]+'.'+key+'.clipped'+self.gdal.getFileExtension(format)
            self.bandfiles[key] = self.gdal.gdalwarp(merge, outfile, format, band['nodata'], epsg, resample, 'polygon.kml')
        return self.bandfiles

    def splitBinaryQualityInfo(self, values):
        """Parse bit-field quality conditions like '2-5=0000' or '6-7<10'.

        Returns a list of dicts with start/end slice bounds, the comparison
        value and the numpy comparison function to apply.
        """
        valuesAr = []
        for limit in values:
            if '<=' in limit:
                splitstr = '<='
                comparefct = np.less_equal
            elif '>=' in limit:
                splitstr = '>='
                comparefct = np.greater_equal
            elif '>' in limit:
                splitstr = '>'
                comparefct = np.greater
            elif '<' in limit:
                splitstr = '<'
                comparefct = np.less
            elif '==' in limit:
                splitstr = '=='
                comparefct = np.equal
            elif '=' in limit:
                splitstr = '='
                comparefct = np.equal
            else:
                LOGGER.error('Relation condition wrong!')
                raise Exception('Relation condition wrong!')
            LOGGER.info('Split string: '+splitstr)
            key, value = limit.split(splitstr)
            LOGGER.info('Key: '+str(key))
            LOGGER.info('Val: '+str(value))
            if '-' in key:
                start, end = key.split('-')
            else:
                # NOTE(review): a single bit position k yields the slice
                # [k+1:k+2] below, while a range 'a-b' yields [a:b+1]; the
                # +1 offset for the single-key case looks inconsistent but
                # is preserved here -- confirm against the quality spec.
                start = end = int(key)+1
            valuesAr.append({'start':int(start),'end':int(end)+1,'value':int(value),'fct':comparefct})
        return valuesAr

    def checkQualityBinary(self, val, qualityChecks):
        """Return True when the bit string *val* satisfies every check."""
        qualityFullfilled = []
        for item in qualityChecks:
            if item['fct'](int(val[item['start']:item['end']]), item['value']):
                qualityFullfilled.append(True)
        if len(qualityFullfilled) == len(qualityChecks):
            return True
        else:
            return False

    def processQualityPoint(self, qaValue, bandlayer, qualityInfo):
        """Return 1 if the single quality value passes *qualityInfo*, else 0.

        Returns None when the band's quality_datatype is neither 'int' nor
        'bit' (behavior preserved from the original implementation).
        """
        if bandlayer not in self.bandfiles:
            raise Exception('Given quality band is not available!')
        qualityBand = self.bands[bandlayer]
        if qualityBand['quality_datatype'] == 'int':
            values = qualityInfo.split(';') #0;1 (e.g., good data and marginal data for MOD13Q1)
            if str(qaValue) not in values:
                return 0
            else:
                return 1
        elif qualityBand['quality_datatype'] == 'bit':
            values = qualityInfo.split(';') #2-5=0000;6-7<10
            valuesAr = self.splitBinaryQualityInfo(values)
            val = np.binary_repr(qaValue).zfill(16)[::-1] #flipped
            result = self.checkQualityBinary(val, valuesAr)
            if result:
                return 1
            else:
                return 0

    def processQuality(self, bandlayer, qualityInfo):
        """Mask every data band by the quality band *bandlayer*.

        Writes a 0/1 mask raster per band under data/mask and a masked data
        raster under data/output; returns {band key: masked data filename}.
        """
        if bandlayer not in self.bandfiles:
            raise Exception('Given quality band is not available!')
        qualityFile = self.bandfiles[bandlayer]
        LOGGER.info('QualityFile '+str(qualityFile))
        qualityArray = gdalnumeric.LoadFile(qualityFile)
        qualityValues = np.unique(qualityArray)
        qualityBand = self.bands[bandlayer]
        if not os.path.exists(self.publishPath+'/data/mask'):
            os.makedirs(self.publishPath+'/data/mask')
        if not os.path.exists(self.publishPath+'/data/output'):
            os.makedirs(self.publishPath+'/data/output')
        finalfiles = dict()
        for key, band in self.bands.items():
            # Quality bands mask the others; they are not masked themselves.
            if band['imagetype'] == 'qualityInformation':
                continue
            dataFile = self.bandfiles[key]
            maskFile = self.publishPath+'/data/mask/'+os.path.basename(dataFile)
            dataFile = self.publishPath+'/data/output/'+os.path.basename(dataFile)
            dataDS = gdal.Open(self.bandfiles[key])
            dataBand = dataDS.GetRasterBand(1)
            dataArray = dataBand.ReadAsArray(0, 0, dataDS.RasterXSize, dataDS.RasterYSize)
            dataNoData = dataBand.GetNoDataValue()
            maskArray = np.copy(dataArray)
            maskArray[:] = dataNoData
            if qualityBand['quality_datatype'] == 'int':
                values = qualityInfo.split(';') #0;1 (e.g., good data and marginal data for MOD13Q1)
                for quality in qualityValues:
                    if str(quality) not in values:
                        dataArray[qualityArray==quality] = dataNoData
                        maskArray[qualityArray==quality] = 0
                    else:
                        maskArray[qualityArray==quality] = 1
            elif qualityBand['quality_datatype'] == 'bit':
                values = qualityInfo.split(';') #2-5=0000;6-7<10
                valuesAr = self.splitBinaryQualityInfo(values)
                for quality in qualityValues:
                    val = np.binary_repr(quality).zfill(16)[::-1] #flipped
                    result = self.checkQualityBinary(val, valuesAr)
                    LOGGER.info('Quality value '+val+' set to '+str(result))
                    if result:
                        maskArray[qualityArray==quality] = 1
                    else:
                        maskArray[qualityArray==quality] = 0
                        dataArray[qualityArray==quality] = dataNoData
            else:
                LOGGER.error('No quality info')
                raise Exception('No quality info')
            dataDSMasked = gdal_array.SaveArray(dataArray, dataFile, 'HDF4Image')
            gdal_array.CopyDatasetInfo(dataDS, dataDSMasked)
            maskDSMasked = gdal_array.SaveArray(maskArray, maskFile, 'HDF4Image')
            gdal_array.CopyDatasetInfo(dataDSMasked, maskDSMasked)
            finalfiles[key] = dataFile
            # Release GDAL dataset handles so the files are flushed/closed.
            maskDS, maskDSMasked, dataDS, dataDSMasked = [None]*4
            del maskDS, maskDSMasked, dataDS, dataDSMasked
        return finalfiles

    def exportWktToShape(self, wkt, srs):
        """Write the polygon geometry *wkt* to polygon.kml in SRS *srs*."""
        outDriver = ogr.GetDriverByName('KML')
        outDataSource = outDriver.CreateDataSource('polygon.kml')
        outLayer = outDataSource.CreateLayer('polygon', geom_type=ogr.wkbPolygon, srs=srs)
        featureDefn = outLayer.GetLayerDefn()
        outFeature = ogr.Feature(featureDefn)
        outFeature.SetGeometry(wkt)
        outLayer.CreateFeature(outFeature)
        # The original `outFeature.Destroy` lacked parentheses and was a
        # no-op; the feature was never released.
        outFeature.Destroy()
        outDataSource.Destroy()
class RasterProcessing(object):
    """Generic raster helpers (extract/merge/clip) built on the GDAL class."""
    # Class-level defaults; overwritten per instance where applicable.
    file = None
    filename = None
    outputformat = "GTiff"
    outputfilext = {'GTiff': '.tif', 'netCDF': '.nc'}

    def __init__(self, file=None, outputformat=None):
        """Optionally set the working file and the default output format."""
        if file is not None:
            self.file = file
            self.filename = os.path.basename(file)
        if outputformat is not None:
            self.outputformat = outputformat

    def setFile(self, file):
        """Set the current working file and cache its basename."""
        self.file = file
        self.filename = os.path.basename(file)

    def setOptions(self):
        pass

    def extract(self, dataset, datasetext, outputformat, datatype=""):
        """Extract *dataset* (optionally a MODIS HDF subdataset) to a new file.

        Returns the output path on success, False on failure.
        """
        if outputformat not in self.outputfilext:
            raise Exception('Output format is not available in class!')
        if datatype == 'MODISHDF':
            dataset = 'HDF4_EOS:EOS_GRID:"'+self.file+'":"'+dataset+'"'
        output = os.path.splitext(self.file)[0]+datasetext+self.outputfilext[outputformat]
        process = GDAL().gdal_translate(dataset, output, format=outputformat)
        # process is False on failure, so it must be stringified before
        # concatenation (the old code raised TypeError on failure).
        LOGGER.debug('Process: '+str(process))
        if isinstance(process, basestring):
            return process
        else:
            return False

    def merge(self, files, outfile, outputformat="GTiff"):
        """Mosaic several rasters into *outfile* (extension added here)."""
        if len(files) <= 1:
            raise Exception('More than 1 file needed to merge')
        return GDAL().gdal_merge(files, outfile+self.outputfilext[outputformat], outputformat)

    def reproject(self):
        pass

    def clip(self, geom, dstnodata, epsg, resample):
        """Clip the current file to the WKT polygon *geom* via gdalwarp."""
        outfile = os.path.splitext(self.file)[0]+'_clipped'+os.path.splitext(self.file)[1]
        f = open('cutline.csv', 'w')
        f.write("id,WKT\n")
        f.write(',"'+geom+'"')
        f.close()
        # gdalwarp expects (input, output, format, dstnodata, epsg, resample,
        # cutline); the format argument was previously omitted, which shifted
        # every later argument into the wrong parameter.
        return GDAL().gdalwarp(self.file, outfile, self.outputformat, dstnodata, epsg, resample, 'cutline.csv')

    def compress(self, type):
        pass
class GDAL(object):
    """Thin wrappers around the GDAL command line utilities.

    Each wrapper builds a shell command and runs it through execute(), which
    returns the output file path on success, the command's stdout when no
    output file is expected, or False on error.
    """
    outputfilext = {'GTiff': '.tif', 'netCDF': '.nc', 'HDF4Image': '.hdf','VRT': '.vrt', 'KEA': '.kea', }
    path = '' # with a trailing slash
    resample_methods = ['near', 'bilinear', 'cubic', 'cubicspline', 'lanczos']

    def __init__(self):
        pass

    def getFileExtension(self, format):
        """Return the file extension (with dot) for a GDAL format name."""
        if format not in self.outputfilext:
            LOGGER.error('Format '+format+' not available!')
            raise Exception('Format '+format+' not available!')
        return self.outputfilext[format]

    def execute(self, cmd, output):
        """Run *cmd* via the shell.

        Returns *output* if that file exists afterwards, otherwise stdout on
        a clean run, or False when anything was written to stderr.
        """
        if isinstance(cmd, list):
            cmd = ' '.join(cmd)
        LOGGER.info(cmd)
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        stdout, stderr = process.communicate()
        if output != None and os.path.isfile(output):
            return output
        elif stderr == '':
            return stdout.strip()
        else:
            LOGGER.error(stderr)
            return False

    def gdal_translate(self, input, output, format="GTiff"):
        """Convert *input* to *output* in the given format."""
        return self.execute([self.path+'gdal_translate', '-of', format, input, output], output)

    def gdal_merge(self, inputfiles, output, format="GTiff", nodata=""):
        """Mosaic *inputfiles* (list or pre-joined string) into *output*."""
        if isinstance(inputfiles, list):
            inputfiles = ' '.join(inputfiles)
        return self.execute([self.path+'gdal_merge.py', '-o', output, '-of', format, inputfiles], output)

    def gdalwarp(self, inputfile, output, format, dstnodata, epsg, resample, cutline):
        """Warp *inputfile*: optional nodata, reprojection and cutline clip."""
        if dstnodata != None:
            srcnodata = '-srcnodata '+str(dstnodata)
            dstnodata = '-dstnodata '+str(dstnodata)
        else:
            dstnodata = ''
            srcnodata = ''
        if epsg != None and epsg > 0:
            reproject = '-t_srs EPSG:'+str(epsg)
            if resample != None and resample in self.resample_methods:
                reproject = reproject + ' -r '+resample
        else:
            reproject = ''
        if cutline != None:
            cutline = '-cutline '+cutline+' -crop_to_cutline'
        else:
            # Previously cutline stayed None here, which broke the
            # ' '.join(...) in execute() with a TypeError.
            cutline = ''
        if format != None:
            format = '-of '+format
        else:
            format = ''
        return self.execute([self.path+'gdalwarp', format, srcnodata, dstnodata, reproject, cutline, inputfile, output], output)

    def gdal_compress(self, inputfile, output, type):
        """Re-write *inputfile* to *output* with the given compression."""
        return self.execute([self.path+'gdal_translate', '-co COMPRESS='+type, inputfile, output], output)

    def gdallocationinfo(self, inputfile, x, y):
        """Sample the raster value at WGS84 coordinates (x, y)."""
        return self.execute([self.path+'gdallocationinfo', '-valonly', '-geoloc', '-wgs84', inputfile, str(x), str(y)], None)

    def gdalbuildvrt(self, inputs, output, separate=True):
        """Build a VRT from files or from a file list (when *inputs* is a file)."""
        inputfilelist = ''
        if os.path.isfile(inputs):
            inputfilelist = '-input_file_list '+inputs
            inputs = ''
        if separate:
            separate = '-separate'
        else:
            separate = ''
        return self.execute([self.path+'gdalbuildvrt', separate, inputfilelist, output, inputs], output)
| |
import platform
from glom import glom
from util import *
import nexmo
@responses.activate
def test_get_balance(client, dummy_data):
    """GET account/get-balance returns a dict and sends our User-Agent."""
    stub(responses.GET, "https://rest.nexmo.com/account/get-balance")
    assert isinstance(client.get_balance(), dict)
    assert request_user_agent() == dummy_data.user_agent
@responses.activate
def test_application_info_options(dummy_data):
    """app_name/app_version passed to the Client are appended to the UA."""
    app_name, app_version = "ExampleApp", "X.Y.Z"
    stub(responses.GET, "https://rest.nexmo.com/account/get-balance")
    client = nexmo.Client(
        key=dummy_data.api_key,
        secret=dummy_data.api_secret,
        app_name=app_name,
        app_version=app_version,
    )
    # Expected UA: library version, python version, then app name/version.
    user_agent = "nexmo-python/{} python/{} {}/{}".format(
        nexmo.__version__,
        platform.python_version(),
        app_name,
        app_version,
    )
    assert isinstance(client.get_balance(), dict)
    assert request_user_agent() == user_agent
@responses.activate
def test_get_country_pricing(client, dummy_data):
    """Country pricing request carries the country code as a query param."""
    stub(responses.GET, "https://rest.nexmo.com/account/get-pricing/outbound")
    assert isinstance(client.get_country_pricing("GB"), dict)
    assert request_user_agent() == dummy_data.user_agent
    assert "country=GB" in request_query()
@responses.activate
def test_get_prefix_pricing(client, dummy_data):
    """Prefix pricing request carries the numeric prefix as a query param."""
    stub(responses.GET, "https://rest.nexmo.com/account/get-prefix-pricing/outbound")
    assert isinstance(client.get_prefix_pricing(44), dict)
    assert request_user_agent() == dummy_data.user_agent
    assert "prefix=44" in request_query()
@responses.activate
def test_get_sms_pricing(client, dummy_data):
    """SMS phone pricing request carries the phone number as a query param."""
    stub(responses.GET, "https://rest.nexmo.com/account/get-phone-pricing/outbound/sms")
    assert isinstance(client.get_sms_pricing("447525856424"), dict)
    assert request_user_agent() == dummy_data.user_agent
    assert "phone=447525856424" in request_query()
@responses.activate
def test_get_voice_pricing(client, dummy_data):
    """Voice phone pricing request carries the phone number as a query param."""
    stub(
        responses.GET, "https://rest.nexmo.com/account/get-phone-pricing/outbound/voice"
    )
    assert isinstance(client.get_voice_pricing("447525856424"), dict)
    assert request_user_agent() == dummy_data.user_agent
    assert "phone=447525856424" in request_query()
@responses.activate
def test_update_settings(client, dummy_data):
    """Settings update POSTs the url-encoded callback URL in the body."""
    stub(responses.POST, "https://rest.nexmo.com/account/settings")
    params = {"moCallBackUrl": "http://example.com/callback"}
    assert isinstance(client.update_settings(params), dict)
    assert request_user_agent() == dummy_data.user_agent
    assert "moCallBackUrl=http%3A%2F%2Fexample.com%2Fcallback" in request_body()
@responses.activate
def test_topup(client, dummy_data):
    """Top-up POSTs the transaction reference in the request body."""
    stub(responses.POST, "https://rest.nexmo.com/account/top-up")
    params = {"trx": "00X123456Y7890123Z"}
    assert isinstance(client.topup(params), dict)
    assert request_user_agent() == dummy_data.user_agent
    assert "trx=00X123456Y7890123Z" in request_body()
@responses.activate
def test_get_account_numbers(client, dummy_data):
    """Account numbers listing passes the page size as a query param."""
    stub(responses.GET, "https://rest.nexmo.com/account/numbers")
    assert isinstance(client.get_account_numbers(size=25), dict)
    assert request_user_agent() == dummy_data.user_agent
    assert request_params()["size"] == ["25"]
@responses.activate
def test_list_secrets(client, dummy_data):
    """Listing secrets uses basic auth and parses the HAL-embedded list."""
    stub(
        responses.GET,
        "https://api.nexmo.com/accounts/meaccountid/secrets",
        fixture_path="account/secret_management/list.json",
    )
    secrets = client.list_secrets("meaccountid")
    assert_basic_auth()
    assert (
        glom(secrets, "_embedded.secrets.0.id")
        == "ad6dc56f-07b5-46e1-a527-85530e625800"
    )
@responses.activate
def test_list_secrets_missing(client, dummy_data):
    """A 404 from the secrets endpoint surfaces as a descriptive ClientError."""
    stub(
        responses.GET,
        "https://api.nexmo.com/accounts/meaccountid/secrets",
        status_code=404,
        fixture_path="account/secret_management/missing.json",
    )
    # NOTE(review): `pytest` is assumed to be re-exported by util's star
    # import above -- confirm, as it is not imported here directly.
    with pytest.raises(nexmo.ClientError) as ce:
        client.list_secrets("meaccountid")
    assert_basic_auth()
    assert (
        """ClientError: Invalid API Key: API key 'ABC123' does not exist, or you do not have access (https://developer.nexmo.com/api-errors#invalid-api-key)"""
        in str(ce)
    )
@responses.activate
def test_get_secret(client):
    """Fetching one secret uses basic auth and returns its id."""
    stub(
        responses.GET,
        "https://api.nexmo.com/accounts/meaccountid/secrets/mahsecret",
        fixture_path="account/secret_management/get.json",
    )
    secret = client.get_secret("meaccountid", "mahsecret")
    assert_basic_auth()
    assert secret["id"] == "ad6dc56f-07b5-46e1-a527-85530e625800"
@responses.activate
def test_delete_secret(client):
    """Deleting a secret issues an authenticated DELETE request."""
    stub(
        responses.DELETE, "https://api.nexmo.com/accounts/meaccountid/secrets/mahsecret"
    )
    client.delete_secret("meaccountid", "mahsecret")
    assert_basic_auth()
@responses.activate
def test_delete_secret_last_secret(client):
    """Deleting the last remaining secret (403) raises a ClientError."""
    stub(
        responses.DELETE,
        "https://api.nexmo.com/accounts/meaccountid/secrets/mahsecret",
        status_code=403,
        fixture_path="account/secret_management/last-secret.json",
    )
    with pytest.raises(nexmo.ClientError) as ce:
        client.delete_secret("meaccountid", "mahsecret")
    assert_basic_auth()
    assert (
        """ClientError: Secret Deletion Forbidden: Can not delete the last secret. The account must always have at least 1 secret active at any time (https://developer.nexmo.com/api-errors/account/secret-management#delete-last-secret)"""
        in str(ce)
    )
@responses.activate
def test_create_secret(client):
    """Creating a secret POSTs with basic auth and returns the new id."""
    stub(
        responses.POST,
        "https://api.nexmo.com/accounts/meaccountid/secrets",
        fixture_path="account/secret_management/create.json",
    )
    secret = client.create_secret("meaccountid", "mahsecret")
    assert_basic_auth()
    assert secret["id"] == "ad6dc56f-07b5-46e1-a527-85530e625800"
@responses.activate
def test_create_secret_max_secrets(client):
    """Creating beyond the secret limit (403) raises a ClientError."""
    stub(
        responses.POST,
        "https://api.nexmo.com/accounts/meaccountid/secrets",
        status_code=403,
        fixture_path="account/secret_management/max-secrets.json",
    )
    with pytest.raises(nexmo.ClientError) as ce:
        client.create_secret("meaccountid", "mahsecret")
    assert_basic_auth()
    # "Maxmimum" reproduces the API fixture's own typo; do not "fix" it here.
    assert (
        """ClientError: Maxmimum number of secrets already met: This account has reached maximum number of '2' allowed secrets (https://developer.nexmo.com/api-errors/account/secret-management#maximum-secrets-allowed)"""
        in str(ce)
    )
@responses.activate
def test_create_secret_validation(client):
    """A 400 validation failure on secret creation raises a ClientError."""
    stub(
        responses.POST,
        "https://api.nexmo.com/accounts/meaccountid/secrets",
        status_code=400,
        fixture_path="account/secret_management/create-validation.json",
    )
    with pytest.raises(nexmo.ClientError) as ce:
        client.create_secret("meaccountid", "mahsecret")
    assert_basic_auth()
    assert (
        """ClientError: Bad Request: The request failed due to validation errors (https://developer.nexmo.com/api-errors/account/secret-management#validation)"""
        in str(ce)
    )
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .keylevel import _KeyLevel, LineKey, Regex, BooleanKey
from .helpers import ReaderError, SUPPRESS, Namespace
from .py23compat import py23_items, py23_values, py23_basestring
class _KeyAdder(_KeyLevel):
"""An abstract base class that knows how to add keys to itself
and check the keys read within it."""
def __init__(self, case=False):
"""Initiallizes the key holders in this class"""
super(_KeyAdder, self).__init__(case=case)
# Default the key dictionary
self._keys = {}
# The mutually exclusive groups
self._meg = []
def _ensure_default_has_a_value(self, kwargs):
if 'default' not in kwargs:
kwargs['default'] = self._default
def _check_keyname(self, keyname, strid):
"""Run the given keyname through a few checks"""
# Check that the keyname is valid
if not isinstance(keyname, py23_basestring):
raise ValueError('{0}: {1} must be str'.format(repr(keyname), strid))
# Check that the keyname is not defined twice
if keyname in self._keys:
raise ReaderError('The {0} "{1}" has been defined twice'.format(strid, keyname))
# Adjust keyname if this is case sensitive
if not self._case:
keyname = keyname.lower()
return keyname
def _check_case(self, case, keyname):
# Use default case if no case is given here
if case is None:
case = self._case
if not isinstance(case, bool):
raise ValueError(keyname + ': case must be bool, given ' + repr(case))
return case
    def add_boolean_key(self, keyname, action=True, **kwargs):
        """Add a boolean key to the input searcher.
        :argument keyname:
            The name of the boolean-type key to search for.
        :type keyname: str
        :argument action:
            What value to store if this key is found.  The default is
            :py:obj:`True`.
        :argument required:
            Indicates that not including *keyname* is an error.
            It makes no sense to include this for a boolean key.
            The default is :py:obj:`False`.
            If *keyname* is part of a mutually exclusive group, it is best
            to set *required* for the group as a whole and not set it for
            the individual members of the group because you may get unforeseen
            errors.
        :type required: bool
        :argument default:
            The value stored for this key if it does not appear in
            the input block.  A value of :py:obj:`None` is equivalent
            to no default. It makes no sense to give a default and mark
            it *required* as well.
            If the class :py:class:`SUPPRESS` is given instead of
            :py:obj:`None`, then this key will be removed from the
            namespace if it is not given.
            The default is :py:obj:`None`.
            If *keyname* is part of a mutually exclusive group and the
            group has been given a *dest* value, it is best
            to set *default* for the group as a whole and not set it for
            the individual members of the group because you may get
            unforeseen errors.
        :argument dest:
            If *dest* is given, *keyname* will be stored in the returned
            :py:class:`Namespace` as *dest*, not *keyname*.
            A value of :py:obj:`None` is equivalent to no dest.
            The default is :py:obj:`None`.
            If *keyname* is part of a mutually exclusive group and the
            group has been given a *dest* value, do not set *dest*
            for individual members of the group.
        :type dest: str
        :argument depends:
            Use *depends* to specify another key in the input file
            at the same input level (i.e. inside the same block or not
            in any block at all) that must also appear or a
            :py:exc:`ReaderError` will be raised.
            A value of :py:obj:`None` is equivalent to no depends.
            The default is :py:obj:`None`.
        :type depends: str
        :argument repeat:
            Determines if *keyname* can appear only once in the input
            file or several times.  The default is :py:obj:`False`,
            which means the *keyname* can only appear once or an
            error will be raised.  If *repeat* is :py:obj:`True`, the
            collected data will be returned in a list in the order in
            which it was read in.
            The default is :py:obj:`False`.
        :type repeat: bool
        """
        # Validate/normalize the name and fill in the level-wide default.
        keyname = self._check_keyname(keyname, 'keyname')
        self._ensure_default_has_a_value(kwargs)
        # Store this key
        self._keys[keyname] = BooleanKey(keyname, action, **kwargs)
        return self._keys[keyname]
def add_line_key(self, keyname, type=str, glob={}, keywords={},
case=None, **kwargs):
"""Add a line key to the input searcher.
:argument keyname:
The name of the key to search for.
:type keyname: str
:argument type:
The data type that to be read in for each positional
argument, given as a :py:obj:`list`. The length of the list
dictates how many arguments to look for. If this is an empty
:py:obj:`list` or :py:obj:`None` no positional arguments will
be read in.
*type* may be one or more of:
- :py:obj:`int`
- :py:obj:`float`
- :py:obj:`str`
- :py:obj:`None`
- an explicit :py:obj:`int` (i.e. :py:const:`4`),
:py:obj:`float` (i.e. :py:const:`5.4`) or :py:obj:`str` (i.e.
:py:const:`"hello"`)
- a compiled regular expression object
If you give an explicit :py:obj:`int`, :py:obj:`float` or
:py:obj:`str`, it is assumed that the
value must equal what you gave. :py:obj:`None` means that the
word :py:const:`"none"` is
what is expected. NOTE: If the entirety of *type* is
:py:obj:`None`, (i.e. ``type=None``), then no types are expected,
and one of *glob* or *keywords* is required.
If you only wish to read in one argument, you may give the type(s)
for that one argument directly (meaning not in a :py:obj:`list`).
This will cause the returned value to be the value itself, not a
1-length :py:obj:`list`.
For each value, you may give a :py:obj:`tuple` of types to indicate
more than one type is valid for that argument position.
NOTE: Is is very important that type choices for each argument are
given as :py:obj:`tuple` s, and that the :py:obj:`list` passed to
*type* is an actual :py:obj:`list` (as opposed to :py:obj:`tuple`)
because these are treated differently.
The default value is :py:obj:`str`.
:argument glob:
*glob* is a :py:obj:`dict` giving information on how to read in a
glob of arguments. Globs are read in after the positional
arguments. If there are no positional arguments, the whole
line is globbed. *glob* is not valid with *keywords*.
The glob :py:obj:`dict` accepts only four keys:
*len*
Must be one of :py:const:`'*'`, :py:const:`'+'`, or
:py:const:`'?'`. :py:const:`'*'` is a zero or more
glob, :py:const:`'+'` is an at least one or more glob, and
:py:const:`'?'` is a zero or one glob.
*type*
Indicates the data type the glob must be. This may be
any one of the types presented for positional arguments.
If this is omitted, then :py:obj:`str` is assumed.
*join*
*join* will join the globbed values as a space-separated
:py:obj:`str` and thus return a single :py:obj:`str`
instead of a :py:obj:`list`.
This is useful for reading in sentences or titles.
The default is :py:obj:`False` if *len* is :py:const:`'*'`
or :py:const:`'+'`
and :py:obj:`True` if *len* is :py:const:`'?'`.
*default*
In the case that no glob is given this is what will
be put into the *glob*. If there is no default,
nothing is put into the *glob*.
By default this is an empty :py:obj:`dict`.
:type glob: dict
:argument keywords:
*keywords* is a nested dictionary indicating key-value
pairs that can be read in. Each key in the dictionary is a
keyword to look for, and the value for that key is another
dictionary with the keys *type* and *default*. If an empty
dictionary or :py:obj:`None` is given, the defaults of
:py:class:`str` and :py:class:`SUPPRESS` will be chosen,
respectively. Like positional arguments, you may give as
many types as you wish per keyword.
By default this is an empty :py:obj:`dict`.
:type keywords: nested dict
:argument case:
States if this particular key is case-sensitive. Note that
this applies only to the arguments of *keyname*; *keyname*
itself uses the case-sensitivity default of the current
level.
By default, case is determined by the global value set when
initiallizing the class.
:type case: bool
:argument required:
Indicates that not inlcuding *keyname* is an error.
It makes no sense to give a *default* and mark it *required*
as well.
The default is :py:obj:`False`
If *keyname* is part of a mutually exclusive group, it is best
to set *required* for the group as a whole and not set it for
the individual members of the group because you may get unforseen
errors.
:type required: bool
:argument default:
The value stored for this key if it does not appear in
the input block. A value of :py:obj:`None` is equivalent
to no default. It makes no sense to give a default and mark
it *required* as well.
If the class :py:class:`SUPPRESS` is given instead of
:py:obj:`None`, then this key will be removed from the
namespace if it is not given.
The default is :py:obj:`None`.
If *keyname* is part of a mutually exclusive group and the
group has been given a *dest* value, it is best
to set *default* for the group as a whole and not set it for
the individual members of the group because you may get
unforseen errors.
:argument dest:
If *dest* is given, *keyname* will be stored in the returned
:py:class:`Namespace` as *dest*, not *keyname*.
A value of :py:obj:`None` is equivalent to no dest.
The default is :py:obj:`None`.
If *keyname* is part of a mutually exclusive group and the
group has been given a *dest* value, do not set *dest*
for individual members of the group.
:type dest: str
:argument depends:
Use *depends* to specify another key in the input file
at the same input level (i.e. inside the same block or not
in any block at all) that must also appear or a
:py:exc:`ReaderError` will be raised.
A value of :py:obj:`None` is equivalent to no depends.
The default is :py:obj:`None`.
:type depends: str
:argument repeat:
Determines if *keyname* can appear only once in the input
file or several times. The default is :py:obj:`False`,
which means this the *keyname* can only appear once or an
error will be raised. If *repeat* is :py:obj:`True`, the
collected data will be returned in a list in the order in
which it was read in.
The default is :py:obj:`False`.
:type repeat: bool
"""
keyname = self._check_keyname(keyname, 'keyname')
self._ensure_default_has_a_value(kwargs)
case = self._check_case(case, keyname)
# Store this key
self._keys[keyname] = LineKey(keyname, type, glob, keywords, case, **kwargs)
return self._keys[keyname]
def add_block_key(self, keyname, end='end', case=None,
                  ignoreunknown=None, **kwargs):
    """Register a block key with the input searcher.

    A block key opens a nested level of keys that is read until the
    *end* marker is encountered.  The returned object is itself a key
    container, so sub-keys may be attached to it with the same
    ``add_*_key`` methods.

    :argument keyname: Name of the key to search for.
    :type keyname: str
    :argument end: String marking the end of the block
        (default :py:const:`'end'`).
    :type end: str
    :argument case: Case-sensitivity of the block's *sub-keys*;
        defaults to the enclosing level's setting.
    :type case: bool
    :argument ignoreunknown: Suppress :py:exc:`ReaderError` for unknown
        sub-keys; defaults to the enclosing level's setting.
    :type ignoreunknown: bool

    Additional keyword arguments (*required*, *default*, *dest*,
    *depends*, *repeat*) behave as for the other ``add_*_key`` methods.
    """
    keyname = self._check_keyname(keyname, 'keyname')
    self._ensure_default_has_a_value(kwargs)
    case = self._check_case(case, keyname)
    # The block terminator must be a string.
    if not isinstance(end, py23_basestring):
        raise ValueError(keyname+': end must be str, given '+repr(end))
    # Fall back on the enclosing level's policy when none was supplied,
    # then validate the resulting value.
    if ignoreunknown is None:
        ignoreunknown = self._ignoreunknown
    if not isinstance(ignoreunknown, bool):
        raise ValueError(keyname+': ignoreunknown must be bool, '
                         'given '+repr(ignoreunknown))
    # Build the key, remember the enclosing level's case setting so the
    # block can later recognize its own terminator, then register it.
    block = BlockKey(keyname, end, case, ignoreunknown, **kwargs)
    block._upper_case = self._case
    self._keys[keyname] = block
    return block
def add_regex_line(self, handle, regex, case=None, **kwargs):
    """Register a whole-line regular-expression key.

    Each input line is matched against *regex*; on a match, the
    resulting match object is stored in the namespace under *handle*
    (or *dest*, if given).

    :argument handle: Name under which to store the match object —
        required because a regex line has no keyword of its own.
    :type handle: str
    :argument regex: Pattern used to search each line.  May be a string
        (compiled here) or an already-compiled pattern object.
    :type regex: str, compiled re object
    :argument case: Whether a *string* pattern is compiled
        case-sensitively; ignored for pre-compiled patterns.  Defaults
        to the enclosing level's setting.
    :type case: bool

    Additional keyword arguments (*required*, *default*, *dest*,
    *depends*, *repeat*) behave as for the other ``add_*_key`` methods.
    """
    handle = self._check_keyname(handle, 'handle')
    self._ensure_default_has_a_value(kwargs)
    case = self._check_case(case, handle)
    # Strings are compiled here; honoring the case setting via flags.
    if isinstance(regex, py23_basestring):
        flags = 0 if case else re.IGNORECASE
        regex = re.compile(regex, flags)
    key = Regex(handle, regex, **kwargs)
    self._keys[handle] = key
    return key
def add_mutually_exclusive_group(self, dest=None, default=None, required=False):
    """Create and register a mutually exclusive key group.

    At most one key of the group may appear in the input.  Keys are
    added to the returned group object with its own ``add_*_key``
    methods.

    :argument dest: Alternate namespace name for whichever group member
        is found; the members' own names are then removed from the
        :py:class:`Namespace`.  Avoid also setting *dest* on members.
    :type dest: str
    :argument default: Default for the group as a whole (only
        meaningful with *dest*); overrides member defaults.
        :py:class:`SUPPRESS` drops the group from the namespace when
        absent.
    :argument required: Require that exactly one member appear in the
        input.  Avoid also marking individual members required.
    :type required: bool
    """
    # Fall back on this level's default when none is supplied.
    if default is None:
        default = self._default
    if dest is not None and not isinstance(dest, py23_basestring):
        raise ValueError('dest must be a str, given '+repr(dest))
    if not isinstance(required, bool):
        raise ValueError('required value must be a bool, '
                         'given '+repr(required))
    group = MutExGroup(self._case, dest, default, required,
                       self._ignoreunknown)
    self._meg.append(group)
    return group
def _defaults_and_unfind(self):
    """Return a dict mapping each key's namespace name to its default.

    Keys whose default is :py:class:`SUPPRESS` are omitted.  Both the
    plain keys and the members of every mutually exclusive group are
    covered.

    NOTE(review): the historical name promises that keys are also
    "unfound" (reset between reads), but no such reset happens here —
    confirm whether that behavior lives elsewhere.
    """
    defaults = {}

    def _collect(keydict):
        # Record the default of every non-suppressed key, honoring dest.
        for val in py23_values(keydict)():
            if val._default is not SUPPRESS:
                name = val.name if val._dest is None else val._dest
                defaults[name] = val._default

    _collect(self._keys)
    for meg in self._meg:
        _collect(meg._keys)
    return defaults
def _parse_key_level(self, f, i):
    """Parse one key level of the input, recursing into sub-blocks.

    Returns the updated line index and the populated
    :py:class:`Namespace` for this level.
    """
    # Seed the namespace with every key's default, then overwrite with
    # whatever actually appears in the input.
    ns = Namespace(**self._defaults_and_unfind())
    i, ns = self._find_keys_in_input(f, i, ns)
    # Enforce required/mutually-exclusive/depends constraints.
    self._post(ns)
    return i, ns
def _find_keys_in_input(self, f, i, namespace):
    """Find all the keys in the input block.

    Scans lines of *f* starting at index *i*, dispatching each
    non-blank line to :py:meth:`_find_key`, until the end of input or
    (for block keys) this block's end marker.  Returns the updated
    line index and the populated *namespace*.
    """
    notend = True
    while i < len(f) and notend:
        # Only search for something if the line is not blank
        if f[i]:
            # Find if this line belongs to a key
            try:
                i = self._find_key(f, i, namespace)
            except ReaderError as e:
                # Error on unknown keys.  When ignoreunknown is set,
                # only "Unrecognized key" errors are swallowed; every
                # other parse error is still re-raised with this
                # level's name prepended.
                if self._ignoreunknown:
                    if 'Unrecognized' not in str(e):
                        raise ReaderError (self.name+': '+str(e))
                else:
                    raise ReaderError (self.name+': '+str(e))
        # Increment to the next line
        i += 1
        # If we are in the middle of a block, check if this is the end.
        # Only BlockKey instances have _end; plain levels hit the
        # AttributeError branch and keep scanning.
        try:
            if self._case and f[i] == self._end:
                notend = False
            elif not self._case and f[i].lower() == self._end.lower():
                notend = False
        except AttributeError:
            pass # Not a block, no end attribute
        except IndexError:
            # Ran off the end of the input while still inside a block
            # (the top-level 'main' reader is allowed to end here).
            if i == len(f) and self.name != 'main':
                raise ReaderError (self.name+': Unterminated block.')
    return i, namespace
def _find_key(self, f, i, namespace):
    """Attempt to find a key in this line.

    Returns the new current line number.
    Raises a ReaderError if the key in this line is unrecognized.
    """
    # The candidate keyword is the first whitespace-delimited token,
    # lowercased when this level is case-insensitive.
    first = f[i].split()[0]
    if not self._case:
        first = first.lower()
    # Find in the usual places.  Regex keys match the whole line via
    # their compiled pattern; ordinary keys match on the first token
    # (the AttributeError branch — they have no _regex attribute).
    for key, val in py23_items(self._keys)():
        try:
            if not val._regex.match(f[i]):
                continue
        except AttributeError:
            if key != first:
                continue
        # Delegate parsing to the key object, which may consume
        # multiple lines (blocks), then record the result.
        inew, name, parsed = val._parse(f, i, namespace)
        # Add this to the namespace
        namespace.add(name, parsed)
        return inew
    # Look in the mutually exclusive groups if not in usual places
    for meg in self._meg:
        for key, val in py23_items(meg._keys)():
            try:
                if not val._regex.match(f[i]):
                    continue
            except AttributeError:
                if key != first:
                    continue
            inew, name, parsed = val._parse(f, i, namespace)
            namespace.add(name, parsed)
            return inew
    # If this is a block key, check if this is the end of the block.
    # _end only exists on BlockKey; note the comparison uses the
    # *enclosing* level's case setting (_upper_case).
    try:
        e = f[i] if self._upper_case else f[i].lower()
    except AttributeError:
        pass
    else:
        if e == self._end:
            return i+1
    # If nothing was found, raise an error
    raise ReaderError (self.name+': Unrecognized key: "'+f[i]+'"')
def _post(self, namespace):
    """Post-process the keys.

    Enforces mutually-exclusive-group constraints (at most one member,
    dest/default handling), required keys, and 'depends' relationships,
    then finalizes the namespace.  Raises ReaderError on violations.
    """
    # Process the mutually exclusive groups separately
    for meg in self._meg:
        nkeys = 0
        # Loop over each key in this group and count the
        # number in the namespace
        for key, val in py23_items(meg._keys)():
            name = val._dest if val._dest is not None else val.name
            if name in namespace:
                nkeys += 1
                # Remember the (name, value) of the found member for
                # the dest-rewriting step below.
                thekey = [name, getattr(namespace, name)]
        # If none of the keys in the group were found
        if nkeys == 0:
            # Alert the user if a required key group was not found
            if meg._required:
                keys = sorted(meg._keys)
                msg = ': One and only one of '
                msg += ', '.join([repr(x) for x in keys[:-1]])
                msg += ', or '+repr(keys[-1])+' must be included.'
                raise ReaderError (self.name+msg)
            # Set the dest to the default if not suppressing
            elif meg._dest:
                if meg._default is not SUPPRESS:
                    # Set the default
                    setattr(namespace, meg._dest, meg._default)
                # Delete the keys in the group from the namespace
                for key in meg._keys:
                    namespace.remove(key)
        # If more than one key was given raise an error
        elif nkeys > 1:
            keys = sorted(meg._keys)
            msg = ': Only one of '
            msg += ', '.join([repr(x) for x in keys[:-1]])
            msg += ', or '+repr(keys[-1])+' may be included.'
            raise ReaderError (self.name+msg)
        # Otherwise this meg is good to go (exactly one member found)
        else:
            # If there is a dest then process the keys
            if meg._dest:
                # Add the dest name with the value of the found key
                setattr(namespace, meg._dest, thekey[1])
                # Replace this name in the order list
                indx = namespace._order.index(thekey[0])
                namespace._order[indx] = meg._dest
                # Delete the keys in the group from the namespace defaults
                for val in py23_values(meg._keys)():
                    name = val._dest if val._dest is not None else val.name
                    namespace.remove(name)
                    try:
                        del namespace._defaults[name]
                    except KeyError:
                        pass
    # Loop over the non-grouped keys and check key requirements
    for key, val in py23_items(self._keys)():
        name = val._dest if val._dest is not None else val.name
        # Identify missing required keys and raise error if not found
        if val._required and name not in namespace:
            msg = ': The key "'+key+'" is required but not found'
            raise ReaderError (self.name+msg)
    # Loop over the keys that were found and see if there are any
    # dependencies that were not filled.
    for key in namespace:
        # Check if this key has any dependencies,
        # and if so, they are given as well.
        for val in py23_values(self._keys)():
            name = val._dest if val._dest is not None else val.name
            if key == name:
                depends = getattr(val, '_depends', None)
                break
        else:
            # Key not registered at this level (e.g. a group dest):
            # it has no dependency to enforce.
            depends = None
        # Raise an error if the depending key is not found
        if depends and depends not in namespace:
            #if depends and depends not in namespace._order:
            msg = ': The key "'+key+'" requires that "'+depends
            msg += '" is also present, but it is not'
            raise ReaderError (self.name+msg)
    # Finalize the namespace
    namespace.finalize()
class MutExGroup(_KeyAdder):
    """Container for a group of keys of which at most one may appear."""

    def __init__(self, case, dest, default, required, _ignoreunknown):
        """Store the group's settings on top of the generic key adder."""
        super(MutExGroup, self).__init__(case=case)
        self._dest = dest
        self._default = default
        # dest must be a legal identifier string (or None).
        self._validate_string(self._dest)
        self._required = required
        self._ignoreunknown = _ignoreunknown
class BlockKey(_KeyAdder):
    """A key that opens a nested block of keys, terminated by an end marker."""

    def __init__(self, keyname, end, case, ignoreunknown, **kwargs):
        """Define a block key.

        :argument keyname: Name of the block key.
        :argument end: String marking the end of the block.
        :argument case: Case-sensitivity of the block's sub-keys.
        :argument ignoreunknown: Suppress errors on unknown sub-keys.
        Remaining keyword arguments (required/default/dest/...) are
        handled by :py:meth:`_add_kwargs`.
        """
        super(BlockKey, self).__init__(case=case)
        # Fill in the values
        self.name = keyname
        # BUGFIX: lowercase the end marker when matching is
        # case-INsensitive, so that comparisons against lowered input
        # (e.g. in _find_key) succeed.  The previous code lowered it on
        # the case-SENSITIVE path instead, which broke any block using
        # a non-lowercase terminator (e.g. end='END' with case=True).
        if self._case:
            self._end = end
        else:
            self._end = end.lower()
        self._ignoreunknown = ignoreunknown
        # Add the generic keyword arguments
        self._add_kwargs(**kwargs)
        # Check strings
        self._validate_string(self.name)
        self._validate_string(self._dest)
        self._validate_string(self._end)

    def _parse(self, f, i, namespace):
        """Parse this block starting at line *i*.

        Returns the line index after the block and the block's parsed
        value.  The block keyword must appear alone on its line.
        """
        # Parse this block
        n = len(f[i].split())
        if n == 1:
            i, val = self._parse_key_level(f, i+1)
            return self._return_val(i, val, namespace)
        else:
            raise ReaderError ('The block "'+self.name+'" was given '
                               'arguments, this is illegal')
| |
# Vendored up-to-date copy of locket.py
# Based on https://github.com/mwilliamson/locket.py/pull/8
# flake8: noqa
import time
import errno
import threading
import weakref
__all__ = ["lock_file"]
# Select the platform's file-locking primitives.  On POSIX, fcntl.flock
# provides both blocking and non-blocking locks; on Windows we fall back
# to the kernel32 LockFile/UnlockFile APIs via ctypes, which only offer
# a non-blocking lock (hence _lock_file_blocking_available = False).
try:
    import fcntl
except ImportError:
    try:
        import ctypes
        import ctypes.wintypes
        import msvcrt
    except ImportError:
        raise ImportError(
            "Platform not supported (failed to import fcntl, ctypes, msvcrt)"
        )
    else:
        # Windows: bind LockFile/UnlockFile with explicit signatures.
        _kernel32 = ctypes.WinDLL("kernel32", use_last_error=True)
        _WinAPI_LockFile = _kernel32.LockFile
        _WinAPI_LockFile.restype = ctypes.wintypes.BOOL
        # HANDLE hFile, then 4 DWORDs: offset low/high, length low/high.
        _WinAPI_LockFile.argtypes = [ctypes.wintypes.HANDLE] + [
            ctypes.wintypes.DWORD
        ] * 4
        _WinAPI_UnlockFile = _kernel32.UnlockFile
        _WinAPI_UnlockFile.restype = ctypes.wintypes.BOOL
        _WinAPI_UnlockFile.argtypes = [ctypes.wintypes.HANDLE] + [
            ctypes.wintypes.DWORD
        ] * 4

        # LockFile cannot block, so blocking acquisition is emulated by
        # polling (_acquire_non_blocking) elsewhere in this module.
        _lock_file_blocking_available = False

        def _lock_file_non_blocking(file_):
            # Lock the first byte of the file; returns True on success,
            # False if someone else holds the lock.
            res = _WinAPI_LockFile(msvcrt.get_osfhandle(file_.fileno()), 0, 0, 1, 0)
            if res:
                return True
            else:
                err = ctypes.get_last_error()
                # 33 = ERROR_LOCK_VIOLATION
                if err != 33:
                    raise ctypes.WinError(err)
                return False

        def _unlock_file(file_):
            _WinAPI_UnlockFile(msvcrt.get_osfhandle(file_.fileno()), 0, 0, 1, 0)
else:
    # POSIX: flock supports true blocking waits.
    _lock_file_blocking_available = True

    def _lock_file_blocking(file_):
        fcntl.flock(file_.fileno(), fcntl.LOCK_EX)

    def _lock_file_non_blocking(file_):
        try:
            fcntl.flock(file_.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
            return True
        except IOError as error:
            # EACCES/EAGAIN mean the lock is held elsewhere.
            if error.errno in [errno.EACCES, errno.EAGAIN]:
                return False
            else:
                raise

    def _unlock_file(file_):
        fcntl.flock(file_.fileno(), fcntl.LOCK_UN)
# Registry of live lock objects, one per path.  Weak values let an
# entry disappear as soon as no caller holds the lock object anymore.
_locks_lock = threading.Lock()
_locks = weakref.WeakValueDictionary()


def lock_file(path, **kwargs):
    """Return a locker for *path*, sharing one lock object per path.

    Keyword arguments (*timeout*, *retry_period*) are forwarded to the
    returned :py:class:`_Locker`.
    """
    with _locks_lock:
        lock = _locks.get(path)
        if lock is None:
            lock = _create_lock_file(path)
            _locks[path] = lock
    return _Locker(lock, **kwargs)
def _create_lock_file(path):
    """Build the composite lock for *path*: a thread lock guarding an
    OS-level file lock, acquired in that order."""
    return _LockSet([_ThreadLock(path), _LockFile(path)])
class LockError(Exception):
    """Raised when a lock cannot be acquired before the timeout expires."""
def _acquire_non_blocking(acquire, timeout, retry_period, path):
if retry_period is None:
retry_period = 0.05
start_time = time.time()
while True:
success = acquire()
if success:
return
elif timeout is not None and time.time() - start_time > timeout:
raise LockError("Couldn't lock {0}".format(path))
else:
time.sleep(retry_period)
class _LockSet:
def __init__(self, locks):
self._locks = locks
def acquire(self, timeout, retry_period):
acquired_locks = []
try:
for lock in self._locks:
lock.acquire(timeout, retry_period)
acquired_locks.append(lock)
except:
for acquired_lock in reversed(acquired_locks):
# TODO: handle exceptions
acquired_lock.release()
raise
def release(self):
for lock in reversed(self._locks):
# TODO: Handle exceptions
lock.release()
class _ThreadLock:
def __init__(self, path):
self._path = path
self._lock = threading.Lock()
def acquire(self, timeout=None, retry_period=None):
if timeout is None:
self._lock.acquire()
else:
_acquire_non_blocking(
acquire=lambda: self._lock.acquire(False),
timeout=timeout,
retry_period=retry_period,
path=self._path,
)
def release(self):
self._lock.release()
class _LockFile:
    """OS-level exclusive lock on the file at *path*."""

    def __init__(self, path):
        self._path = path
        self._file = None
        self._thread_lock = threading.Lock()

    def acquire(self, timeout=None, retry_period=None):
        """Open the lock file on first use and take the OS lock,
        blocking natively when possible, else polling."""
        if self._file is None:
            self._file = open(self._path, "wb")
        # A true blocking wait is only possible where the platform
        # provides one (POSIX flock); otherwise emulate by polling.
        if timeout is None and _lock_file_blocking_available:
            _lock_file_blocking(self._file)
            return
        _acquire_non_blocking(
            acquire=lambda: _lock_file_non_blocking(self._file),
            timeout=timeout,
            retry_period=retry_period,
            path=self._path,
        )

    def release(self):
        """Drop the OS lock and close the underlying file."""
        _unlock_file(self._file)
        self._file.close()
        self._file = None
class _Locker:
"""
A lock wrapper to always apply the given *timeout* and *retry_period*
to acquire() calls.
"""
def __init__(self, lock, timeout=None, retry_period=None):
self._lock = lock
self._timeout = timeout
self._retry_period = retry_period
def acquire(self):
self._lock.acquire(self._timeout, self._retry_period)
def release(self):
self._lock.release()
def __enter__(self):
self.acquire()
return self
def __exit__(self, *args):
self.release()
| |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Analysis files.
"""
import sys
import os.path
import tensorflow as tf
from absl import app
from absl import flags
from absl import gfile
import cPickle as pickle
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pylab
import matplotlib.pyplot as plt
import numpy as np, h5py
import scipy.io as sio
from scipy import ndimage
import random
import re # regular expression matching
# Command-line flags for the analysis script.
FLAGS = flags.FLAGS
flags.DEFINE_string('folder_name', 'experiment4', 'folder where to store all the data')
flags.DEFINE_string('save_location',
                    '/home/bhaishahster/',
                    'where to store logs and outputs?')
flags.DEFINE_string('data_location',
                    '/home/bhaishahster/data_breakdown/',
                    'where to take data from?')
flags.DEFINE_integer('n_chunks', 216, 'number of data chunks')  # should be 216
flags.DEFINE_integer('n_b_in_c', 10, 'number of batches in one chunk of data')
flags.DEFINE_integer('np_randseed', 23, 'numpy RNG seed')
flags.DEFINE_integer('randseed', 65, 'python RNG seed')
flags.DEFINE_integer('ratio_SU', 2, 'ratio of subunits/cells')
flags.DEFINE_float('step_sz', 0.001, 'step size for learning algorithm')
flags.DEFINE_string('model_id', 'poisson', 'which model to fit')
# BUGFIX: removed a duplicate `FLAGS = flags.FLAGS` that appeared both
# before and after the flag definitions; one binding suffices.
# NOTE(review): main() reads FLAGS.batchsz, which is not defined in this
# file — confirm it is defined by an imported module, or add a
# flags.DEFINE_integer('batchsz', ...) here.
def main(argv):
  """Load a fitted subunit model checkpoint and plot its learned filters.

  Reads the dataset summary .mat file, selects cells according to
  FLAGS.model_id, restores the latest TensorFlow checkpoint for the
  matching run, and displays each subunit filter as a 40x80 image.
  """
  print('\nCode started')
  # Seed both RNGs so cell selection / init is reproducible.
  np.random.seed(FLAGS.np_randseed)
  random.seed(FLAGS.randseed)
  ## Load data summary
  filename = FLAGS.data_location + 'data_details.mat'
  summary_file = gfile.Open(filename, 'r')
  data_summary = sio.loadmat(summary_file)
  cells = np.squeeze(data_summary['cells'])
  # Select the cells to analyze: a fixed set of four cell IDs for the
  # 'poisson'/'logistic' models, or every cell for 'poisson_full'.
  if FLAGS.model_id == 'poisson' or FLAGS.model_id == 'logistic':
    cells_choose = (cells ==3287) | (cells ==3318 ) | (cells ==3155) | (cells ==3066)
  if FLAGS.model_id == 'poisson_full':
    cells_choose = np.array(np.ones(np.shape(cells)), dtype='bool')
  n_cells = np.sum(cells_choose)
  tot_spks = np.squeeze(data_summary['tot_spks'])
  total_mask = np.squeeze(data_summary['totalMaskAccept_log']).T
  tot_spks_chosen_cells = tot_spks[cells_choose]
  # Pixels covered by at least one chosen cell's mask.
  # assumes total_mask is (n_cells, n_pixels) after the transpose —
  # TODO confirm against the .mat file layout.
  chosen_mask = np.array(np.sum(total_mask[cells_choose,:],0)>0, dtype='bool')
  print(np.shape(chosen_mask))
  print(np.sum(chosen_mask))
  stim_dim = np.sum(chosen_mask)
  print('\ndataset summary loaded')
  # use stim_dim, chosen_mask, cells_choose, tot_spks_chosen_cells, n_cells
  # decide the number of subunits to fit
  n_su = FLAGS.ratio_SU*n_cells
  # saving details
  #short_filename = 'data_model=ASM_pop_bg'
  # short_filename = ('data_model=ASM_pop_batch_sz='+ str(FLAGS.batchsz) + '_n_b_in_c' + str(FLAGS.n_b_in_c) +
  #                  '_step_sz'+ str(FLAGS.step_sz)+'_bg')
  # saving details
  # Reconstruct the run's filename prefix from the flags so the right
  # checkpoint directory is found.
  # NOTE(review): FLAGS.batchsz is not defined in this file — verify it
  # is declared by an imported module.
  if FLAGS.model_id == 'poisson':
    short_filename = ('data_model=ASM_pop_batch_sz='+ str(FLAGS.batchsz) + '_n_b_in_c' + str(FLAGS.n_b_in_c) +
                      '_step_sz'+ str(FLAGS.step_sz)+'_bg')
  if FLAGS.model_id == 'logistic':
    short_filename = ('data_model='+ str(FLAGS.model_id) +'_batch_sz='+ str(FLAGS.batchsz) + '_n_b_in_c' + str(FLAGS.n_b_in_c) +
                      '_step_sz'+ str(FLAGS.step_sz)+'_bg')
  if FLAGS.model_id == 'poisson_full':
    short_filename = ('data_model=' + str(FLAGS.model_id) + '_batch_sz='+ str(FLAGS.batchsz) + '_n_b_in_c' + str(FLAGS.n_b_in_c) +
                      '_step_sz'+ str(FLAGS.step_sz)+'_bg')
  parent_folder = FLAGS.save_location + FLAGS.folder_name + '/'
  FLAGS.save_location = parent_folder +short_filename + '/'
  print(gfile.IsDirectory(FLAGS.save_location))
  print(FLAGS.save_location)
  save_filename = FLAGS.save_location + short_filename
  with tf.Session() as sess:
    # Learn population model!
    # Placeholders mirror the training graph so the checkpoint's
    # variables can be restored by name.
    stim = tf.placeholder(tf.float32, shape=[None, stim_dim], name='stim')
    resp = tf.placeholder(tf.float32, name='resp')
    data_len = tf.placeholder(tf.float32, name='data_len')
    # variables: w = subunit filters, a = cell/subunit weights; shapes
    # differ between the poisson and logistic parameterizations.
    if FLAGS.model_id == 'poisson' or FLAGS.model_id == 'poisson_full':
      w = tf.Variable(np.array(0.01 * np.random.randn(stim_dim, n_su), dtype='float32'))
      a = tf.Variable(np.array(0.1 * np.random.rand(n_cells, 1, n_su), dtype='float32'))
    if FLAGS.model_id == 'logistic':
      w = tf.Variable(np.array(0.01 * np.random.randn(stim_dim, n_su), dtype='float32'))
      a = tf.Variable(np.array(0.01 * np.random.rand(n_su, n_cells), dtype='float32'))
      b_init = np.random.randn(n_cells) #np.log((np.sum(response,0))/(response.shape[0]-np.sum(response,0)))
      b = tf.Variable(b_init,dtype='float32')
    # get relevant files: split checkpoint data files from .meta files.
    file_list = gfile.ListDirectory(FLAGS.save_location)
    save_filename = FLAGS.save_location + short_filename
    print('\nLoading: ', save_filename)
    bin_files = []
    meta_files = []
    for file_n in file_list:
      if re.search(short_filename + '.', file_n):
        if re.search('.meta', file_n):
          meta_files += [file_n]
        else:
          bin_files += [file_n]
    #print(bin_files)
    print(len(meta_files), len(bin_files), len(file_list))
    # get iteration numbers from the checkpoint filename suffixes
    # (files are named <prefix>-<iteration>).
    iterations = np.array([])
    for file_name in bin_files:
      try:
        iterations = np.append(iterations, int(file_name.split('/')[-1].split('-')[-1]))
      except:
        print('Could not load filename: ' + file_name)
    iterations.sort()
    print(iterations)
    # Use the most recent checkpoint.
    iter_plot = iterations[-1]
    print(int(iter_plot))
    # load tensorflow variables from that checkpoint.
    saver_var = tf.train.Saver(tf.all_variables())
    restore_file = save_filename + '-' + str(int(iter_plot))
    saver_var.restore(sess, restore_file)
    a_eval = a.eval()
    print(np.exp(np.squeeze(a_eval)))
    #print(np.shape(a_eval))
    # get 2D region to plot: bounding box of the chosen mask on the
    # 40x80 pixel grid.
    mask2D = np.reshape(chosen_mask, [40, 80])
    nz_idx = np.nonzero(mask2D)
    np.shape(nz_idx)
    print(nz_idx)
    ylim = np.array([np.min(nz_idx[0])-1, np.max(nz_idx[0])+1])
    xlim = np.array([np.min(nz_idx[1])-1, np.max(nz_idx[1])+1])
    # Plot each learned subunit filter, scattered back into the full
    # 3200-pixel frame, in a roughly square grid of subplots.
    w_eval = w.eval()
    plt.figure()
    n_su = w_eval.shape[1]
    for isu in np.arange(n_su):
      xx = np.zeros((3200))
      xx[chosen_mask] = w_eval[:, isu]
      fig = plt.subplot(np.ceil(np.sqrt(n_su)), np.ceil(np.sqrt(n_su)), isu+1)
      plt.imshow(np.reshape(xx, [40, 80]), interpolation='nearest', cmap='gray')
      plt.ylim(ylim)
      plt.xlim(xlim)
      fig.axes.get_xaxis().set_visible(False)
      fig.axes.get_yaxis().set_visible(False)
    #if FLAGS.model_id == 'logistic' or FLAGS.model_id == 'hinge':
    #  plt.title(str(a_eval[isu, :]))
    #else:
    #  plt.title(str(np.squeeze(np.exp(a_eval[:, 0, isu]))), fontsize=12)
    plt.suptitle('Iteration:' + str(int(iter_plot)) + ' batchSz:' + str(FLAGS.batchsz) + ' step size:' + str(FLAGS.step_sz), fontsize=18)
    plt.show()
    plt.draw()
# Script entry point: absl's app.run parses the flags and then invokes
# main() in this module.
if __name__ == '__main__':
  app.run()
| |
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cmath
import random
import numpy as np
import pytest
import cirq
from cirq import value
from cirq.transformers.analytical_decompositions.two_qubit_to_cz import (
_parity_interaction,
_is_trivial_angle,
two_qubit_matrix_to_diagonal_and_operations,
)
from cirq.testing import random_two_qubit_circuit_with_czs
ALLOW_DEPRECATION_IN_TEST = 'ALLOW_DEPRECATION_IN_TEST'
def test_deprecated_submodule():
    """Accessing the old cirq.optimizers location still works but emits
    the expected deprecation warning pointing at the new module path."""
    with cirq.testing.assert_deprecated(
        "Use cirq.transformers.analytical_decompositions.two_qubit_to_cz instead", deadline="v0.16"
    ):
        _ = cirq.optimizers.two_qubit_decompositions.two_qubit_matrix_to_operations
# Angles within the 1e-8 tolerance of 0 or ±pi/4 are "trivial"; these
# offsets sit just inside and just outside that tolerance.
_SMALL_ERR = 1e-8 * 2 / 3
_LARGE_ERR = 1e-8 * 4 / 3


@pytest.mark.parametrize(
    'rad,expected',
    [
        (np.pi / 4, True),
        (np.pi / 4 + _SMALL_ERR, True),
        (np.pi / 4 + _LARGE_ERR, False),
        (np.pi / 4 - _SMALL_ERR, True),
        (np.pi / 4 - _LARGE_ERR, False),
        (-np.pi / 4, True),
        (-np.pi / 4 + _SMALL_ERR, True),
        (-np.pi / 4 + _LARGE_ERR, False),
        (-np.pi / 4 - _SMALL_ERR, True),
        (-np.pi / 4 - _LARGE_ERR, False),
        (0, True),
        (_SMALL_ERR, True),
        (_LARGE_ERR, False),
        (-_SMALL_ERR, True),
        (-_LARGE_ERR, False),
        (np.pi / 8, False),
        (-np.pi / 8, False),
    ],
)
def test_is_trivial_angle(rad, expected):
    """_is_trivial_angle accepts angles within tolerance of 0 or ±pi/4."""
    tolerance = 1e-8
    assert _is_trivial_angle(rad, tolerance) == expected, f'rad = {rad}'
def _operations_to_matrix(operations, qubits):
    """Return the unitary of *operations* over exactly *qubits*, in order."""
    circuit = cirq.Circuit(operations)
    return circuit.unitary(
        qubit_order=cirq.QubitOrder.explicit(qubits), qubits_that_should_be_present=qubits
    )
def _random_single_partial_cz_effect():
    """Random two-qubit unitary: local gates around one partial CZ."""
    return cirq.dot(
        # Random single-qubit layer before the interaction.
        cirq.kron(cirq.testing.random_unitary(2), cirq.testing.random_unitary(2)),
        # A CZ with a random partial phase.
        np.diag([1, 1, 1, cmath.exp(2j * random.random() * np.pi)]),
        # Random single-qubit layer after the interaction.
        cirq.kron(cirq.testing.random_unitary(2), cirq.testing.random_unitary(2)),
    )
def _random_double_partial_cz_effect():
    """Random two-qubit unitary: local gates around two partial CZs."""
    def _local_layer():
        # A random single-qubit gate on each qubit.
        return cirq.kron(cirq.testing.random_unitary(2), cirq.testing.random_unitary(2))

    def _partial_cz():
        # A CZ with a random partial phase.
        return np.diag([1, 1, 1, cmath.exp(2j * random.random() * np.pi)])

    return cirq.dot(_local_layer(), _partial_cz(), _local_layer(), _partial_cz(), _local_layer())
def _random_double_full_cz_effect():
    """Random two-qubit unitary: local gates around two full CZs."""
    def _local_layer():
        # A random single-qubit gate on each qubit.
        return cirq.kron(cirq.testing.random_unitary(2), cirq.testing.random_unitary(2))

    return cirq.dot(
        _local_layer(),
        cirq.unitary(cirq.CZ),
        _local_layer(),
        cirq.unitary(cirq.CZ),
        _local_layer(),
    )
def assert_cz_depth_below(operations, threshold, must_be_full):
    """Assert the summed |CZ exponent| of *operations* is at most *threshold*.

    Every two-qubit operation must be a CZPowGate; when *must_be_full*
    is set, each one must additionally be a complete CZ (exponent 1).
    """
    total_cz = 0
    for op in operations:
        assert len(op.qubits) <= 2
        if len(op.qubits) != 2:
            continue
        assert isinstance(op.gate, cirq.CZPowGate)
        exponent = value.canonicalize_half_turns(op.gate.exponent)
        if must_be_full:
            # Partial CZs are forbidden in this mode.
            assert exponent == 1
        total_cz += abs(exponent)
    assert total_cz <= threshold
def assert_ops_implement_unitary(q0, q1, operations, intended_effect, atol=0.01):
    """Assert *operations* on (q0, q1) realize *intended_effect* up to
    global phase, within tolerance *atol*."""
    actual = _operations_to_matrix(operations, (q0, q1))
    assert cirq.allclose_up_to_global_phase(actual, intended_effect, atol=atol)
@pytest.mark.parametrize(
    'max_partial_cz_depth,max_full_cz_depth,effect',
    [
        (0, 0, np.eye(4)),
        (
            0,
            0,
            np.array(
                [
                    [0, 0, 0, 1],
                    [0, 0, 1, 0],
                    [0, 1, 0, 0],
                    [1, 0, 0, 0j],
                ]
            ),
        ),
        (0, 0, cirq.unitary(cirq.CZ ** 0.00000001)),
        (0.5, 2, cirq.unitary(cirq.CZ ** 0.5)),
        (1, 1, cirq.unitary(cirq.CZ)),
        (1, 1, cirq.unitary(cirq.CNOT)),
        (
            1,
            1,
            np.array(
                [
                    [1, 0, 0, 1j],
                    [0, 1, 1j, 0],
                    [0, 1j, 1, 0],
                    [1j, 0, 0, 1],
                ]
            )
            * np.sqrt(0.5),
        ),
        (
            1,
            1,
            np.array(
                [
                    [1, 0, 0, -1j],
                    [0, 1, -1j, 0],
                    [0, -1j, 1, 0],
                    [-1j, 0, 0, 1],
                ]
            )
            * np.sqrt(0.5),
        ),
        (
            1,
            1,
            np.array(
                [
                    [1, 0, 0, 1j],
                    [0, 1, -1j, 0],
                    [0, -1j, 1, 0],
                    [1j, 0, 0, 1],
                ]
            )
            * np.sqrt(0.5),
        ),
        (1.5, 3, cirq.map_eigenvalues(cirq.unitary(cirq.SWAP), lambda e: e ** 0.5)),
        (2, 2, cirq.unitary(cirq.SWAP).dot(cirq.unitary(cirq.CZ))),
        (3, 3, cirq.unitary(cirq.SWAP)),
        (
            3,
            3,
            np.array(
                [
                    [0, 0, 0, 1],
                    [0, 1, 0, 0],
                    [0, 0, 1, 0],
                    [1, 0, 0, 0j],
                ]
            ),
        ),
    ]
    # Randomized cases: the depth bound depends only on the interaction class
    # (single/double, partial/full CZ), not on the specific random factors.
    # These are evaluated once, at test collection time.
    + [(1, 2, _random_single_partial_cz_effect()) for _ in range(10)]
    + [(2, 2, _random_double_full_cz_effect()) for _ in range(10)]
    + [(2, 3, _random_double_partial_cz_effect()) for _ in range(10)]
    + [(3, 3, cirq.testing.random_unitary(4)) for _ in range(10)],
)
def test_two_to_ops_equivalent_and_bounded_for_known_and_random(
    max_partial_cz_depth, max_full_cz_depth, effect
):
    """Both decompositions must reproduce *effect* and respect their CZ-depth bound."""
    q0 = cirq.NamedQubit('q0')
    q1 = cirq.NamedQubit('q1')
    operations_with_partial = cirq.two_qubit_matrix_to_operations(q0, q1, effect, True)
    operations_with_full = cirq.two_qubit_matrix_to_operations(q0, q1, effect, False)
    assert_ops_implement_unitary(q0, q1, operations_with_partial, effect)
    assert_ops_implement_unitary(q0, q1, operations_with_full, effect)
    assert_cz_depth_below(operations_with_partial, max_partial_cz_depth, False)
    assert_cz_depth_below(operations_with_full, max_full_cz_depth, True)
def test_trivial_parity_interaction_corner_case():
    """A near-quarter-turn parity interaction should still yield a 2-moment circuit."""
    q0 = cirq.NamedQubit('q0')
    q1 = cirq.NamedQubit('q1')
    near_quarter_turn = np.pi / 4 * 0.99
    circuit = cirq.Circuit(_parity_interaction(q0, q1, -near_quarter_turn, 1e-2))
    assert len(circuit) == 2
def test_kak_decomposition_depth_full_cz():
    """Moment-count bounds for full-CZ decompositions of representative unitaries."""
    a, b = cirq.LineQubit.range(2)

    # Generic random unitary — worst case: 3 CZ, 3+1 PhasedX, 1 Z.
    u = cirq.testing.random_unitary(4)
    ops = cirq.two_qubit_matrix_to_operations(a, b, u, False)
    assert len(cirq.Circuit(ops)) <= 8

    # Double-axis interaction: 2 CZ, 2+1 PhasedX, 1 Z.
    u = cirq.unitary(cirq.Circuit(cirq.CNOT(a, b), cirq.CNOT(b, a)))
    ops = cirq.two_qubit_matrix_to_operations(a, b, u, False)
    assert len(cirq.Circuit(ops)) <= 6

    # Same unitary, but without cleanup: extra Pauli gates remain
    # (length should be 13).
    u = cirq.unitary(cirq.Circuit(cirq.CNOT(a, b), cirq.CNOT(b, a)))
    ops = cirq.two_qubit_matrix_to_operations(a, b, u, False, 1e-8, False)
    assert len(cirq.Circuit(ops)) > 6

    # Partial single-axis interaction: 2 CZ, 2+1 PhasedX, 1 Z.
    u = cirq.unitary(cirq.CNOT ** 0.1)
    ops = cirq.two_qubit_matrix_to_operations(a, b, u, False)
    assert len(cirq.Circuit(ops)) <= 6

    # Full single-axis interaction: 1 CZ, 1+1 PhasedX, 1 Z.
    u = cirq.unitary(cirq.ControlledGate(cirq.Y))
    ops = cirq.two_qubit_matrix_to_operations(a, b, u, False)
    assert len(cirq.Circuit(ops)) <= 4
def test_kak_decomposition_depth_partial_cz():
    """Moment-count bounds for partial-CZ decompositions of representative unitaries."""
    a, b = cirq.LineQubit.range(2)

    # Generic random unitary — worst case: 3 CP, 3+1 PhasedX, 1 Z.
    u = cirq.testing.random_unitary(4)
    ops = cirq.two_qubit_matrix_to_operations(a, b, u, True)
    assert len(cirq.Circuit(ops)) <= 8

    # Double-axis interaction: 2 CP, 2+1 PhasedX, 1 Z.
    u = cirq.unitary(cirq.Circuit(cirq.CNOT(a, b), cirq.CNOT(b, a)))
    ops = cirq.two_qubit_matrix_to_operations(a, b, u, True)
    assert len(cirq.Circuit(ops)) <= 6

    # Partial single-axis interaction: 1 CP, 1+1 PhasedX, 1 Z.
    u = cirq.unitary(cirq.CNOT ** 0.1)
    ops = cirq.two_qubit_matrix_to_operations(a, b, u, True)
    assert len(cirq.Circuit(ops)) <= 4

    # Full single-axis interaction: 1 CP, 1+1 PhasedX, 1 Z.
    u = cirq.unitary(cirq.ControlledGate(cirq.Y))
    ops = cirq.two_qubit_matrix_to_operations(a, b, u, True)
    assert len(cirq.Circuit(ops)) <= 4
@pytest.mark.parametrize(
    "v",
    [
        cirq.unitary(random_two_qubit_circuit_with_czs(3)),
        cirq.unitary(random_two_qubit_circuit_with_czs(2)),
        np.diag(np.exp(1j * np.pi * np.random.random(4))),
    ],
)
def test_decompose_to_diagonal_and_circuit(v):
    """Extracted diagonal composed with the returned ops must equal *v*.

    Checks that the returned `diagonal` really is diagonal, and that
    diagonal @ ops reproduces *v* up to global phase.
    """
    b, c = cirq.LineQubit.range(2)
    diagonal, ops = two_qubit_matrix_to_diagonal_and_operations(b, c, v)
    assert cirq.is_diagonal(diagonal)
    combined_circuit = cirq.Circuit(cirq.MatrixGate(diagonal)(b, c), ops)
    circuit_unitary = combined_circuit.unitary(qubits_that_should_be_present=[b, c])
    cirq.testing.assert_allclose_up_to_global_phase(circuit_unitary, v, atol=1e-14)
| |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template import defaultfilters as filters
from django.utils.http import urlencode
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import tables
from horizon.utils.memoized import memoized # noqa
from aws_dashboard.api import ec2
NOT_LAUNCHABLE_FORMATS = ['aki', 'ari']
class LaunchImage(tables.LinkAction):
    """Table action linking to the launch-instance form for an image row."""
    name = "launch_image"
    verbose_name = _("Launch Instance")
    url = "horizon:aws:ec2:launch"
    classes = ("ajax-modal", "btn-launch")
    icon = "cloud-upload"
    policy_rules = (("compute", "compute:create"),)

    def get_link_url(self, datum):
        """Build the launch URL, carrying the boot-source type and id as a query."""
        if get_image_type(datum) == "image":
            source_type = "image_id"
        else:
            source_type = "instance_snapshot_id"
        query = urlencode({"source_type": source_type,
                           "source_id": self.table.get_object_id(datum)})
        return "?".join([reverse(self.url), query])
class LaunchImageNG(LaunchImage):
    """Angular ("next-gen") launch action: opens the launch wizard client-side
    via ng-click instead of following a server-rendered link."""
    name = "launch_image_ng"
    verbose_name = _("Launch")
    url = "horizon:aws:images:index"
    classes = ("btn-launch", )
    ajax = False

    def __init__(self, attrs=None, **kwargs):
        # Render before other actions so the ng attributes are attached.
        kwargs['preempt'] = True
        # Fix: the super() call previously named LaunchImage, starting the MRO
        # lookup *after* this class's parent. That is harmless today only
        # because LaunchImage defines no __init__; naming this class keeps
        # any future LaunchImage.__init__ from being silently skipped.
        super(LaunchImageNG, self).__init__(attrs, **kwargs)

    def get_link_url(self, datum):
        """Attach the ng-controller/ng-click handlers; the href itself is inert."""
        imageId = self.table.get_object_id(datum)
        url = reverse(self.url)
        ngclick = "modal.openLaunchEC2InstanceWizard(" \
            "{successUrl: '%s', source_type: 'image', imageId: '%s'})" % (url, imageId)
        self.attrs.update({
            "ng-controller": "LaunchEC2InstanceModalController as modal",
            "ng-click": ngclick
        })
        return "javascript:void(0);"
class DeleteImage(tables.DeleteAction):
    """Row/table action that deletes the selected image(s) via the EC2 API."""
    help_text = _("Deleted images are not recoverable.")
    @staticmethod
    def action_present(count):
        # Button label, pluralized by the number of selected rows.
        return ungettext_lazy(
            u"Delete Image",
            u"Delete Images",
            count
        )
    @staticmethod
    def action_past(count):
        # Past-tense label used in the post-action notification.
        return ungettext_lazy(
            u"Deleted Image",
            u"Deleted Images",
            count
        )
    def delete(self, request, obj_id):
        """Delete one image; errors propagate to the table framework's handler."""
        ec2.delete_image(request, obj_id)
def filter_tenants():
    """Return the IMAGES_LIST_FILTER_TENANTS setting, or [] when not configured."""
    return getattr(settings, 'IMAGES_LIST_FILTER_TENANTS', [])
@memoized
def filter_tenant_ids():
    """Tenant ids pulled from the configured tenant filters (memoized per process)."""
    return [entry['tenant'] for entry in filter_tenants()]
def get_image_categories(im, user_tenant_id):
    """Classify *im* for client-side filtering.

    Returns labels from: 'public', then exactly one ownership label
    ('project' for own images, a configured tenant id, or 'shared' for
    non-public images from unknown tenants), and always 'other' last.
    """
    categories = ['public'] if im.is_public else []
    if im.owner == user_tenant_id:
        categories.append('project')
    elif im.owner in filter_tenant_ids():
        categories.append(im.owner)
    elif not im.is_public:
        categories.append('shared')
    return categories + ['other']
def get_image_name(image):
    """Display name for *image*, falling back to its id when unnamed/empty."""
    name = getattr(image, "name", None)
    return name if name else image.id
def get_image_type(image):
    """The image's 'image_type' property; defaults to "image" when absent."""
    properties = getattr(image, "properties", {})
    return properties.get("image_type", "image")
class UpdateRow(tables.Row):
    """Ajax-refreshable row for the images table."""
    ajax = True
    def get_data(self, request, image_id):
        """Fetch the current image state for an ajax row refresh."""
        image = ec2.get_image(request, image_id)
        return image
    def load_cells(self, image=None):
        super(UpdateRow, self).load_cells(image)
        # Tag the row with the image category for client-side filtering.
        image = self.datum
        my_tenant_id = self.table.request.user.tenant_id
        image_categories = get_image_categories(image, my_tenant_id)
        for category in image_categories:
            self.classes.append('category-' + category)
class InstancesFilterAction(tables.FilterAction):
    """Exact-match query filtering on image name, status, or id."""
    filter_type = "query"
    # (field, label, api_filterable) triples, per horizon's FilterAction contract.
    filter_choices = (('name', _("Image Name ="), True),
                      ('status', _("Status ="), True),
                      ('id', _("Image ID ="), True))
class ImagesTable(tables.DataTable):
    """Table of EC2 images with launch/delete actions and ajax status polling."""
    # Row-completion mapping for status polling:
    # True = terminal success, False = terminal failure, None = keep polling.
    STATUS_CHOICES = (
        ("available", True),
        ("pending", None),
        ("deregistered", False),
        ("deleted", False),
        ("failed", False),
        ("error", False),
        ("transient", False),
    )
    # Human-readable (translated) labels for each raw status value.
    STATUS_DISPLAY_CHOICES = (
        ("available", pgettext_lazy("Current status of an Image", u"Available")),
        ("pending", pgettext_lazy("Current status of an Image", u"Pending")),
        ("deregistered", pgettext_lazy("Current status of an Image",
                                       u"Deregistered")),
        ("failed", pgettext_lazy("Current status of an Image", u"Failed")),
        ("error", pgettext_lazy("Current status of an Image", u"Error")),
        ("transient", pgettext_lazy("Current status of an Image", u"Transient")),
        ("deleted", pgettext_lazy("Current status of an Image", u"Deleted")),
        ("deactivated", pgettext_lazy("Current status of an Image",
                                      u"Deactivated")),
    )
    TYPE_CHOICES = (
        ("image", pgettext_lazy("Type of an image", u"Image")),
        ("snapshot", pgettext_lazy("Type of an image", u"Snapshot")),
    )
    name = tables.WrappingColumn(get_image_name,
                                 # TODO TBD
                                 # link="horizon:aws:images:images:detail",
                                 verbose_name=_("Image Name"),)
    image_id = tables.Column("id",
                             verbose_name=_("Image ID"))
    image_type = tables.Column(get_image_type,
                               verbose_name=_("Type"),
                               display_choices=TYPE_CHOICES)
    status = tables.Column("status",
                           verbose_name=_("Status"),
                           status=True,
                           status_choices=STATUS_CHOICES,
                           display_choices=STATUS_DISPLAY_CHOICES)
    public = tables.Column("is_public",
                           verbose_name=_("Public"),
                           empty_value=False,
                           filters=(filters.yesno, filters.capfirst))
    class Meta(object):
        name = "images"
        row_class = UpdateRow
        # Polling watches the "status" column via STATUS_CHOICES above.
        status_columns = ["status"]
        verbose_name = _("Images")
        table_actions = (InstancesFilterAction, DeleteImage,)
        launch_actions = (LaunchImageNG,)
        row_actions = launch_actions + (DeleteImage, )
| |
#!/usr/bin/env python
# Copyright (c) 2012, AT&T Labs, Yun Mao <yunmao@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""pylint error checking."""
from __future__ import print_function
import json
import re
import sys
from pylint import lint
from six.moves import cStringIO as StringIO # noqa
# Hook points for skipping pylint results: message codes (e.g. "E1101"),
# message substrings, and module path prefixes.
ignore_codes = []
ignore_messages = []
# We ignore all errors in openstack.common because it should be checked
# elsewhere.
ignore_modules = ["ceilometer/openstack/common/"]
# File holding the accepted ("known") pylint errors, one JSON key per line.
KNOWN_PYLINT_EXCEPTIONS_FILE = "tools/pylint_exceptions"
class LintOutput(object):
    """One pylint message, parsed into filename/line/code/message fields."""
    # Cache of the most recently read source file, shared class-wide, so a run
    # of consecutive messages for the same file only reads it once.
    _cached_filename = None
    _cached_content = None
    def __init__(self, filename, lineno, line_content, code, message,
                 lintoutput):
        """Store the parsed fields plus the raw pylint output line."""
        self.filename = filename
        self.lineno = lineno
        self.line_content = line_content
        self.code = code
        self.message = message
        self.lintoutput = lintoutput
    @classmethod
    def from_line(cls, line):
        """Parse one "<file>:<lineno>: [<code>...] <msg>" pylint line.

        NOTE(review): a line that doesn't match the pattern makes `m` None and
        raises AttributeError on .groups() — callers feed only matching lines.
        """
        m = re.search(r"(\S+):(\d+): \[(\S+)(, \S+)?] (.*)", line)
        matched = m.groups()
        filename, lineno, code, message = (matched[0], int(matched[1]),
                                           matched[2], matched[-1])
        # Re-read the source file only when it differs from the cached one.
        if cls._cached_filename != filename:
            with open(filename) as f:
                cls._cached_content = list(f.readlines())
                cls._cached_filename = filename
        line_content = cls._cached_content[lineno - 1].rstrip()
        return cls(filename, lineno, line_content, code, message,
                   line.rstrip())
    @classmethod
    def from_msg_to_dict(cls, msg):
        """From the output of pylint msg, to a dict.
        Each key is a unique error identifier, value is a list of LintOutput
        """
        result = {}
        for line in msg.splitlines():
            obj = cls.from_line(line)
            if obj.is_ignored():
                continue
            key = obj.key()
            if key not in result:
                result[key] = []
            result[key].append(obj)
        return result
    def is_ignored(self):
        """True when this message matches any of the module-level ignore lists."""
        if self.code in ignore_codes:
            return True
        if any(self.filename.startswith(name) for name in ignore_modules):
            return True
        if any(msg in self.message for msg in ignore_messages):
            return True
        return False
    def key(self):
        """Stable identity used to match an error against the known-exceptions set."""
        if self.code in ["E1101", "E1103"]:
            # These two types of errors are like Foo class has no member bar.
            # We discard the source code so that the error will be ignored
            # next time another Foo.bar is encountered.
            return self.message, ""
        return self.message, self.line_content.strip()
    def json(self):
        """JSON dump of all instance fields (for debugging/reporting)."""
        return json.dumps(self.__dict__)
    def review_str(self):
        """Multi-line human-readable rendering of this error."""
        return ("File %(filename)s\nLine %(lineno)d:%(line_content)s\n"
                "%(code)s: %(message)s" % {
                    "filename": self.filename,
                    "lineno": self.lineno,
                    "line_content": self.line_content,
                    "code": self.code,
                    "message": self.message,
                })
class ErrorKeys(object):
    """Serialization helpers for the set of known pylint error keys."""
    @classmethod
    def print_json(cls, errors, output=sys.stdout):
        """Write each error key as one JSON line, after a generated-file header."""
        print("# automatically generated by tools/lintstack.py", file=output)
        for i in sorted(errors.keys()):
            print(json.dumps(i), file=output)
    @classmethod
    def from_file(cls, filename):
        """Read keys written by print_json back into a set of tuples.

        Blank lines and '#' comment lines are skipped.
        """
        keys = set()
        # Fix: the file handle was previously leaked (bare open() in the for
        # statement); a context manager closes it deterministically.
        with open(filename) as key_file:
            for line in key_file:
                if line and line[0] != "#":
                    d = json.loads(line)
                    keys.add(tuple(d))
        return keys
def run_pylint():
    """Run pylint -E over the ceilometer package and return its report text.

    Bug fix: `buff` was allocated but never handed to lint.Run, so the
    function always returned an empty string. A TextReporter writing into
    the buffer makes the output capturable.
    """
    from pylint.reporters import text  # local import; only needed here
    buff = StringIO()
    reporter = text.TextReporter(output=buff)
    args = ["--msg-template={path}:{line}: [{msg_id}({symbol}), {obj}] {msg}",
            "-E",
            "ceilometer"]
    lint.Run(args, reporter=reporter, exit=False)
    val = buff.getvalue()
    buff.close()
    return val
def generate_error_keys(msg=None):
    """Write the current pylint error keys out as the known-exceptions file."""
    print("Generating", KNOWN_PYLINT_EXCEPTIONS_FILE)
    if msg is None:
        msg = run_pylint()
    error_map = LintOutput.from_msg_to_dict(msg)
    with open(KNOWN_PYLINT_EXCEPTIONS_FILE, "w") as exceptions_file:
        ErrorKeys.print_json(error_map, output=exceptions_file)
def validate(newmsg=None):
    """Compare current pylint errors against the known-exceptions file.

    Prints any new errors and exits with status 1 when the check fails;
    otherwise reports success and lists exceptions that have disappeared.
    """
    print("Loading", KNOWN_PYLINT_EXCEPTIONS_FILE)
    known = ErrorKeys.from_file(KNOWN_PYLINT_EXCEPTIONS_FILE)
    if newmsg is None:
        print("Running pylint. Be patient...")
        newmsg = run_pylint()
    errors = LintOutput.from_msg_to_dict(newmsg)
    print("Unique errors reported by pylint: was %d, now %d."
          % (len(known), len(errors)))

    passed = True
    for err_key, err_list in errors.items():
        if err_key in known:
            continue
        for err in err_list:
            print(err.lintoutput)
            print()
        passed = False

    if not passed:
        print("Please fix the errors above. If you believe they are false"
              " positives, run 'tools/lintstack.py generate' to overwrite.")
        sys.exit(1)

    print("Congrats! pylint check passed.")
    redundant = known - set(errors.keys())
    if redundant:
        print("Extra credit: some known pylint exceptions disappeared.")
        for i in sorted(redundant):
            print(json.dumps(i))
        print("Consider regenerating the exception file if you will.")
def usage():
    """Print the command-line usage text for this tool."""
    print("""Usage: tools/lintstack.py [generate|validate]
    To generate pylint_exceptions file: tools/lintstack.py generate
    To validate the current commit: tools/lintstack.py
    """)
def main():
    """Dispatch on the optional first CLI argument; default action is validate."""
    option = sys.argv[1] if len(sys.argv) > 1 else "validate"
    if option == "generate":
        generate_error_keys()
    elif option == "validate":
        validate()
    else:
        usage()


if __name__ == "__main__":
    main()
| |
from .constants import MILLI_MICROS,SECOND_MICROS,MINUTE_MICROS
import calendar
from datetime import datetime
from dateutil import parser
from dateutil.tz import tzlocal
from .error import TimeConstructionError
from .sanedelta import SaneDelta
import pytz
# TODO: ensure that this is immutable, and that addition, etc. always produce a new object!
# Translation table from the constructor's accepted kwarg aliases to the
# multiplier converting that unit into epoch microseconds.
MICROS_TRANSLATIONS = (
        (('m','mins','minutes','epoch_mins','epoch_minutes'),MINUTE_MICROS),
        (('s','secs','seconds','epoch_secs','epoch_seconds'),SECOND_MICROS),
        (('ms','millis','milliseconds','epoch_millis','epoch_milliseconds'),MILLI_MICROS),
        (('us','micros','microseconds','epoch_micros','epoch_microseconds'),1) )
# Flattened alias -> multiplier lookup derived from the table above.
MICROS_TRANSLATION_HASH = dict((alt,v) for k,v in MICROS_TRANSLATIONS for alt in k)
class SaneTime(object):
    """
    A time stored in epoch microseconds, and optionally decorated with a timezone.
    An object of this class represents a moment in time.
    A moment in time experience in America/New_York is equal to the same moment in time experienced in Europe/Dublin
    """
    """
    Why not store in millis or seconds?
    datetime stores things in micros, and since millis already crosses over the 32bit boundary, we
    might as well store everything we got in the 64 bit numbers. This will force 32bit machines to
    go to long's, so maybe a little reduced performance there, but isn't everything on 64 bit now?
    This also avoids the unexpected scenario where two different datetimes would compare as equal
    when they were converted to sanetimes. As to why-not-seconds, well that's just lame. You can
    easily go to seconds or millis from sanetime by using the .s or .ms properties.
    When you do arithmetic with sanetime you are operating on microseconds. st + 1 creates a new
    sanetime that is 1 microsecond in the future from the st sanetime.
    When you do comparisons, all comparisons are happening at the microsecond level. You are
    comparing microseconds in time.
    """
    # NOTE(review): this class uses Python 2-only constructs throughout
    # (iteritems, basestring, unicode, long, cmp, __cmp__, __div__).
    def __init__(self, *args, **kwargs):
        """
        acceptable arg inputs:
        1) epoch micros integer (or int like)
        2) a datetime
        NOTE!! a naive datetime is assumed to be in UTC, unless you tell this
        method otherwise by also passing in a tz paramter. A timezoned datetime is
        preserved with the timezone it has
        3) a string representation that the dateutil parser can deal with
        4) multiple args just as datetime would accept
        acceptable keyworded inputs:
        1) us = an int/long in epoch micros
        2) ms = an int/long in epoch millis
        3) s = an int/long in epoch seconds
        4) m = an int/long in epoch minutes
        5) tz = a timezone (either a pytz timezone object, a recognizeable pytz timezone string, or a dateutil tz object)
        """
        # NOTE(review): `time` here is the module-level alias bound to SaneTime
        # at the bottom of this file, so this is effectively
        # super(SaneTime, self); it works only because the alias exists by the
        # time __init__ first runs.
        super(time,self).__init__()
        # Candidate epoch-micros values and timezones gathered from all inputs;
        # more than one distinct member in either set is a construction error.
        uss = set()
        tzs = set()
        naive_dt = None
        avoid_localize = False
        for k,v in kwargs.iteritems():
            if k in ('tz','timezone'):
                tzs.add(SaneTime.to_timezone(v))
            elif k in MICROS_TRANSLATION_HASH:
                uss.add(MICROS_TRANSLATION_HASH[k]*v)
            else:
                raise TimeConstructionError("Unexpected kwarg in SaneTime constructor! (%s = %s)" % (k,v))
        args = list(args)
        # 3..8 positional args are treated as datetime components.
        if len(args)>2 and len(args)<=8:
            args = [datetime(*args)]
        # Two args: (value, timezone).
        if len(args)==2:
            tzs.add(SaneTime.to_timezone(args.pop()))
        if len(args)==1:
            # import pdb; pdb.set_trace()
            arg = args.pop()
            if hasattr(arg,'__int__'):
                uss.add(int(arg))
                if hasattr(arg,'tz'): tzs.add(arg.tz)
            elif isinstance(arg, basestring):
                # A trailing "+Zone" token names a timezone; strip it off first.
                parts = arg.strip().split(' ')
                if len(parts)>1 and parts[-1].startswith('+'):
                    try:
                        tzs.add(SaneTime.to_timezone(parts[-1][1:]))
                        arg = ' '.join(parts[:-1])
                    except: pass
                utc = arg.endswith('Z') or arg.endswith('+00:00') # to deal with strange gunicorn issue -- doesn't want to use UTC time in these cases
                arg = parser.parse(arg)
                if arg.tzinfo: # dateutil's parsed tzinfo objects are inconsistent; normalize below
                    if utc: # put this in place to guard against wierd gunicorn issue -- gunicorn will attempt to force local timezone when there's an explicit UTC timezone associated! not sure where that's coming from.
                        tzs.add(pytz.utc)
                        arg = arg.replace(tzinfo=None)
                    elif isinstance(arg.tzinfo, tzlocal): # in case the parser decides to use tzlocal instead of a tzoffset
                        arg = arg.replace(tzinfo=None)
                    else:
                        # can't rely on the dateutil parser for timezone stuff-- so we go back to UTC and force tz to be set in other ways
                        avoid_localize = True # but we'll still convert back to UTC and allow timezone decoration
                        arg = arg.astimezone(pytz.utc).replace(tzinfo=None)
            if type(arg) == datetime:
                naive_dt = arg
                if naive_dt.tzinfo:
                    tzs.add(SaneTime.to_timezone(str(naive_dt.tzinfo)))
                    naive_dt = naive_dt.replace(tzinfo=None)
        if len(tzs)>1:
            raise TimeConstructionError("constructor arguments seem to specify more than one different timezone! I can't possibly resolve that! (timezones implied = %s)"%(tzs))
        # now we have enough info to figure out the tz:
        self.tz = len(tzs) and tzs.pop() or pytz.utc
        # and now that we've figured out tz, we can fully deconstruct the dt
        if naive_dt:
            if avoid_localize:
                uss.add(SaneTime.utc_datetime_to_us(naive_dt))
            else:
                uss.add(SaneTime.utc_datetime_to_us(self.tz.localize(naive_dt).astimezone(pytz.utc)))
        # if we got nothing yet for micros, then make it now
        if len(uss)==0:
            uss.add(SaneTime.utc_datetime_to_us(datetime.utcnow()))
        if len(uss)>1:
            raise TimeConstructionError("constructor arguments seem to specify more than one different time! I can't possibly resolve that! (micro times implied = %s)"%(uss))
        self.us = uss.pop()
        if len(args)>0:
            raise TimeConstructionError("Unexpected constructor arguments")
    # Epoch-unit accessors. NOTE(review): `/` is py2 integer division here; on
    # py3 these would silently become floats.
    @property
    def ms(self): return self.us/MILLI_MICROS
    epoch_milliseconds = epoch_millis = milliseconds = millis = ms
    @property
    def s(self): return self.us/SECOND_MICROS
    epoch_seconds = epoch_secs = seconds = secs = s
    @property
    def m(self): return self.us/MINUTE_MICROS
    epoch_minutes = epoch_mins = minutes = mins = m
    @property
    def micros(self): return self.us
    epoch_microseconds = epoch_micros = microseconds = micros
    @property
    def tz_name(self): return self.tz.zone
    # NOTE(review): reaches into a pytz private attribute (_tzname).
    @property
    def tz_abbr(self): return self.tz._tzname
    def set_tz(self, tz):
        # Mutates this instance's timezone decoration (the moment is unchanged).
        self.tz = self.__class__.to_timezone(tz); return self
    def with_tz(self, tz):
        # Non-mutating variant of set_tz: returns a re-decorated copy.
        return self.__class__(self.us,tz)
    @property
    def _tuple(self): return (self.us, self.tz)
    def strftime(self, *args, **kwargs): return self.datetime.strftime(*args, **kwargs)
    # py2-only rich comparison: compares epoch micros; tz is ignored.
    def __cmp__(self, other):
        if not hasattr(other, '__int__'): other = SaneTime(other)
        return cmp(self.us, int(other))
    def __hash__(self): return self.us.__hash__()
    # Arithmetic operates in epoch microseconds; tz decoration is preserved.
    def __add__(self, operand):
        if not hasattr(operand, '__int__'): operand = SaneTime(operand)
        return self.__class__(self.us + int(operand),tz=self.tz)
    def __sub__(self, operand):
        if not hasattr(operand, '__int__'): operand = SaneTime(operand)
        if isinstance(operand, SaneTime): return SaneDelta(self.us - int(operand))
        return self.__add__(-int(operand))
    def __mul__(self, operand):
        return self.us * int(operand)
    def __div__(self, operand):
        return self.us / int(operand)
    def __int__(self): return int(self.us)
    def __long__(self): return long(self.us)
    def __repr__(self): return u"SaneTime(%s,%s)" % (self.us,repr(self.tz))
    def __str__(self): return unicode(self).encode('utf-8')
    def __unicode__(self):
        # Renders date, then time only when non-midnight, then "+<zone>".
        dt = self.datetime
        micros = u".%06d"%dt.microsecond if dt.microsecond else ''
        time = u" %02d:%02d:%02d%s"%(dt.hour,dt.minute,dt.second,micros) if dt.microsecond or dt.second or dt.minute or dt.hour else ''
        return u"%04d-%02d-%02d%s +%s" % (dt.year, dt.month, dt.day, time, dt.tzinfo.zone)
    def clone(self):
        """Return a copy carrying the same epoch micros and timezone."""
        return self.__class__(self.us,self.tz)
    @property
    def ny_str(self):
        """This moment rendered as a New York local-time string."""
        return self.ny_ndt.strftime('%I:%M:%S%p %m/%d/%Y')
    # datetime conversions: utc/local/ny, each with a naive (tzinfo-stripped) twin.
    @property
    def utc_datetime(self): return SaneTime.us_to_utc_datetime(self.us)
    utc_dt = utc_datetime
    @property
    def utc_naive_datetime(self): return self.utc_datetime.replace(tzinfo=None)
    utc_ndt = utc_naive_datetime
    def to_timezoned_datetime(self, tz): return self.utc_datetime.astimezone(SaneTime.to_timezone(tz))
    def to_timezoned_naive_datetime(self, tz): return self.to_timezoned_datetime(tz).replace(tzinfo=None)
    @property
    def datetime(self): return self.to_timezoned_datetime(self.tz)
    dt = datetime
    @property
    def naive_datetime(self): return self.to_timezoned_naive_datetime(self.tz)
    ndt = naive_datetime
    @property
    def ny_datetime(self): return self.to_timezoned_datetime('America/New_York')
    ny_dt = ny_datetime
    @property
    def ny_naive_datetime(self): return self.to_timezoned_naive_datetime('America/New_York')
    ny_ndt = ny_naive_datetime
    # Calendar-component accessors, evaluated in this instance's decorated tz.
    @property
    def year(self): return self.dt.year
    @property
    def month(self): return self.dt.month
    @property
    def day(self): return self.dt.day
    @property
    def hour(self): return self.dt.hour
    @property
    def minute(self): return self.dt.minute
    @property
    def second(self): return self.dt.second
    @property
    def microsecond(self): return self.dt.microsecond
    #def add_datepart(self, months=None, years=None, auto_day_adjust=True):
        #months = (months or 0) + (years or 0) * 12
        #dt = self.utc_dt
        #day = dt.day
        #month = dt.month + months%12
        #year = dt.year + months/12
        #if auto_day_adjust:
            #if day>=29 and month==2:
                #leap_year = year%4==0 and (not year%100==0 or year%400==0)
                #day = 29 if leap_year else 28
            #elif day==31 and month in (4,6,9,11):
                #day = 30
        #return SaneTime(fucked_datetime(year,month,day,dt.hour,dt.minute,dt.second,dt.microsecond,tz=pytz.utc))
    @classmethod
    def utc_datetime_to_us(kls, dt):
        """Epoch micros for a UTC datetime (timetuple drops micros; re-added here)."""
        return calendar.timegm(dt.timetuple())*1000**2+dt.microsecond
    @classmethod
    def us_to_utc_datetime(kls, us):
        """UTC-localized datetime for the given epoch micros."""
        return pytz.utc.localize(datetime.utcfromtimestamp(us/10**6)).replace(microsecond = us%10**6)
    @classmethod
    def to_timezone(kls, tz):
        """Coerce a timezone name to a pytz timezone; pass non-strings through."""
        if not isinstance(tz, basestring): return tz
        return pytz.timezone(tz)
# null passthru utility
def ntime(*args, **kwargs):
    """None-passthru SaneTime factory.

    Returns None when the primary positional argument, or any non-tz keyword
    value, is None; otherwise constructs a SaneTime from the same arguments.
    """
    if args:
        if args[0] is None:
            return None
    elif kwargs:
        non_tz_values = [v for k, v in kwargs.iteritems() if k != 'tz']
        if None in non_tz_values:
            return None
    return SaneTime(*args, **kwargs)
# primary aliases: SaneTime is also exported under the lowercase names
# `time` and `sanetime`, and the None-passthru factory as `nsanetime`.
time = sanetime = SaneTime
nsanetime = ntime
| |
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""URL routing definitions, and some basic error/warmup handlers."""
__author__ = 'Sean Lip'
import feconf
import logging
from core.controllers import admin
from core.controllers import base
from core.controllers import editor
from core.controllers import feedback
from core.controllers import galleries
from core.controllers import home
from core.controllers import moderator
from core.controllers import pages
from core.controllers import profile
from core.controllers import reader
from core.controllers import recent_commits
from core.controllers import resources
from core.controllers import services
from core.platform import models
transaction_services = models.Registry.import_transaction_services()
from mapreduce import main as mapreduce_main
from mapreduce import parameters as mapreduce_parameters
import webapp2
from webapp2_extras.routes import RedirectRoute
class FrontendErrorHandler(base.BaseHandler):
    """Handles errors arising from the frontend."""

    REQUIRE_PAYLOAD_CSRF_CHECK = False

    def post(self):
        """Log the frontend-reported error and echo the standard JSON values."""
        reported_error = self.payload.get('error')
        logging.error('Frontend error: %s' % reported_error)
        self.render_json(self.values)
class WarmupHandler(base.BaseHandler):
    """Handles warmup requests."""
    def get(self):
        """Handles GET warmup requests.

        Intentionally a no-op: serving any successful response is enough for
        the /_ah/warmup route, since the work happens at module import time.
        """
        pass
# Regex fragment matching URL-safe base64-style ids, used when building the
# route patterns below.
r = '[A-Za-z0-9=_-]+'
def generate_static_url_tuples():
    """Map every static path prefix in feconf.PATH_MAP to the static file handler."""
    patterns = [url + '.+' for url in feconf.PATH_MAP]
    return [(pattern, resources.StaticFileHandler) for pattern in patterns]
def get_redirect_route(regex_route, handler, name, defaults=None):
    """Returns a route that redirects /foo/ to /foo.

    Warning: this method strips off parameters after the trailing slash. URLs
    with parameters should be formulated without the trailing slash.
    """
    return RedirectRoute(
        regex_route, handler, name, strict_slash=True,
        defaults=defaults if defaults is not None else {})
def authorization_wrapper(self, *args, **kwargs):
    """Dispatch only requests originating from the App Engine task queue.

    developers.google.com/appengine/docs/python/taskqueue/overview-push
    promises that the X-AppEngine-TaskName header cannot be set by external
    callers, so its presence proves the request is internal.
    """
    if 'X-AppEngine-TaskName' in self.request.headers:
        self.real_dispatch(*args, **kwargs)
        return
    self.response.out.write('Forbidden')
    self.response.set_status(403)
def ui_access_wrapper(self, *args, **kwargs):
    # Pass-through dispatch for the mapreduce UI handlers. Unlike
    # authorization_wrapper, no access check is performed here.
    self.real_dispatch(*args, **kwargs)
# Partition the handlers exported by the mapreduce library into UI-facing
# paths (rewritten under /mapreduce/ui) and worker-facing paths (rewritten
# under /mapreduce/worker), wrapping each handler's dispatch() accordingly.
mapreduce_handlers = []
for path, handler_class in mapreduce_main.create_handlers_map():
    if path.startswith('.*/pipeline'):
        # Pipeline RPC and status-page endpoints are UI-facing; all other
        # pipeline endpoints are worker-only.
        if 'pipeline/rpc/' in path or path == '.*/pipeline(/.+)':
            path = path.replace('.*/pipeline', '/mapreduce/ui/pipeline')
        else:
            path = path.replace('.*/pipeline', '/mapreduce/worker/pipeline')
    else:
        if '_callback' in path:
            path = path.replace('.*', '/mapreduce/worker', 1)
        elif '/list_configs' in path:
            # This endpoint is deliberately dropped from the route table.
            continue
        else:
            path = path.replace('.*', '/mapreduce/ui', 1)
    # UI paths get the permissive ui_access_wrapper; everything else gets
    # authorization_wrapper, which rejects requests that did not come from
    # the task queue. The real_dispatch check makes the monkey-patch
    # idempotent if the same handler class appears under multiple paths.
    if '/ui/' in path or path.endswith('/ui'):
        if (hasattr(handler_class, 'dispatch') and
                not hasattr(handler_class, 'real_dispatch')):
            handler_class.real_dispatch = handler_class.dispatch
            handler_class.dispatch = ui_access_wrapper
            mapreduce_handlers.append((path, handler_class))
    else:
        if (hasattr(handler_class, 'dispatch') and
                not hasattr(handler_class, 'real_dispatch')):
            handler_class.real_dispatch = handler_class.dispatch
            handler_class.dispatch = authorization_wrapper
            mapreduce_handlers.append((path, handler_class))
# Tell map/reduce internals that this is now the base path to use.
mapreduce_parameters.config.BASE_PATH = '/mapreduce/worker'
# Register the URLs with the classes responsible for handling them.
urls = [
    # App Engine infrastructure.
    get_redirect_route(r'/_ah/warmup', WarmupHandler, 'warmup_handler'),
    # User home / notifications pages.
    get_redirect_route(
        r'/notifications_dashboard', home.NotificationsDashboardPage,
        'notifications_dashboard_handler'),
    get_redirect_route(
        r'/notificationsdashboardhandler/data',
        home.NotificationsDashboardHandler,
        # NOTE(review): this name duplicates the page route's name above;
        # route names feed reverse lookups (uri_for), so they should be
        # unique — confirm intent before renaming.
        'notifications_dashboard_handler'),
    get_redirect_route(
        r'/my_explorations', home.MyExplorationsPage, 'my_explorations_page'),
    get_redirect_route(
        r'/myexplorationshandler/data', home.MyExplorationsHandler,
        'my_explorations_handler'),
    # Static informational pages.
    get_redirect_route(r'/about', pages.AboutPage, 'about_page'),
    get_redirect_route(
        r'/editor_tutorial', pages.EditorTutorialPage, 'editor_tutorial_page'),
    get_redirect_route(
        r'/participate', pages.ParticipatePage, 'participate_page'),
    get_redirect_route(
        r'/site_guidelines', pages.ParticipatePage,
        'redirect_to_participate_page'),
    get_redirect_route(
        r'/contact', pages.AboutPage, 'redirect_to_about_page'),
    get_redirect_route(r'/forum', pages.ForumPage, 'forum_page'),
    # Admin pages and handlers.
    get_redirect_route(r'/admin', admin.AdminPage, 'admin_page'),
    get_redirect_route(r'/adminhandler', admin.AdminHandler, 'admin_handler'),
    get_redirect_route(
        r'/adminjoboutput', admin.AdminJobOutput, 'admin_job_output'),
    get_redirect_route(
        r'/admintopicscsvdownloadhandler',
        admin.AdminTopicsCsvDownloadHandler,
        'admin_topics_csv_download_handler'),
    # Shared resources (images, editor templates, value generators).
    get_redirect_route(
        r'/imagehandler/<exploration_id>/<encoded_filepath>',
        resources.ImageHandler, 'image_handler'),
    get_redirect_route(
        r'/object_editor_template/<obj_type>',
        resources.ObjectEditorTemplateHandler, 'object_editor_template'),
    get_redirect_route(
        r'/value_generator_handler/<generator_id>',
        resources.ValueGeneratorHandler, 'value_generator_handler'),
    # Gallery pages.
    get_redirect_route(r'/', galleries.GalleryPage, 'gallery_page'),
    get_redirect_route(
        r'%s' % feconf.GALLERY_URL, galleries.GalleryPage, 'gallery_page'),
    get_redirect_route(
        r'%s' % feconf.GALLERY_DATA_URL, galleries.GalleryHandler,
        'gallery_handler'),
    get_redirect_route(
        r'%s' % feconf.LEARN_GALLERY_URL, galleries.GalleryRedirectPage,
        'learn_gallery_page'),
    get_redirect_route(
        r'%s' % feconf.PLAYTEST_QUEUE_URL, galleries.GalleryRedirectPage,
        'playtest_queue_page'),
    get_redirect_route(
        r'%s' % feconf.CONTRIBUTE_GALLERY_URL, galleries.GalleryRedirectPage,
        'contribute_gallery_page'),
    get_redirect_route(
        r'%s' % feconf.NEW_EXPLORATION_URL,
        galleries.NewExploration, 'new_exploration'),
    get_redirect_route(
        r'%s' % feconf.UPLOAD_EXPLORATION_URL,
        galleries.UploadExploration, 'upload_exploration'),
    get_redirect_route(
        r'/explorationsummarieshandler/data',
        galleries.ExplorationSummariesHandler, 'exploration_summaries_handler'),
    # Profile / preferences / signup.
    get_redirect_route(
        r'/profile/<username>', profile.ViewProfilePage, 'profile_page'),
    get_redirect_route(
        r'/preferences', profile.PreferencesPage, 'preferences_page'),
    get_redirect_route(
        r'/preferenceshandler/data', profile.PreferencesHandler,
        'preferences_handler'),
    get_redirect_route(
        r'/preferenceshandler/profile_picture', profile.ProfilePictureHandler,
        # NOTE(review): 'profle_picture_handler' looks like a typo for
        # 'profile_picture_handler'; renaming may break uri_for() callers,
        # so confirm before fixing.
        'profle_picture_handler'),
    get_redirect_route(
        r'%s' % feconf.SIGNUP_URL, profile.SignupPage, 'signup_page'),
    get_redirect_route(
        r'%s' % feconf.SIGNUP_DATA_URL, profile.SignupHandler,
        'signup_handler'),
    get_redirect_route(
        r'%s' % feconf.USERNAME_CHECK_DATA_URL,
        profile.UsernameCheckHandler, 'username_check_handler'),
    # Moderator pages.
    get_redirect_route(
        r'/moderator', moderator.ModeratorPage, 'moderator_page'),
    get_redirect_route(
        r'/moderatorhandler/user_services',
        moderator.UserServiceHandler, 'moderator_user_service_handler'),
    # Reader (exploration player) pages and event handlers.
    get_redirect_route(
        r'%s/<exploration_id>' % feconf.EXPLORATION_URL_PREFIX,
        reader.ExplorationPage, 'exploration_page'),
    get_redirect_route(
        r'%s/<exploration_id>' % feconf.EXPLORATION_INIT_URL_PREFIX,
        reader.ExplorationHandler, 'exploration_handler'),
    get_redirect_route(
        r'/explorehandler/exploration_start_event/<exploration_id>',
        reader.ExplorationStartEventHandler,
        'exploration_start_event_handler'),
    get_redirect_route(
        r'/explorehandler/state_hit_event/<exploration_id>',
        reader.StateHitEventHandler, 'state_hit_event_handler'),
    get_redirect_route(
        r'/explorehandler/answer_submitted_event/<exploration_id>',
        reader.AnswerSubmittedEventHandler, 'answer_submitted_event_handler'),
    get_redirect_route(
        r'/explorehandler/give_feedback/<exploration_id>',
        reader.ReaderFeedbackHandler, 'reader_feedback_handler'),
    get_redirect_route(
        r'/explorehandler/exploration_complete_event/<exploration_id>',
        reader.ExplorationCompleteEventHandler, 'reader_complete_handler'),
    get_redirect_route(
        r'/explorehandler/exploration_maybe_leave_event/<exploration_id>',
        reader.ExplorationMaybeLeaveHandler, 'reader_leave_handler'),
    get_redirect_route(
        r'/explorehandler/classify/<exploration_id>', reader.ClassifyHandler,
        'reader_classify_handler'),
    get_redirect_route(
        r'/explorehandler/rating/<exploration_id>',
        reader.RatingHandler, 'rating_handler'),
    get_redirect_route(
        r'/explorehandler/recommendations/<exploration_id>',
        reader.RecommendationsHandler, 'recommendations_handler'),
    # Editor (exploration creation) pages and handlers.
    get_redirect_route(
        r'%s/<exploration_id>' % feconf.EDITOR_URL_PREFIX,
        editor.ExplorationPage, 'editor_exploration_page'),
    get_redirect_route(
        r'/createhandler/data/<exploration_id>', editor.ExplorationHandler,
        'editor_exploration_handler'),
    get_redirect_route(
        r'/createhandler/change_list_summary/<exploration_id>',
        editor.ChangeListSummaryHandler, 'change_list_summary'),
    get_redirect_route(
        r'/createhandler/download/<exploration_id>',
        editor.ExplorationDownloadHandler, 'exploration_download_handler'),
    get_redirect_route(
        r'/createhandler/download_state/<exploration_id>',
        editor.StateDownloadHandler, 'state_download_handler'),
    get_redirect_route(
        r'/createhandler/imageupload/<exploration_id>',
        editor.ImageUploadHandler, 'image_upload_handler'),
    get_redirect_route(
        r'/createhandler/resolved_answers/<exploration_id>/<escaped_state_name>',
        editor.ResolvedAnswersHandler, 'resolved_answers_handler'),
    get_redirect_route(
        r'/createhandler/resource_list/<exploration_id>',
        editor.ExplorationResourcesHandler, 'exploration_resources_handler'),
    get_redirect_route(
        r'/createhandler/revert/<exploration_id>',
        editor.ExplorationRevertHandler, 'exploration_revert_handler'),
    get_redirect_route(
        r'%s/<exploration_id>' % feconf.EXPLORATION_RIGHTS_PREFIX,
        editor.ExplorationRightsHandler, 'exploration_rights_handler'),
    get_redirect_route(
        r'/createhandler/snapshots/<exploration_id>',
        editor.ExplorationSnapshotsHandler, 'exploration_snapshots_handler'),
    get_redirect_route(
        r'/createhandler/statisticsversion/<exploration_id>',
        editor.ExplorationStatsVersionsHandler, 'exploration_stats_versions_handler'),
    get_redirect_route(
        r'/createhandler/statistics/<exploration_id>/<exploration_version>',
        editor.ExplorationStatisticsHandler, 'exploration_statistics_handler'),
    get_redirect_route(
        r'/createhandler/state_rules_stats/<exploration_id>/<escaped_state_name>',
        editor.StateRulesStatsHandler, 'state_rules_stats_handler'),
    get_redirect_route(
        r'/createhandler/started_tutorial_event/<exploration_id>',
        editor.StartedTutorialEventHandler, 'started_tutorial_event_handler'),
    # Feedback and recent-activity handlers.
    get_redirect_route(
        r'%s' % feconf.RECENT_COMMITS_DATA_URL,
        recent_commits.RecentCommitsHandler, 'recent_commits_handler'),
    get_redirect_route(
        r'%s' % feconf.RECENT_FEEDBACK_MESSAGES_DATA_URL,
        feedback.RecentFeedbackMessagesHandler,
        'recent_feedback_messages_handler'),
    get_redirect_route(
        r'%s/<exploration_id>' % feconf.FEEDBACK_LAST_UPDATED_URL_PREFIX,
        feedback.FeedbackLastUpdatedHandler, 'feedback_last_updated_handler'),
    get_redirect_route(
        r'%s/<exploration_id>' % feconf.FEEDBACK_THREADLIST_URL_PREFIX,
        feedback.ThreadListHandler, 'feedback_threadlist_handler'),
    get_redirect_route(
        r'%s/<exploration_id>/<thread_id>' % feconf.FEEDBACK_THREAD_URL_PREFIX,
        feedback.ThreadHandler, 'feedback_thread_handler'),
    get_redirect_route(
        r'/notificationshandler', home.NotificationsHandler,
        'notifications_handler'),
    # Miscellaneous services.
    get_redirect_route(
        r'/filereadhandler', services.FileReadHandler, 'file_read_handler'),
    get_redirect_route(
        r'/frontend_errors', FrontendErrorHandler, 'frontend_error_handler'),
    get_redirect_route(
        r'/logout', base.LogoutPage, 'logout_page_handler'),
    # 404 error handler.
    get_redirect_route(r'/<:.*>', base.Error404Handler, 'error_404_handler'),
]
# Mapreduce routes are prepended so they take precedence over the catch-all
# 404 route at the end of `urls`.
urls = mapreduce_handlers + urls
# The WSGI entry point, wrapped so each request runs in a transaction context.
app = transaction_services.toplevel_wrapper(
    webapp2.WSGIApplication(urls, debug=feconf.DEBUG))
# ---- (non-code residue removed: a stray '| |' table artifact, which is a
# syntax error in Python, previously appeared here) ----
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Functional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.python.autograph.core import ag_ctx as autograph_ctx
from tensorflow.python.autograph.impl import api as autograph
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["map_fn"])
@deprecation.deprecated_args(None, "Use fn_output_signature instead", "dtype")
def map_fn(fn,
           elems,
           dtype=None,
           parallel_iterations=None,
           back_prop=True,
           swap_memory=False,
           infer_shape=True,
           name=None,
           fn_output_signature=None):
  """Transforms `elems` by applying `fn` to each element unstacked on axis 0.

  See also `tf.scan`.

  `map_fn` unstacks `elems` on axis 0 to obtain a sequence of elements;
  calls `fn` to transform each element; and then stacks the transformed
  values back together.

  #### Mapping functions with single-Tensor inputs and outputs

  If `elems` is a single tensor and `fn`'s signature is `tf.Tensor->tf.Tensor`,
  then `map_fn(fn, elems)` is equivalent to
  `tf.stack([fn(elem) for elem in tf.unstack(elems)])`.  E.g.:

  >>> tf.map_fn(fn=lambda t: tf.range(t, t + 3), elems=tf.constant([3, 5, 2]))
  <tf.Tensor: shape=(3, 3), dtype=int32, numpy=
    array([[3, 4, 5],
           [5, 6, 7],
           [2, 3, 4]], dtype=int32)>

  `map_fn(fn, elems).shape = [elems.shape[0]] + fn(elems[0]).shape`.

  #### Mapping functions with multi-arity inputs and outputs

  `map_fn` also supports functions with multi-arity inputs and outputs:

  * If `elems` is a tuple (or nested structure) of tensors, then those tensors
    must all have the same outer-dimension size (`num_elems`); and `fn` is
    used to transform each tuple (or structure) of corresponding slices from
    `elems`.  E.g., if `elems` is a tuple `(t1, t2, t3)`, then `fn` is used to
    transform each tuple of slices `(t1[i], t2[i], t3[i])`
    (where `0 <= i < num_elems`).

  * If `fn` returns a tuple (or nested structure) of tensors, then the
    result is formed by stacking corresponding elements from those structures.

  #### Specifying `fn`'s output signature

  If `fn`'s input and output signatures are different, then the output
  signature must be specified using `fn_output_signature`.  (The input and
  output signatures differ if their structures, dtypes, or tensor types do
  not match).  E.g.:

  >>> tf.map_fn(fn=tf.strings.length,  # input & output have different dtypes
  ...           elems=tf.constant(["hello", "moon"]),
  ...           fn_output_signature=tf.int32)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([5, 4], dtype=int32)>
  >>> tf.map_fn(fn=tf.strings.join,  # input & output have different structures
  ...           elems=[tf.constant(['The', 'A']), tf.constant(['Dog', 'Cat'])],
  ...           fn_output_signature=tf.string)
  <tf.Tensor: shape=(2,), dtype=string,
   numpy=array([b'TheDog', b'ACat'], dtype=object)>

  `fn_output_signature` can be specified using any of the following:

  * A `tf.DType` or `tf.TensorSpec` (to describe a `tf.Tensor`)
  * A `tf.RaggedTensorSpec` (to describe a `tf.RaggedTensor`)
  * A `tf.SparseTensorSpec` (to describe a `tf.sparse.SparseTensor`)
  * A (possibly nested) tuple, list, or dict containing the above types.

  #### RaggedTensors

  `map_fn` supports `tf.RaggedTensor` inputs and outputs.  In particular:

  * If `elems` is a `RaggedTensor`, then `fn` will be called with each
    row of that ragged tensor.
    * If `elems` has only one ragged dimension, then the values passed to
      `fn` will be `tf.Tensor`s.
    * If `elems` has multiple ragged dimensions, then the values passed to
      `fn` will be `tf.RaggedTensor`s with one fewer ragged dimension.

  * If the result of `map_fn` should be a `RaggedTensor`, then use a
    `tf.RaggedTensorSpec` to specify `fn_output_signature`.
    * If `fn` returns `tf.Tensor`s with varying sizes, then use a
      `tf.RaggedTensorSpec` with `ragged_rank=0` to combine them into a
      single ragged tensor (which will have ragged_rank=1).
    * If `fn` returns `tf.RaggedTensor`s, then use a `tf.RaggedTensorSpec`
      with the same `ragged_rank`.

  >>> # Example: RaggedTensor input
  >>> rt = tf.ragged.constant([[1, 2, 3], [], [4, 5], [6]])
  >>> tf.map_fn(tf.reduce_sum, rt, fn_output_signature=tf.int32)
  <tf.Tensor: shape=(4,), dtype=int32, numpy=array([6, 0, 9, 6], dtype=int32)>

  >>> # Example: RaggedTensor output
  >>> elems = tf.constant([3, 5, 0, 2])
  >>> tf.map_fn(tf.range, elems,
  ...           fn_output_signature=tf.RaggedTensorSpec(shape=[None],
  ...                                                   dtype=tf.int32))
  <tf.RaggedTensor [[0, 1, 2], [0, 1, 2, 3, 4], [], [0, 1]]>

  Note: `map_fn` should only be used if you need to map a function over the
  *rows* of a `RaggedTensor`.  If you wish to map a function over the
  individual values, then you should use:

  * `tf.ragged.map_flat_values(fn, rt)`
    (if fn is expressible as TensorFlow ops)
  * `rt.with_flat_values(map_fn(fn, rt.flat_values))`
    (otherwise)

  E.g.:

  >>> rt = tf.ragged.constant([[1, 2, 3], [], [4, 5], [6]])
  >>> tf.ragged.map_flat_values(lambda x: x + 2, rt)
  <tf.RaggedTensor [[3, 4, 5], [], [6, 7], [8]]>

  #### SparseTensors

  `map_fn` supports `tf.sparse.SparseTensor` inputs and outputs.  In
  particular:

  * If `elems` is a `SparseTensor`, then `fn` will be called with each row
    of that sparse tensor.  In particular, the value passed to `fn` will be a
    `tf.sparse.SparseTensor` with one fewer dimension than `elems`.

  * If the result of `map_fn` should be a `SparseTensor`, then use a
    `tf.SparseTensorSpec` to specify `fn_output_signature`.  The individual
    `SparseTensor`s returned by `fn` will be stacked into a single
    `SparseTensor` with one more dimension.

  >>> # Example: SparseTensor input
  >>> st = tf.sparse.SparseTensor([[0, 0], [2, 0], [2, 1]], [2, 3, 4], [4, 4])
  >>> tf.map_fn(tf.sparse.reduce_sum, st, fn_output_signature=tf.int32)
  <tf.Tensor: shape=(4,), dtype=int32, numpy=array([2, 0, 7, 0], dtype=int32)>

  >>> # Example: SparseTensor output
  >>> tf.sparse.to_dense(
  ...     tf.map_fn(tf.sparse.eye, tf.constant([2, 3]),
  ...               fn_output_signature=tf.SparseTensorSpec(None, tf.float32)))
  <tf.Tensor: shape=(2, 3, 3), dtype=float32, numpy=
    array([[[1., 0., 0.],
            [0., 1., 0.],
            [0., 0., 0.]],
           [[1., 0., 0.],
            [0., 1., 0.],
            [0., 0., 1.]]], dtype=float32)>

  Note: `map_fn` should only be used if you need to map a function over the
  *rows* of a `SparseTensor`.  If you wish to map a function over the nonzero
  values, then you should use:

  * If the function is expressible as TensorFlow ops, use:
    ```python
    tf.sparse.SparseTensor(st.indices, fn(st.values), st.dense_shape)
    ```
  * Otherwise, use:
    ```python
    tf.sparse.SparseTensor(st.indices, tf.map_fn(fn, st.values),
                           st.dense_shape)
    ```

  #### `map_fn` vs. vectorized operations

  `map_fn` will apply the operations used by `fn` to each element of `elems`,
  resulting in `O(elems.shape[0])` total operations.  This is somewhat
  mitigated by the fact that `map_fn` can process elements in parallel.
  However, a transform expressed using `map_fn` is still typically less
  efficient than an equivalent transform expressed using vectorized
  operations.

  `map_fn` should typically only be used if one of the following is true:

  * It is difficult or expensive to express the desired transform with
    vectorized operations.
  * `fn` creates large intermediate values, so an equivalent vectorized
    transform would take too much memory.
  * Processing elements in parallel is more efficient than an equivalent
    vectorized transform.
  * Efficiency of the transform is not critical, and using `map_fn` is
    more readable.

  E.g., the example given above that maps `fn=lambda t: tf.range(t, t + 3)`
  across `elems` could be rewritten more efficiently using vectorized ops:

  >>> elems = tf.constant([3, 5, 2])
  >>> tf.range(3) + tf.expand_dims(elems, 1)
  <tf.Tensor: shape=(3, 3), dtype=int32, numpy=
    array([[3, 4, 5],
           [5, 6, 7],
           [2, 3, 4]], dtype=int32)>

  In some cases, `tf.vectorized_map` can be used to automatically convert a
  function to a vectorized equivalent.

  #### Eager execution

  When executing eagerly, `map_fn` does not execute in parallel even if
  `parallel_iterations` is set to a value > 1. You can still get the
  performance benefits of running a function in parallel by using the
  `tf.function` decorator:

  >>> fn=lambda t: tf.range(t, t + 3)
  >>> @tf.function
  ... def func(elems):
  ...   return tf.map_fn(fn, elems, parallel_iterations=3)
  >>> func(tf.constant([3, 5, 2]))
  <tf.Tensor: shape=(3, 3), dtype=int32, numpy=
    array([[3, 4, 5],
           [5, 6, 7],
           [2, 3, 4]], dtype=int32)>

  Note: if you use the `tf.function` decorator, any non-TensorFlow Python
  code that you may have written in your function won't get executed. See
  `tf.function` for more details. The recommendation would be to debug without
  `tf.function` but switch to it to get performance benefits of running
  `map_fn` in parallel.

  Args:
    fn: The callable to be performed.  It accepts one argument, which will have
      the same (possibly nested) structure as `elems`.  Its output must have
      the same structure as `fn_output_signature` if one is provided; otherwise
      it must have the same structure as `elems`.
    elems: A tensor or (possibly nested) sequence of tensors, each of which
      will be unstacked along their first dimension.  `fn` will be applied to
      the nested sequence of the resulting slices.  `elems` may include ragged
      and sparse tensors. `elems` must consist of at least one tensor.
    dtype: Deprecated: Equivalent to `fn_output_signature`.
    parallel_iterations: (optional) The number of iterations allowed to run in
      parallel. When graph building, the default value is 10. While executing
      eagerly, the default value is set to 1.
    back_prop: (optional) False disables support for back propagation.
    swap_memory: (optional) True enables GPU-CPU memory swapping.
    infer_shape: (optional) False disables tests for consistent output shapes.
    name: (optional) Name prefix for the returned tensors.
    fn_output_signature: The output signature of `fn`. Must be specified if
      `fn`'s input and output signatures are different (i.e., if their
      structures, dtypes, or tensor types do not match).
      `fn_output_signature` can be specified using any of the following:

      * A `tf.DType` or `tf.TensorSpec` (to describe a `tf.Tensor`)
      * A `tf.RaggedTensorSpec` (to describe a `tf.RaggedTensor`)
      * A `tf.SparseTensorSpec` (to describe a `tf.sparse.SparseTensor`)
      * A (possibly nested) tuple, list, or dict containing the above types.

  Returns:
    A tensor or (possibly nested) sequence of tensors.  Each tensor stacks the
    results of applying `fn` to tensors unstacked from `elems` along the first
    dimension, from first to last.  The result may include ragged and sparse
    tensors.

  Raises:
    TypeError: if `fn` is not callable or the structure of the output of
      `fn` and `fn_output_signature` do not match.
    ValueError: if the lengths of the output of `fn` and `fn_output_signature`
      do not match, or if the `elems` does not contain any tensor.

  Examples:

    >>> elems = np.array([1, 2, 3, 4, 5, 6])
    >>> tf.map_fn(lambda x: x * x, elems)
    <tf.Tensor: shape=(6,), dtype=int64, numpy=array([ 1,  4,  9, 16, 25, 36])>

    >>> elems = (np.array([1, 2, 3]), np.array([-1, 1, -1]))
    >>> tf.map_fn(lambda x: x[0] * x[1], elems, fn_output_signature=tf.int64)
    <tf.Tensor: shape=(3,), dtype=int64, numpy=array([-1,  2, -3])>

    >>> elems = np.array([1, 2, 3])
    >>> tf.map_fn(lambda x: (x, -x), elems,
    ...          fn_output_signature=(tf.int64, tf.int64))
    (<tf.Tensor: shape=(3,), dtype=int64, numpy=array([1, 2, 3])>,
     <tf.Tensor: shape=(3,), dtype=int64, numpy=array([-1, -2, -3])>)
  """
  # This function uses a `while_loop` to call `fn` on each value of the input
  # tensor(s) (unstacked on dimension 0).  The following sequence of variables
  # are used to transform the input tensor(s) (`elems`) into the output
  # tensor(s) (`result`):
  #
  #   - Preparing and unstacking input values for the while_loop:
  #     - elems: The input tensor(s) to map_fn. May include composite tensors.
  #     - elems_flat: Flattened list of tensors from elems (using nest.flatten)
  #       May include composite tensors.
  #     - elems_batchable: Concatenation of "batchable tensor lists" for each
  #       tensor in elems_flat.  This "boxes" composite tensors
  #       into sliceable tf.Tensor objects.  For more info see:
  #       TensorSpec._to_batched_tensor_list
  #     - elems_batchable_ta: List of TensorArrays used to unstack each Tensor
  #       in elems_batchable into elems_value_batchable.
  #
  #   - Calling `fn` on each unstacked value in the body of the while_loop:
  #     - elems_value_batchable: Single unstacked value from elems_batchable.
  #     - elems_value_flat: Single unstacked value from elems_flat,
  #       constructed from elems_value_batchable (using
  #       TensorSpec._from_tensor_list).
  #     - elems_value: Single unstacked value from elems (the input to fn).
  #     - result_value: Result of calling `fn(elems_value)`.  May contain
  #       composite tensors.
  #     - result_value_flat: Flattened list of tensors from result_value.
  #       May contain composite tensors.
  #     - result_value_batchable: Concatenation of batchable tensor lists for
  #       each tensor in result_value_flat
  #       (using TensorSpec._to_tensor_list).
  #
  #   - Collecting and stacking output values from the while_loop:
  #     - result_batchable_ta: List of TensorArrays used to stack each tensor
  #       from result_value_batchable into result_batchable.
  #     - result_batchable: Stacked tensors from result_batchable_ta.
  #     - result_flat: Flat list of tensors for the result, constructed from
  #       result_batchable (using TensorSpec._from_tensor_list).
  #     - result: Structured result value packed from result_flat
  #       (using nest.pack_sequence_as).
  # `dtype` is the deprecated alias for `fn_output_signature`.
  if fn_output_signature is None:
    fn_output_signature = dtype
  if not callable(fn):
    raise TypeError("fn must be callable.")
  in_graph_mode = not context.executing_eagerly()
  # Set the default number of parallel_iterations depending on graph/eager mode.
  if in_graph_mode and not parallel_iterations:
    parallel_iterations = 10
  elif not in_graph_mode and not parallel_iterations:
    parallel_iterations = 1
  elif not in_graph_mode and parallel_iterations > 1:
    logging.log_first_n(
        logging.WARN, "Setting parallel_iterations > 1 has no "
        "effect when executing eagerly. Consider calling map_fn"
        " with tf.function to execute fn in "
        "parallel.", 1)
    parallel_iterations = 1
  # Flatten the input tensors, and get the TypeSpec for each one.
  elems_flat = nest.flatten(elems)
  # Check in case this is an empty list
  if len(elems_flat) == 0:
    raise ValueError(
        "elems must be a Tensor or (possibly nested) sequence of Tensors. "
        "Got {}, which does not contain any Tensors.".format(elems))
  elems_flat_signature = [type_spec.type_spec_from_value(e) for e in elems_flat]
  elems_unflatten = lambda x: nest.pack_sequence_as(elems, x)
  # Flatten fn's output signature.
  if fn_output_signature is None:
    # If fn_output_signature was not specified, then assume that it matches the
    # input signature.
    result_flat_signature = [
        _most_general_compatible_type(s)._unbatch()  # pylint: disable=protected-access
        for s in elems_flat_signature
    ]
    result_unflatten = elems_unflatten
  else:
    result_flat_signature = [
        _dtype_to_spec(d) for d in nest.flatten(fn_output_signature)
    ]
    result_unflatten = lambda x: nest.pack_sequence_as(fn_output_signature, x)
  with ops.name_scope(name, "map", elems_flat):
    # TODO(akshayka): Remove the in_graph_mode check once caching devices are
    # supported in Eager
    if in_graph_mode:
      # Any get_variable calls in fn will cache the first call locally
      # and not issue repeated network I/O requests for each iteration.
      varscope = vs.get_variable_scope()
      varscope_caching_device_was_none = False
      if varscope.caching_device is None:
        # TODO(ebrevdo): Change to using colocate_with here and in other
        # methods.
        varscope.set_caching_device(lambda op: op.device)
        varscope_caching_device_was_none = True
    elems_flat = [
        ops.convert_to_tensor_or_composite(t, name="elem") for t in elems_flat
    ]
    # Check that inputs are not scalars.
    first_elem = elems_flat[0]
    elems_static_shape = first_elem.shape
    if elems_static_shape.ndims is not None and elems_static_shape.ndims < 1:
      if len(elems_flat) == 1:
        raise ValueError("elems must be a 1+ dimensional Tensor, not a scalar")
      else:
        raise ValueError(
            "elements in elems must be 1+ dimensional Tensors, not scalars"
        )
    # Box any composite tensors into tensor lists.
    elems_batchable = _elems_flat_to_batchable(elems_flat)
    # Find the number of iterations, n. (may be known statically.)
    n_static = tensor_shape.Dimension(
        tensor_shape.dimension_value(
            elems_batchable[0].get_shape().with_rank_at_least(1)[0]))
    # All inputs must agree on the outer-dimension size.
    for tensor in elems_batchable[1:]:
      n_static.assert_is_compatible_with(
          tensor_shape.Dimension(
              tensor_shape.dimension_value(
                  tensor.get_shape().with_rank_at_least(1)[0])))
    n = n_static.value or array_ops.shape(elems_batchable[0])[0]
    # Convert elems to tensor array.
    # TODO(edloper): Should we set infer_shape=False for composite tensors?
    elems_batchable_ta = [
        tensor_array_ops.TensorArray(
            dtype=t.dtype, size=n, dynamic_size=False, infer_shape=True)
        for t in elems_batchable
    ]
    # Unpack elements
    elems_batchable_ta = [
        ta.unstack(t) for (ta, t) in zip(elems_batchable_ta, elems_batchable)
    ]
    i = constant_op.constant(0)
    # Prepare result tensor array.
    # TODO(edloper): Should we set infer_shape=False for composite tensors?
    result_batchable_tensor_spec = (
        _result_flat_signature_to_batchable_tensor_spec(result_flat_signature))
    result_batchable_ta = []
    for spec in result_batchable_tensor_spec:
      result_batchable_ta.append(
          tensor_array_ops.TensorArray(
              dtype=spec.dtype, size=n, dynamic_size=False,
              infer_shape=infer_shape, element_shape=spec.shape))
    def compute(i, tas):
      """The loop body of map_fn.

      Args:
        i: the loop counter
        tas: the flat TensorArray accumulator list

      Returns:
        (i + 1, tas): the updated counter + updated TensorArrays

      Raises:
        TypeError: if fn_output_signature and result_value structure don't
          match
        ValueError: if fn_output_signature and result_value lengths don't
          match
      """
      elems_value_batchable = [ta.read(i) for ta in elems_batchable_ta]
      elems_value_flat = _elems_value_batchable_to_flat(elems_value_batchable,
                                                        elems_flat_signature)
      elems_value = elems_unflatten(elems_value_flat)
      # Convert fn through autograph so that control flow in its body is
      # handled under tf.function tracing.
      ag_ctx = autograph_ctx.control_status_ctx()
      autographed_fn = autograph.tf_convert(fn, ag_ctx)
      result_value = autographed_fn(elems_value)
      nest.assert_same_structure(fn_output_signature or elems, result_value)
      result_value_flat = nest.flatten(result_value)
      result_value_batchable = _result_value_flat_to_batchable(
          result_value_flat, result_flat_signature)
      tas = [
          ta.write(i, value) for (ta, value) in zip(tas, result_value_batchable)
      ]
      return (i + 1, tas)
    _, r_a = control_flow_ops.while_loop(
        lambda i, _: i < n,
        compute, (i, result_batchable_ta),
        parallel_iterations=parallel_iterations,
        back_prop=back_prop,
        swap_memory=swap_memory,
        maximum_iterations=n)
    result_batchable = [r.stack() for r in r_a]
    # Update each output tensor w/ static shape info about the outer dimension.
    for r in result_batchable:
      r.set_shape(tensor_shape.TensorShape(n_static).concatenate(
          r.get_shape()[1:]))
    # TODO(akshayka): Remove the in_graph_mode check once caching devices are
    # supported in Eager
    if in_graph_mode and varscope_caching_device_was_none:
      varscope.set_caching_device(None)
    result_flat = _result_batchable_to_flat(result_batchable,
                                            result_flat_signature,
                                            n_static)
    result = result_unflatten(result_flat)
    return result
def _dtype_to_spec(d):
  """Wraps a raw dtype in a shapeless TensorSpec; passes TypeSpecs through."""
  if isinstance(d, type_spec.TypeSpec):
    return d
  return tensor_spec.TensorSpec(None, d)
def _most_general_compatible_type(spec):
  """Returns the most general TypeSpec compatible with `spec`."""
  # TODO(edloper): Consider adding most_general_compatible_type to TypeSpec API
  # pylint: disable=protected-access
  if isinstance(spec, tensor_spec.TensorSpec):
    # Drop the static shape, keeping only the dtype.
    return tensor_spec.TensorSpec(None, spec.dtype)
  if isinstance(spec, ragged_tensor.RaggedTensorSpec):
    return ragged_tensor.RaggedTensorSpec(None, spec._dtype, spec._ragged_rank,
                                          spec._row_splits_dtype)
  if isinstance(spec, sparse_tensor.SparseTensorSpec):
    return sparse_tensor.SparseTensorSpec(None, spec.dtype)
  return spec
def _result_flat_signature_to_batchable_tensor_spec(result_flat_signature):
  """Converts result_flat_signature -> result_batchable_tensor_specs."""
  # Reject any output spec that cannot be decomposed into batchable tensors
  # before assembling the flat spec list.
  for sig in result_flat_signature:
    if not isinstance(sig, type_spec.BatchableTypeSpec):
      raise TypeError("map_fn can not generate %s outputs" % (sig,))
  # pylint: disable=protected-access
  return [
      ts for sig in result_flat_signature for ts in sig._flat_tensor_specs
  ]
def _elems_flat_to_batchable(elems_flat):
  """Converts elems_flat -> elems_batchable."""
  batchable_tensors = []
  for elem in elems_flat:
    elem_spec = type_spec.type_spec_from_value(elem)
    if not isinstance(elem_spec, type_spec.BatchableTypeSpec):
      raise TypeError("map_fn can not consume %s inputs: got %r" %
                      (elem_spec, elem))
    # "Box" composite tensors into a flat list of sliceable tf.Tensors.
    # pylint: disable=protected-access
    batchable_tensors += elem_spec._to_batched_tensor_list(elem)
  return batchable_tensors
def _elems_value_batchable_to_flat(elems_value_batchable, elems_flat_signature):
  """Converts elems_value_batchable -> elems_value_flat."""
  # pylint: disable=protected-access
  flat_values = []
  offset = 0
  for batched_spec in elems_flat_signature:
    unbatched_spec = batched_spec._unbatch()
    width = len(unbatched_spec._flat_tensor_specs)
    component_tensors = elems_value_batchable[offset:offset + width]
    flat_values.append(
        unbatched_spec._from_compatible_tensor_list(component_tensors))
    offset += len(component_tensors)
  # Every batchable tensor must be consumed by exactly one spec.
  assert offset == len(elems_value_batchable)
  return flat_values
def _result_value_flat_to_batchable(result_value_flat, result_flat_signature):
    """Converts result_value_flat -> result_value_batchable.

    Plain TensorSpec results pass through unchanged; composite results are
    validated against their spec and expanded into component tensors.
    """
    batchable = []
    for returned_value, expected_spec in zip(result_value_flat,
                                             result_flat_signature):
        if isinstance(expected_spec, tensor_spec.TensorSpec):
            batchable.append(returned_value)
            continue
        if not expected_spec.is_compatible_with(returned_value):
            raise ValueError(
                "Error in map_fn:\n Expected `fn` to return a:\n %s\n"
                " But it returned a:\n %s\n (value=%s)\n"
                " To fix, update the `fn_output_signature` (or `dtype`) "
                "argument to `map_fn`." %
                (expected_spec, type_spec.type_spec_from_value(returned_value),
                 returned_value))
        # pylint: disable=protected-access
        batchable.extend(expected_spec._to_tensor_list(returned_value))
    return batchable
def _result_batchable_to_flat(result_batchable, result_flat_signature,
                              batch_size):
    """Converts result_batchable -> result_flat.

    Consumes the component tensors of `result_batchable` spec-by-spec and
    rebuilds each flat result from its batched spec.
    """
    flat = []
    pos = 0
    for spec in result_flat_signature:
        # pylint: disable=protected-access
        width = len(spec._flat_tensor_specs)
        batched_spec = spec._batch(batch_size)
        flat.append(
            batched_spec._from_compatible_tensor_list(
                result_batchable[pos:pos + width]))
        pos += width
    # All component tensors must be accounted for.
    assert pos == len(result_batchable)
    return flat
@tf_export("map_fn", v1=[])
@deprecation.deprecated_arg_values(
    None,
    """back_prop=False is deprecated. Consider using tf.stop_gradient instead.
Instead of:
results = tf.map_fn(fn, elems, back_prop=False)
Use:
results = tf.nest.map_structure(tf.stop_gradient, tf.map_fn(fn, elems))""",
    warn_once=True,
    back_prop=False)
@deprecation.deprecated_args(None, "Use fn_output_signature instead", "dtype")
def map_fn_v2(fn,
              elems,
              dtype=None,
              parallel_iterations=None,
              back_prop=True,
              swap_memory=False,
              infer_shape=True,
              name=None,
              fn_output_signature=None):
    """Transform `elems` by applying `fn` to each element unstacked on axis 0."""
    # `dtype` is the deprecated alias of `fn_output_signature`; it is only
    # honored when the caller did not pass `fn_output_signature` explicitly.
    if fn_output_signature is None:
        fn_output_signature = dtype
    # Delegate to the v1 implementation; this v2 entry point differs only in
    # the argument deprecations handled by the decorators above.
    return map_fn(
        fn=fn,
        elems=elems,
        fn_output_signature=fn_output_signature,
        parallel_iterations=parallel_iterations,
        back_prop=back_prop,
        swap_memory=swap_memory,
        infer_shape=infer_shape,
        name=name)
# Docstring for v2 is the same as v1, except that back_prop is deprecated.
# Patch the `back_prop` argument description in the inherited v1 docstring,
# then assert at import time that the substitution actually matched — this
# guards against the v1 docstring drifting out of sync with the regex.
map_fn_v2.__doc__ = re.sub(
    r"( back_prop: \(optional\) )(.*)",
    r"\1Deprecated: prefer using `tf.stop_gradient` instead. \2",
    map_fn.__doc__)
assert "prefer using `tf.stop_gradient` instead" in map_fn_v2.__doc__
| |
"""
Created on 18/12/2013
@author: Nacho, BorjaGB
"""
from datetime import datetime
from lpentities.computation import Computation
from lpentities.data_source import DataSource
from lpentities.dataset import Dataset
from lpentities.indicator import Indicator
from lpentities.instant import Instant
from lpentities.license import License
from lpentities.measurement_unit import MeasurementUnit
from lpentities.observation import Observation
from lpentities.organization import Organization
from lpentities.slice import Slice
from lpentities.user import User
from lpentities.value import Value
from lpentities.year_interval import YearInterval
from reconciler.country_reconciler import CountryReconciler
from model2xml.model2xml import ModelToXMLTransformer
from requests.exceptions import ConnectionError
from es.weso.worldbank.rest.rest_client import RestClient
class Parser(object):
countries = []
observations = []
def __init__(self, config, log):
self.logger = log
self.config = config
self._reconciler = CountryReconciler()
self._look_for_historical = self.config.getboolean("TRANSLATOR", "historical_mode")
if not self._look_for_historical:
self._historical_year = self.config.getint("TRANSLATOR", "historical_year")
self._org_id = self.config.get("TRANSLATOR", "org_id")
self._obs_int = self.config.getint("TRANSLATOR", "obs_int")
self._sli_int = self.config.getint("TRANSLATOR", "sli_int")
self._dat_int = self.config.getint("TRANSLATOR", "dat_int")
self._igr_int = self.config.getint("TRANSLATOR", "igr_int")
self.countries_url = self.config.get('URLs', 'country_list')
self.observations_url = self.config.get('URLs', 'indicator_pattern')
self.data_sources = dict(self.config.items('data_sources'))
self._organization = self._build_default_organization()
self._user = self._build_default_user()
self._license = self._build_default_license()
def run(self):
self.extract_countries()
self.extract_observations()
self.model_to_xml()
def model_to_xml(self):
for datasource in self._user.organization.data_sources:
for dataset in datasource.datasets:
if len(dataset.observations) > 0:
transformer = ModelToXMLTransformer(dataset,
ModelToXMLTransformer.API,
self._user,
self.config.get("base_api"))
transformer.run()
else:
self.logger.warning("Dataset %s has no observations"%dataset.dataset_id)
def extract_countries(self):
response = RestClient.get(self.countries_url, {"format": "json"})
countries = response[1]
for country in countries:
try:
self.countries.append(self._reconciler.get_country_by_iso2(country['iso2Code']))
except:
self.logger.warning("No country matches found for iso code" + country['iso2Code'])
def _build_default_organization(self):
return Organization(chain_for_id=self._org_id,
name=self.config.get("ORGANIZATION", "name"),
url=self.config.get("ORGANIZATION", "url"),
url_logo=self.config.get("ORGANIZATION", "url_logo"),
description_en=self._read_config_value("ORGANIZATION", "description_en"),
description_es=self._read_config_value("ORGANIZATION", "description_es"),
description_fr=self._read_config_value("ORGANIZATION", "description_fr"))
def _read_config_value(self, section, field):
return (self.config.get(section, field)).decode(encoding="utf-8")
def _build_default_user(self):
return User(user_login="worldbank_importer",
organization=self._organization)
def _build_default_license(self):
return License(name=self.config.get("LICENSE", "name"),
description=self.config.get("LICENSE", "description"),
republish=self.config.get("LICENSE", "republish"),
url=self.config.get("LICENSE", "url"))
def _build_data_source(self, data_source_name):
data_source = DataSource(chain_for_id=self._org_id,
int_for_id=self.config.get("datasource", "datasource_id"),
name=data_source_name,
organization=self._organization)
return data_source
def _build_data_set(self, data_source):
frequency = Dataset.YEARLY
dataset = Dataset(chain_for_id=self._org_id,
int_for_id=self._dat_int,
frequency=frequency,
license_type=self._license,
source=data_source)
self._dat_int += 1 # Updating dataset int id value
return dataset
def _build_indicator(self, indicator_code, dataset, measurement_unit):
indicator = Indicator(chain_for_id=self._org_id,
int_for_id=int(self.config.get(indicator_code, "indicator_id")),
name_en=self.config.get(indicator_code, "name_en").decode(encoding="utf-8"),
name_es=self.config.get(indicator_code, "name_es").decode(encoding="utf-8"),
name_fr=self.config.get(indicator_code, "name_fr").decode(encoding="utf-8"),
description_en=self.config.get(indicator_code, "desc_en").decode(encoding="utf-8"),
description_es=self.config.get(indicator_code, "desc_es").decode(encoding="utf-8"),
description_fr=self.config.get(indicator_code, "desc_fr").decode(encoding="utf-8"),
dataset=dataset,
measurement_unit=measurement_unit,
preferable_tendency=self._get_preferable_tendency_of_indicator(self.config.get(indicator_code, "indicator_tendency")),
topic=self.config.get(indicator_code, "indicator_topic"))
return indicator
def _build_slice(self, country, dataset, indicator):
slice_object = Slice(chain_for_id=self._org_id,
int_for_id=self._sli_int,
dimension=country,
dataset=dataset,
indicator=indicator)
self._sli_int += 1 # Updating int id slice value
return slice_object
def _build_value(self, indicator, country, date, value_element):
value_object = Value(value_element,
Value.FLOAT,
Value.AVAILABLE)
if value_object.value is None:
value_object = Value(None,
None,
Value.MISSING)
self.logger.warning('Missing value for ' + indicator.name_en + ', ' + country.name + ', ' + date)
return value_object
def _filter_historical_observations(self, year):
if self._look_for_historical:
return True
else :
if isinstance(year, YearInterval):
return year.year > self._historical_year
else:
return year.end_time > self._historical_year
def _build_observation(self, indicator, dataset, country, value, date):
value_object = self._build_value(indicator,
country,
date,
value)
time = YearInterval(year=int(date))
observation = Observation(chain_for_id=self._org_id,
int_for_id=self._obs_int,
ref_time=time,
issued=Instant(datetime.now()),
computation=Computation(Computation.RAW),
value=value_object,
indicator=indicator,
dataset=dataset)
self._obs_int += 1 # Updating obs int value
return observation
def extract_observations(self):
for data_source_name in self.data_sources:
indicators_section = self.config.get('data_sources', data_source_name)
requested_indicators = dict(self.config.items(indicators_section))
data_source = self._build_data_source(data_source_name)
self._organization.add_data_source(data_source)
dataset = self._build_data_set(data_source)
data_source.add_dataset(dataset)
#print data_source_name
for indicator_element in requested_indicators:
indicator_code = self.config.get(indicators_section, indicator_element)
measurement_unit = MeasurementUnit(name = self.config.get(indicator_code, "indicator_unit_name"),
convert_to = self.config.get(indicator_code, "indicator_unit_type"))
indicator = self._build_indicator(indicator_code, dataset, measurement_unit)
print '\t' + indicator.name_en + "--------------" + indicator.preferable_tendency + "-----------"
for country in self.countries:
slice_object = self._build_slice(country, dataset, indicator)
dataset.add_slice(slice_object) # TESTING EFFECT
#print '\t\t' + slice_object.slice_id + '\t' + slice_object.dimension.get_dimension_string()
uri = self.observations_url.replace('{ISO3CODE}', country.iso3)
uri = uri.replace('{INDICATOR.CODE}', indicator_code)
try:
response = RestClient.get(uri, {"format": "json"})
observations = response[1]
if observations is not None:
for observation_element in observations:
#print observation_element
observation = self._build_observation(indicator,
dataset,
country,
observation_element['value'],
observation_element['date'])
if self._filter_historical_observations(observation.ref_time):
country.add_observation(observation)
dataset.add_observation(observation)
slice_object.add_observation(observation)
#if observation.value.obs_status is not Value.MISSING:
# print '\t\t\t' + observation.ref_time.get_time_string() + '\t' + str(observation.value.value) + ' ' + indicator.measurement_unit.name
#else:
# print '\t\t\t' + observation.ref_time.get_time_string() + '\tMissing'
except (KeyError, ConnectionError, ValueError):
self.logger.error('Error retrieving response for \'' + uri + '\'')
self.logger.info("FINISHED: " + indicator.name_en)
@staticmethod
def _get_preferable_tendency_of_indicator(tendency):
if tendency.lower() == "decrease":
return Indicator.DECREASE
else:
return Indicator.INCREASE
| |
from __future__ import division
"""
Author: Keith Bourgoin
"""
__license__ = """
Copyright 2015 Parse.ly, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ["Cluster"]
import logging
import random
import time
import weakref
from .broker import Broker
from .exceptions import (ConsumerCoordinatorNotAvailable,
KafkaException,
UnknownTopicOrPartition)
from .protocol import ConsumerMetadataRequest, ConsumerMetadataResponse
from .topic import Topic
from .utils.compat import iteritems, range
log = logging.getLogger(__name__)
class TopicDict(dict):
    """Dictionary which will attempt to auto-create unknown topics."""

    def __init__(self, cluster, *args, **kwargs):
        """Create the topic dict.

        Only a weak proxy to the owning cluster is held, avoiding a
        reference cycle (Cluster -> TopicDict -> Cluster).
        """
        super(TopicDict, self).__init__(*args, **kwargs)
        self._cluster = weakref.proxy(cluster)

    def __missing__(self, key):
        """Attempt auto-creation of `key`; raise if it still doesn't exist."""
        log.warning('Topic %s not found. Attempting to auto-create.', key)
        if self._create_topic(key):
            return self[key]
        else:
            raise UnknownTopicOrPartition('Unknown topic: {topic}'.format(topic=key))

    def _create_topic(self, topic_name):
        """Auto-create a topic.

        Not exposed in the cluster or broker because this is *only*
        auto-creation. When there's a real API for creating topics,
        with settings and everything, we'll implement that. To expose just
        this now would be disingenuous, since it's features would be hobbled.

        :returns: True if the topic appears in cluster metadata within the
            retry budget, False otherwise.
        :raises KafkaException: if no brokers are known at all.
        """
        if len(self._cluster.brokers) == 0:
            log.warning("No brokers found. This is probably because of "
                        "KAFKA-2154, which will be fixed in Kafka 0.8.3")
            # Bug fix: message typo "metdata" -> "metadata".
            raise KafkaException("Unable to retrieve metadata. Can't auto-create topic. See log for details.")
        # Auto-creating will take a moment, so we try 5 times.
        for i in range(5):
            # Auto-creating is as simple as issuing a metadata request
            # solely for that topic. The update is just to be sure
            # our `Cluster` knows about it.
            self._cluster.brokers[list(self._cluster.brokers.keys())[0]].request_metadata(topics=[topic_name])
            self._cluster.update()
            if topic_name in self:
                log.info('Topic %s successfully auto-created.', topic_name)
                return True
            time.sleep(0.1)
        # Bug fix: previously fell off the end and returned None implicitly;
        # make the failure result explicit (both are falsy, so callers of
        # `if self._create_topic(...)` behave identically).
        return False
class Cluster(object):
    """
    A Cluster is a high-level abstraction of the collection of brokers and
    topics that makes up a real kafka cluster.
    """
    def __init__(self,
                 hosts,
                 handler,
                 socket_timeout_ms=30 * 1000,
                 offsets_channel_socket_timeout_ms=10 * 1000,
                 exclude_internal_topics=True,
                 source_address=''):
        """Create a new Cluster instance.

        :param hosts: Comma-separated list of kafka hosts to used to connect.
        :type hosts: bytes
        :param handler: The concurrency handler for network requests.
        :type handler: :class:`pykafka.handlers.Handler`
        :param socket_timeout_ms: The socket timeout (in milliseconds) for
            network requests
        :type socket_timeout_ms: int
        :param offsets_channel_socket_timeout_ms: The socket timeout (in
            milliseconds) when reading responses for offset commit and
            offset fetch requests.
        :type offsets_channel_socket_timeout_ms: int
        :param exclude_internal_topics: Whether messages from internal topics
            (specifically, the offsets topic) should be exposed to consumers.
        :type exclude_internal_topics: bool
        :param source_address: The source address for socket connections
        :type source_address: str `'host:port'`
        """
        self._seed_hosts = hosts
        self._socket_timeout_ms = socket_timeout_ms
        self._offsets_channel_socket_timeout_ms = offsets_channel_socket_timeout_ms
        self._handler = handler
        self._brokers = {}
        self._topics = TopicDict(self)
        self._exclude_internal_topics = exclude_internal_topics
        self._source_address = source_address
        # Split an optional 'host:port' source address; port defaults to 0
        # (i.e. let the OS pick an ephemeral port).
        self._source_host = self._source_address.split(':')[0]
        self._source_port = 0
        if ':' in self._source_address:
            self._source_port = int(self._source_address.split(':')[1])
        # Populate brokers and topics from live metadata immediately.
        self.update()

    def __repr__(self):
        return "<{module}.{name} at {id_} (hosts={hosts})>".format(
            module=self.__class__.__module__,
            name=self.__class__.__name__,
            id_=hex(id(self)),
            hosts=self._seed_hosts,
        )

    @property
    def brokers(self):
        """The dict of known brokers for this cluster"""
        return self._brokers

    @property
    def topics(self):
        """The dict of known topics for this cluster"""
        return self._topics

    @property
    def handler(self):
        """The concurrency handler for network requests"""
        return self._handler

    def _get_metadata(self):
        """Get fresh cluster metadata from a broker.

        Prefers already-connected brokers; falls back to the seed host list
        when none are connected. Raises RuntimeError if no broker responds.
        """
        # Works either on existing brokers or seed_hosts list
        brokers = [b for b in self.brokers.values() if b.connected]
        if brokers:
            for broker in brokers:
                response = broker.request_metadata()
                if response is not None:
                    return response
        else:  # try seed hosts
            brokers = self._seed_hosts.split(',')
            for broker_str in brokers:
                try:
                    h, p = broker_str.split(':')
                    # Broker id -1: the real id is unknown until metadata is
                    # fetched; this is a bootstrap-only connection.
                    broker = Broker(-1, h, int(p), self._handler,
                                    self._socket_timeout_ms,
                                    self._offsets_channel_socket_timeout_ms,
                                    buffer_size=1024 * 1024,
                                    source_host=self._source_host,
                                    source_port=self._source_port)
                    response = broker.request_metadata()
                    if response is not None:
                        return response
                except Exception as e:
                    # Best-effort: log and try the next seed host.
                    log.error('Unable to connect to broker %s', broker_str)
                    log.exception(e)
        # Couldn't connect anywhere. Raise an error.
        raise RuntimeError(
            'Unable to connect to a broker to fetch metadata. See logs.')

    def _update_brokers(self, broker_metadata):
        """Update brokers with fresh metadata.

        :param broker_metadata: Metadata for all brokers.
        :type broker_metadata: Dict of `{name: metadata}` where `metadata` is
            :class:`pykafka.protocol.BrokerMetadata` and `name` is str.
        """
        # FIXME: A cluster with no topics returns no brokers in metadata
        # Remove old brokers
        removed = set(self._brokers.keys()) - set(broker_metadata.keys())
        if len(removed) > 0:
            log.info('Removing %d brokers', len(removed))
        for id_ in removed:
            log.debug('Removing broker %s', self._brokers[id_])
            self._brokers.pop(id_)
        # Add/update current brokers
        if len(broker_metadata) > 0:
            log.info('Discovered %d brokers', len(broker_metadata))
        for id_, meta in iteritems(broker_metadata):
            if id_ not in self._brokers:
                log.debug('Discovered broker id %s: %s:%s', id_, meta.host, meta.port)
                self._brokers[id_] = Broker.from_metadata(
                    meta, self._handler, self._socket_timeout_ms,
                    self._offsets_channel_socket_timeout_ms,
                    buffer_size=1024 * 1024,
                    source_host=self._source_host,
                    source_port=self._source_port
                )
            else:
                broker = self._brokers[id_]
                if meta.host == broker.host and meta.port == broker.port:
                    continue  # no changes
                # TODO: Can brokers update? Seems like a problem if so.
                #       Figure out and implement update/disconnect/reconnect if
                #       needed.
                raise Exception('Broker host/port change detected! %s', broker)

    def _update_topics(self, metadata):
        """Update topics with fresh metadata.

        :param metadata: Metadata for all topics.
        :type metadata: Dict of `{name, metadata}` where `metadata` is
            :class:`pykafka.protocol.TopicMetadata` and `name` is str.
        """
        # Remove old topics
        removed = set(self._topics.keys()) - set(metadata.keys())
        if len(removed) > 0:
            log.info("Removing %d topics", len(removed))
        for name in removed:
            log.debug('Removing topic %s', self._topics[name])
            self._topics.pop(name)
        # Add/update partition information
        if len(metadata) > 0:
            log.info("Discovered %d topics", len(metadata))
        for name, meta in iteritems(metadata):
            if not self._should_exclude_topic(name):
                if name not in self._topics:
                    self._topics[name] = Topic(self, meta)
                    log.debug('Discovered topic %s', self._topics[name])
                else:
                    self._topics[name].update(meta)

    def _should_exclude_topic(self, topic_name):
        """Should this topic be excluded from the list shown to the client?"""
        if not self._exclude_internal_topics:
            return False
        # Internal topics are identified by a double-underscore name prefix.
        return topic_name.startswith(b"__")

    def get_offset_manager(self, consumer_group):
        """Get the broker designated as the offset manager for this consumer group.

        Based on Step 1 at https://cwiki.apache.org/confluence/display/KAFKA/Committing+and+fetching+consumer+offsets+in+Kafka

        :param consumer_group: The name of the consumer group for which to
            find the offset manager.
        :type consumer_group: str
        """
        log.info("Attempting to discover offset manager for consumer group '%s'",
                 consumer_group)
        # arbitrarily choose a broker, since this request can go to any
        broker = self.brokers[random.choice(list(self.brokers.keys()))]
        MAX_RETRIES = 5
        for i in range(MAX_RETRIES):
            if i > 0:
                # Linear backoff (2s, 4s, ...) before each retry.
                log.debug("Retrying offset manager discovery")
                time.sleep(i * 2)
            req = ConsumerMetadataRequest(consumer_group)
            future = broker.handler.request(req)
            try:
                res = future.get(ConsumerMetadataResponse)
            except ConsumerCoordinatorNotAvailable:
                log.error('Error discovering offset manager.')
                # Re-raise only once the retry budget is exhausted.
                if i == MAX_RETRIES - 1:
                    raise
            else:
                coordinator = self.brokers.get(res.coordinator_id, None)
                if coordinator is None:
                    raise Exception('Coordinator broker with id {id_} not found'.format(id_=res.coordinator_id))
                log.info("Found coordinator broker with id %s", res.coordinator_id)
                return coordinator

    def update(self):
        """Update known brokers and topics."""
        metadata = self._get_metadata()
        if len(metadata.brokers) == 0 and len(metadata.topics) == 0:
            log.warning('No broker metadata found. If this is a fresh cluster, '
                        'this may be due to a bug in Kafka. You can force '
                        'broker metadata to be returned by manually creating '
                        'a topic in the cluster. See '
                        'https://issues.apache.org/jira/browse/KAFKA-2154 '
                        'for information. Please note: topic auto-creation '
                        'will NOT work. You need to create at least one topic '
                        'manually using the Kafka CLI tools.')
        self._update_brokers(metadata.brokers)
        self._update_topics(metadata.topics)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
from heat.engine import resources
from heat.engine import properties
from heat.openstack.common.gettextutils import _
from docutils import nodes
from sphinx.util.compat import Directive
# Placeholder docutils node type; content is generated directly by the
# ResourcePages directive, so the node itself carries no behavior.
class resourcepages(nodes.General, nodes.Element):
    pass
class ResourcePages(Directive):
    """Sphinx directive that builds one reference section per Heat resource.

    Usage: ``.. resourcepages:: [prefix]`` — the optional argument restricts
    output to resource types whose name starts with `prefix`.
    """
    has_content = False
    required_arguments = 0
    optional_arguments = 1
    final_argument_whitespace = False
    option_spec = {}

    def run(self):
        """Directive entry point: emit a docutils section per resource type."""
        # `arguments and arguments.pop() or None`: prefix is optional.
        prefix = self.arguments and self.arguments.pop() or None
        content = []
        for resource_type, resource_class in _all_resources(prefix):
            self.resource_type = resource_type
            self.resource_class = resource_class
            section = self._section(content, resource_type, '%s')
            self.props_schemata = properties.schemata(
                self.resource_class.properties_schema)
            # Use the resource class docstring as the lead paragraph, if any.
            cls_doc = resource_class.__doc__
            if cls_doc:
                para = nodes.paragraph('', cls_doc)
                section.append(para)
            self.contribute_properties(section)
            self.contribute_attributes(section)
            self.contribute_hot_syntax(section)
            self.contribute_yaml_syntax(section)
            self.contribute_json_syntax(section)
        return content

    def _section(self, parent, title, id_pattern):
        """Append a titled sub-section to `parent` and return it."""
        id = id_pattern % self.resource_type
        section = nodes.section(ids=[id])
        parent.append(section)
        title = nodes.title('', title)
        section.append(title)
        return section

    def _prop_syntax_example(self, prop):
        """Return a short placeholder example for a property's value syntax."""
        if not prop:
            return 'Value'
        if prop.type == properties.LIST:
            # Show two example items followed by an ellipsis.
            schema = lambda i: prop.schema[i] if prop.schema else None
            sub_type = [self._prop_syntax_example(schema(i))
                        for i in range(2)]
            return '[%s, %s, ...]' % tuple(sub_type)
        elif prop.type == properties.MAP:
            def sub_props():
                for sub_key, sub_value in prop.schema.items():
                    if sub_value.implemented:
                        yield '"%s": %s' % (
                            sub_key, self._prop_syntax_example(sub_value))
            return '{%s}' % (', '.join(sub_props()) if prop.schema else '...')
        else:
            # Scalar types: just show the type name.
            return prop.type

    def contribute_hot_syntax(self, parent):
        """Emit a HOT-format template example for the current resource."""
        section = self._section(parent, _('HOT Syntax'), '%s-hot')
        props = []
        for prop_key in sorted(self.props_schemata.keys()):
            prop = self.props_schemata[prop_key]
            if prop.implemented:
                props.append('%s: %s' % (prop_key,
                                         self._prop_syntax_example(prop)))
        # NOTE(review): template indentation below may have been altered in
        # transit — verify against the upstream file before relying on it.
        template = '''heat_template_version: 2013-05-23
...
resources:
...
the_resource:
type: %s
properties:
%s''' % (self.resource_type, '\n '.join(props))
        block = nodes.literal_block('', template)
        section.append(block)

    def contribute_yaml_syntax(self, parent):
        """Emit a CFN-YAML template example for the current resource."""
        section = self._section(parent, _('YAML Syntax'), '%s-yaml')
        props = []
        for prop_key in sorted(self.props_schemata.keys()):
            prop = self.props_schemata[prop_key]
            if prop.implemented:
                props.append('%s: %s' % (prop_key,
                                         self._prop_syntax_example(prop)))
        # NOTE(review): template indentation below may have been altered in
        # transit — verify against the upstream file before relying on it.
        template = '''HeatTemplateFormatVersion: '2012-12-12'
...
Resources:
...
TheResource:
Type: %s
Properties:
%s''' % (self.resource_type, '\n '.join(props))
        block = nodes.literal_block('', template)
        section.append(block)

    def contribute_json_syntax(self, parent):
        """Emit a CFN-JSON template example for the current resource."""
        section = self._section(parent, _('JSON Syntax'), '%s-json')
        props = []
        for prop_key in sorted(self.props_schemata.keys()):
            prop = self.props_schemata[prop_key]
            if prop.implemented:
                props.append('"%s": %s' % (prop_key,
                                           self._prop_syntax_example(prop)))
        # NOTE(review): template indentation below may have been altered in
        # transit — verify against the upstream file before relying on it.
        template = '''{
"AWSTemplateFormatVersion" : "2010-09-09",
...
"Resources" : {
"TheResource": {
"Type": "%s",
"Properties": {
%s
}
}
}
}''' % (self.resource_type, ',\n '.join(props))
        block = nodes.literal_block('', template)
        section.append(block)

    def contribute_property(self, prop_list, prop_key, prop):
        """Append a definition-list entry describing one property (recursing
        into map/list sub-schemas)."""
        prop_item = nodes.definition_list_item(
            '', nodes.term('', prop_key))
        prop_list.append(prop_item)
        prop_item.append(nodes.classifier('', prop.type))
        definition = nodes.definition()
        prop_item.append(definition)
        # Unimplemented properties get only a note; nothing else is emitted.
        if not prop.implemented:
            para = nodes.inline('', _('Not implemented.'))
            warning = nodes.note('', para)
            definition.append(warning)
            return
        if prop.description:
            para = nodes.paragraph('', prop.description)
            definition.append(para)
        if prop.required:
            para = nodes.paragraph('', _('Required property.'))
        elif prop.default is not None:
            para = nodes.paragraph(
                '',
                _('Optional property, defaults to "%s".') % prop.default)
        else:
            para = nodes.paragraph('', _('Optional property.'))
        definition.append(para)
        for constraint in prop.constraints:
            para = nodes.paragraph('', str(constraint))
            definition.append(para)
        # Recurse into nested schemas for MAP and LIST property types.
        sub_schema = None
        if prop.schema and prop.type == properties.MAP:
            para = nodes.emphasis('', _('Map properties:'))
            definition.append(para)
            sub_schema = prop.schema
        elif prop.schema and prop.type == properties.LIST:
            para = nodes.emphasis(
                '', _('List contents:'))
            definition.append(para)
            sub_schema = prop.schema
        if sub_schema:
            sub_prop_list = nodes.definition_list()
            definition.append(sub_prop_list)
            for sub_prop_key in sorted(sub_schema.keys()):
                sub_prop = sub_schema[sub_prop_key]
                self.contribute_property(sub_prop_list, sub_prop_key, sub_prop)

    def contribute_properties(self, parent):
        """Emit the 'Properties' section listing every schema property."""
        if not self.props_schemata:
            return
        section = self._section(parent, _('Properties'), '%s-props')
        prop_list = nodes.definition_list()
        section.append(prop_list)
        for prop_key in sorted(self.props_schemata.keys()):
            prop = self.props_schemata[prop_key]
            self.contribute_property(prop_list, prop_key, prop)

    def contribute_attributes(self, parent):
        """Emit the 'Attributes' section from the resource attribute schema."""
        schema = self.resource_class.attributes_schema
        if not schema:
            return
        section = self._section(parent, _('Attributes'), '%s-attrs')
        prop_list = nodes.definition_list()
        section.append(prop_list)
        for prop_key in sorted(schema.keys()):
            description = schema[prop_key]
            prop_item = nodes.definition_list_item(
                '', nodes.term('', prop_key))
            prop_list.append(prop_item)
            definition = nodes.definition()
            prop_item.append(definition)
            if description:
                def_para = nodes.paragraph('', description)
                definition.append(def_para)
definition.append(def_para)
def _all_resources(prefix=None):
    """Yield (type name, class) pairs for every registered resource type,
    sorted by name, optionally restricted to names starting with `prefix`.
    """
    env = resources.global_env()
    for type_name in sorted(env.get_types()):
        # Resolve the class before filtering, matching the original's
        # lookup-every-type behavior.
        klass = env.get_class(type_name)
        if not prefix or type_name.startswith(prefix):
            yield type_name, klass
def setup(app):
    """Sphinx extension entry point: register the node and the directive."""
    # Load the global resource environment before any directive runs.
    resources.initialise()
    app.add_node(resourcepages)
    app.add_directive('resourcepages', ResourcePages)
| |
from bs4 import BeautifulSoup
import urllib
#csv is for the csv writer
import csv
import smtplib
import imapclient
"""
Included Sources:
1843 Magazine
BBC
Caixin
ChannelNewsAsia
ChinaChange
China Digital Times
ChinaFile
China Media Project
China Policy Institute
Chublic Opinion
Dui Hua Human Rights Journal
East Asia Forum
Foreign Policy
Free Tibet
The Guardian
LA Times
Quartz
Radio Free Asia
Reuters
SCMP
Sixth Tone
Sydney Morning Herald
TCHRD
Tibetan Review
Wall Street Journal
Washington Post
Xinhua
Xinjiang Review
Tried and Failed to Add:
nytimes
nybooks
hong kong free press
the nanfang
world policy journal
phayul
the national interest
ICT
yahoo
politics from the provinces
Medium
the diplomat
"""
# Maps headline text -> article body text; filled in by headliner().
holder = {}
# URLs that headliner() could not match to a known source — catches errors.
unmatched_holder = []
# Input CSV listing the article URLs to scrape.
# NOTE(review): opened at import time and never closed; relies on interpreter
# exit for cleanup — confirm this is intentional.
txt = open("tester.csv")
# inputs = txt.read()  # (unused) raw contents of the input doc
# Output file where the scraped article text will be written.
output_txt = open("china-daily-email-local-output.txt", "w")
def headliner(url):
#iterate through the urls
parsed_urls = csv.reader(url)
for row in parsed_urls:
number = 0
row_contents = row[number]
print row_contents
number += 1
if "rfa" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'Radio Free Asia: '
headline = soup.find_all('title')
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"id" : "storytext"}).findAll('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "qz" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'Quartz: '
headline = soup.find_all('h1')
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the htlm text into regular text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"class" : "item-body"}).findAll('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "foreignpolicy" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'Foreign Policy: '
headline = soup.find_all('h1')
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"class" : "shares-position"}).findAll('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "sixthtone" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'Sixth Tone: '
headline = soup.find_all('h3', {"class":"heading-1"})
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"class" : "content"}).findAll('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "washingtonpost" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'Washington Post: '
headline = soup.find_all('h1')
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"id" : "article-body"}).findAll('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "latimes" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'LA Times: '
headline = soup.find_all('h1')
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"class" : "trb_ar_page"}).findAll('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "wsj" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'WSJ: '
headline = soup.find_all('h1')
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"id" : "wsj-article-wrap"}).findAll('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "chinadigitaltimes" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'China Digital Times: '
headline = soup.find_all('h1')
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"class" : "entry"}).findAll('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "tchrd" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'TCHRD: '
headline = soup.find_all('h1')
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"class" : "entry"}).findAll('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "caixin" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'Caixin: '
headline = soup.find_all('h1')
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
#this line adds the URL to the output text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"id" : "txt_content", 'style' : 'font-size:14px;'}).findAll('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "reuters" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'Reuters: '
headline = soup.find_all('h1')
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
#this line adds the URL to the output text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("span", {"id" : "article-text"}).findAll('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "xinhuanet" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'Xinhua: '
headline = soup.find_all('span', {'id':'bltitle'})
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
#this line adds the URL to the output text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("span", {"id" : "content", "class":"hei14 pcCon"}).findAll('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "theguardian" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'The Guardian: '
headline = soup.find_all('h1', {"class":"content__headline js-score", "itemprop":"headline"})
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
#this line adds the URL to the output text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"class" : "content__article-body from-content-api js-article__body", "itemprop":"articleBody", "data-test-id":"article-review-body"}).findAll('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "scmp" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'SCMP: '
headline = soup.find_all('h1', {"itemprop": "name headline"}, {"class": "title", "id":"page-title"})
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
headline_text += "\n"
#creats the body text
#This turns the html text into regular text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find_all('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "eastasiaforum" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'East Asia Forum: '
headline = soup.find_all('h1')
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
#this line adds the URL to the output text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("section", {"class" : "content"}).find_all('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "smh" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'Sydney Morning Herald: '
headline = soup.find_all('h1')
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
#this line adds the URL to the output text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"class" : "article__body"}).find_all('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "cpianalysis" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'China Poilcy Institute: '
headline = soup.find_all('h2')
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
#this line adds the URL to the output text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"class" : "post-content clear"}).find_all('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "xinjiangreview" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'Xinjiang Review: '
headline = soup.find_all('h2')
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
#this line adds the URL to the output text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"class" : "entry"}).find_all('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "chinafile" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'ChinaFile: '
headline = soup.find_all('h1')
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
#this line adds the URL to the output text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"class" : "panel-pane pane-node-content"}).find_all('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "chinachange" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'ChinaChange: '
headline = soup.find_all('h1')
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
#this line adds the URL to the output text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"class" : "entry-content clearfix"}).find_all('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "channelnewsasia" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'ChannelNewsAsia: '
headline = soup.find_all("h1")
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
#this line adds the URL to the output text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"class" : "news_detail"}).find_all('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "tibetanreview" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'Tibetan Review: '
headline = soup.find_all("h1")
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
#this line adds the URL to the output text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"class" : "entry"}).find_all('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "duihuahrjournal" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'Dui Hua Human Rights Journal: '
headline = soup.find_all("h3")
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
#this line adds the URL to the output text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"class" : "post-body entry-content"}).find_all('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "cmp.hku" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'China Media Project: '
headline = soup.find_all("h2")
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
#this line adds the URL to the output text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"class" : "entry"}).find_all('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "bbc" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'BBC: '
headline = soup.find_all("h2")
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
#this line adds the URL to the output text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"class" : "story-body__inner"}).find_all('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "1843magazine" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = '1843 Magazine: '
headline = soup.find_all("h1")
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
#this line adds the URL to the output text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("section", {"class" : "article__body page-and-article-content"}).find_all('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "chublicopinion" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'Chublic Opinion: '
headline = soup.find_all("h1")
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
#this line adds the URL to the output text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"class" : "entry-content"}).find_all('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "freetibet" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'Free Tibet: '
headline = soup.find_all("h1")
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
#this line adds the URL to the output text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("section", {"id" : "content"}).find_all('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "globaltimes" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'Global Times: '
headline = soup.find_all("h3")
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
#this line adds the URL to the output text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"class" : "span12 row-content"}).find_all('p')
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
elif "supremepeoplescourtmonitor" in row_contents:
#opens the url for read access
this_url = urllib.urlopen(row_contents).read()
#creates a new BS holder based on the URL
soup = BeautifulSoup(this_url, 'lxml')
#creates the headline section
headline_text = 'SPC Monitor: '
headline = soup.find_all("h1")
for element in headline:
headline_text += ''.join(element.findAll(text = True)).encode('utf-8').strip()
#creats the body text
#This turns the html text into regular text
#this line adds the URL to the output text
article_text = row_contents + "\n" + "\r"
#This finds each paragraph
article = soup.find("div", {"class" : "entry-content"}).find_all("p")
#for each paragraph
for element in article:
#add a line break and then the text part of the paragraph
#the .encode part fixes unicode bullshit
article_text += '\n' + ''.join(element.findAll(text = True)).encode('utf-8').strip()
holder[headline_text] = article_text
#if the input URL isn't in the list above, this message will be returned
else:
print "not a story from a known source"
unmatched_holder.append(row_contents)
# Run the scraper: headliner() fills `holder` (headline text -> article text)
# and `unmatched_holder` (URLs from unrecognized sources) from the input file.
headliner(txt)
print "Here are holder.items: "
print
print holder
# Iterates through the unmatched urls in unmatched_holder and writes them to
# the output doc so they can be handled by hand.
for item in unmatched_holder:
    output_txt.write("cannot process %s" %(str(item)))
    output_txt.write("\n")
    output_txt.write("\r")
    output_txt.write("\r")
# Iterates through the headlines in holder and writes them to the doc.
# This is the TOC (table of contents).
# This is where "headline_text" becomes "head" and "article_text" becomes "body".
for head, body in holder.items():
    output_txt.write(str(head))
    output_txt.write("\r")
# Creates space between the list of headlines and the stories.
# NOTE(review): the original indentation was lost in extraction; these
# separator writes are assumed to run once, after the TOC loop -- confirm.
output_txt.write("\n")
output_txt.write("\n")
output_txt.write("\n")
output_txt.write("*************************************")
output_txt.write("\n")
# Iterates through the headline/body pairs in holder and writes them to the
# doc. This is the body of the email: each headline followed by its article.
for head, body in holder.items():
    output_txt.write("\r")
    output_txt.write("\n")
    output_txt.write(str(head))
    output_txt.write("\n")
    output_txt.write("\r")
    output_txt.write(str(body))
    output_txt.write("\n")
    output_txt.write("\r")
    output_txt.write("\n")
    output_txt.write("\n")
    output_txt.write("\n")
# Close the input URL list and the generated output document.
txt.close()
output_txt.close()
| |
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for MusicVAE data library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from magenta.models.music_vae import data
import magenta.music as mm
from magenta.music import constants
from magenta.music import testing_lib
from magenta.protobuf import music_pb2
import numpy as np
import tensorflow as tf
# Shorthand aliases for special event labels used by the converter tests
# later in this file.
NO_EVENT = constants.MELODY_NO_EVENT  # sustain / no new melody event
NOTE_OFF = constants.MELODY_NOTE_OFF  # end the currently sounding note
NO_DRUMS = 0  # label for a step with no drum hits
NO_CHORD = constants.NO_CHORD  # "no chord" symbol
def filter_instrument(sequence, instrument):
  """Return a copy of `sequence` containing only notes on `instrument`.

  All non-note fields of the NoteSequence are preserved via CopyFrom; only
  the `notes` collection is filtered.
  """
  result = music_pb2.NoteSequence()
  result.CopyFrom(sequence)
  # Drop every note, then copy back just the ones on the wanted instrument.
  del result.notes[:]
  for note in sequence.notes:
    if note.instrument == instrument:
      result.notes.add().CopyFrom(note)
  return result
class NoteSequenceAugmenterTest(tf.test.TestCase):
  """Tests for `data.NoteSequenceAugmenter` transposition and stretching."""

  def setUp(self):
    # Fixture: a melodic track (instrument 0), a drum track (instrument 1),
    # and a chord progression, at 60 qpm.  Note tuples are presumably
    # (pitch, velocity, start_time, end_time) -- see testing_lib; confirm.
    sequence = music_pb2.NoteSequence()
    sequence.tempos.add(qpm=60)
    testing_lib.add_track_to_sequence(
        sequence, 0,
        [(32, 100, 2, 4), (33, 100, 6, 11), (34, 100, 11, 13),
         (35, 100, 17, 18)])
    testing_lib.add_track_to_sequence(
        sequence, 1, [(57, 80, 4, 4.1), (58, 80, 12, 12.1)], is_drum=True)
    testing_lib.add_chords_to_sequence(
        sequence, [('N.C.', 0), ('C', 8), ('Am', 16)])
    self.sequence = sequence

  def testAugmentTranspose(self):
    # A degenerate range (2, 2) makes the otherwise-random transposition
    # deterministic: always +2 semitones.
    augmenter = data.NoteSequenceAugmenter(transpose_range=(2, 2))
    augmented_sequence = augmenter.augment(self.sequence)
    expected_sequence = music_pb2.NoteSequence()
    expected_sequence.tempos.add(qpm=60)
    # Melody pitches shift up 2 semitones (32->34, ...).
    testing_lib.add_track_to_sequence(
        expected_sequence, 0,
        [(34, 100, 2, 4), (35, 100, 6, 11), (36, 100, 11, 13),
         (37, 100, 17, 18)])
    # Drum pitches are expected to be unchanged by transposition.
    testing_lib.add_track_to_sequence(
        expected_sequence, 1, [(57, 80, 4, 4.1), (58, 80, 12, 12.1)],
        is_drum=True)
    # Chord symbols transpose as well; 'N.C.' (no chord) stays as-is.
    testing_lib.add_chords_to_sequence(
        expected_sequence, [('N.C.', 0), ('D', 8), ('Bm', 16)])
    self.assertEqual(expected_sequence, augmented_sequence)

  def testAugmentStretch(self):
    # Stretch factor 0.5 halves all note/chord times and doubles the tempo.
    augmenter = data.NoteSequenceAugmenter(stretch_range=(0.5, 0.5))
    augmented_sequence = augmenter.augment(self.sequence)
    expected_sequence = music_pb2.NoteSequence()
    expected_sequence.tempos.add(qpm=120)
    testing_lib.add_track_to_sequence(
        expected_sequence, 0,
        [(32, 100, 1, 2), (33, 100, 3, 5.5), (34, 100, 5.5, 6.5),
         (35, 100, 8.5, 9)])
    testing_lib.add_track_to_sequence(
        expected_sequence, 1, [(57, 80, 2, 2.05), (58, 80, 6, 6.05)],
        is_drum=True)
    testing_lib.add_chords_to_sequence(
        expected_sequence, [('N.C.', 0), ('C', 4), ('Am', 8)])
    self.assertEqual(expected_sequence, augmented_sequence)

  def testTfAugment(self):
    # tf_augment applies the same augmentation inside a TF graph: it maps a
    # serialized NoteSequence string to an augmented serialized string.
    augmenter = data.NoteSequenceAugmenter(
        transpose_range=(-3, -3), stretch_range=(2.0, 2.0))
    with self.test_session() as sess:
      sequence_str = tf.placeholder(tf.string)
      augmented_sequence_str_ = augmenter.tf_augment(sequence_str)
      augmented_sequence_str = sess.run(
          [augmented_sequence_str_],
          feed_dict={sequence_str: self.sequence.SerializeToString()})
    augmented_sequence = music_pb2.NoteSequence.FromString(
        augmented_sequence_str[0])
    # Expected result combines both ops: -3 semitones and a 2x time stretch
    # (so qpm 60 -> 30 and all times double).
    expected_sequence = music_pb2.NoteSequence()
    expected_sequence.tempos.add(qpm=30)
    testing_lib.add_track_to_sequence(
        expected_sequence, 0,
        [(29, 100, 4, 8), (30, 100, 12, 22), (31, 100, 22, 26),
         (32, 100, 34, 36)])
    testing_lib.add_track_to_sequence(
        expected_sequence, 1, [(57, 80, 8, 8.2), (58, 80, 24, 24.2)],
        is_drum=True)
    testing_lib.add_chords_to_sequence(
        expected_sequence, [('N.C.', 0), ('A', 16), ('Gbm', 32)])
    self.assertEqual(expected_sequence, augmented_sequence)
class BaseDataTest(object):
  """Mixin with shared helpers for data-converter tests.

  Intended to be mixed into a `tf.test.TestCase` subclass (it relies on the
  host class providing `assertEqual`).
  """

  def labels_to_inputs(self, labels, converter):
    """One-hot encodes each label array per the converter's input spec."""
    return [data.np_onehot(l, converter.input_depth, converter.input_dtype)
            for l in labels]

  def assertArraySetsEqual(self, lhs, rhs):
    """Asserts two collections of numpy arrays are equal, ignoring order."""
    def _np_sorted(arr_list):
      # Sort by the raw byte representation to get a deterministic order
      # independent of the arrays' original positions.
      # Fix: `tostring()` was deprecated in NumPy 1.19 (removed later);
      # `tobytes()` is the identical, supported spelling.
      return sorted(arr_list, key=lambda x: x.tobytes())
    self.assertEqual(len(lhs), len(rhs))
    for a, b in zip(_np_sorted(lhs), _np_sorted(rhs)):
      # Convert bool type to int for easier-to-read error messages.
      # Fix: use builtin `bool`/`int` instead of `np.bool`/`np.int`; those
      # aliases were deprecated in NumPy 1.20 and removed in 1.24, where the
      # old code raises AttributeError.  `dtype == bool` is equivalent.
      if a.dtype == bool:
        a = a.astype(int)
      if b.dtype == bool:
        b = b.astype(int)
      np.testing.assert_array_equal(a, b)
class BaseOneHotDataTest(BaseDataTest):
  """Shared test cases for one-hot data converters.

  Concrete subclasses must supply `self.converter_class`, `self.sequence`,
  `self.expected_unsliced_labels`, and `self.expected_sliced_labels`.
  """

  def testUnsliced(self):
    # slice_bars=None: the converter emits whole-sequence tensors.
    converter = self.converter_class(steps_per_quarter=1, slice_bars=None)
    tensors = converter.to_tensors(self.sequence)
    # Recover integer labels from the one-hot output tensors.
    actual_unsliced_labels = [np.argmax(t, axis=-1) for t in tensors.outputs]
    self.assertArraySetsEqual(
        self.labels_to_inputs(self.expected_unsliced_labels, converter),
        tensors.inputs)
    self.assertArraySetsEqual(
        self.expected_unsliced_labels, actual_unsliced_labels)

  def testTfUnsliced(self):
    # Graph-mode variant of testUnsliced: tf_to_tensors returns padded
    # tensors plus the true per-tensor lengths.
    converter = self.converter_class(steps_per_quarter=1, slice_bars=None)
    with self.test_session() as sess:
      sequence = tf.placeholder(tf.string)
      input_tensors_, output_tensors_, _, lengths_ = converter.tf_to_tensors(
          sequence)
      input_tensors, output_tensors, lengths = sess.run(
          [input_tensors_, output_tensors_, lengths_],
          feed_dict={sequence: self.sequence.SerializeToString()})
    # Trim padding using the reported lengths before comparing.
    actual_input_tensors = [t[:l] for t, l in zip(input_tensors, lengths)]
    actual_unsliced_labels = [
        np.argmax(t, axis=-1)[:l] for t, l in zip(output_tensors, lengths)]
    self.assertArraySetsEqual(
        self.labels_to_inputs(self.expected_unsliced_labels, converter),
        actual_input_tensors)
    self.assertArraySetsEqual(
        self.expected_unsliced_labels, actual_unsliced_labels)

  def testUnslicedEndToken(self):
    # add_end_token=True must grow input depth and output depth by one, set
    # end_token to the original output depth, and append that label to every
    # expected label sequence.
    orig_converter = self.converter_class(
        steps_per_quarter=1, slice_bars=None)
    self.assertEqual(None, orig_converter.end_token)
    converter = self.converter_class(
        steps_per_quarter=1, slice_bars=None, add_end_token=True)
    self.assertEqual(orig_converter.input_depth + 1, converter.input_depth)
    self.assertEqual(orig_converter.output_depth, converter.end_token)
    self.assertEqual(orig_converter.output_depth + 1, converter.output_depth)
    expected_unsliced_labels = [
        np.append(l, [converter.end_token])
        for l in self.expected_unsliced_labels]
    tensors = converter.to_tensors(self.sequence)
    actual_unsliced_labels = [np.argmax(t, axis=-1) for t in tensors.outputs]
    self.assertArraySetsEqual(
        self.labels_to_inputs(expected_unsliced_labels, converter),
        tensors.inputs)
    self.assertArraySetsEqual(expected_unsliced_labels, actual_unsliced_labels)

  def testSliced(self):
    # slice_bars=2: the converter emits one tensor per (presumably 2-bar)
    # slice rather than per sequence.
    converter = self.converter_class(
        steps_per_quarter=1, slice_bars=2, max_tensors_per_notesequence=None)
    tensors = converter.to_tensors(self.sequence)
    actual_sliced_labels = [np.argmax(t, axis=-1) for t in tensors.outputs]
    self.assertArraySetsEqual(
        self.labels_to_inputs(self.expected_sliced_labels, converter),
        tensors.inputs)
    self.assertArraySetsEqual(self.expected_sliced_labels, actual_sliced_labels)

  def testTfSliced(self):
    # Graph-mode variant of testSliced.
    converter = self.converter_class(
        steps_per_quarter=1, slice_bars=2, max_tensors_per_notesequence=None)
    with self.test_session() as sess:
      sequence = tf.placeholder(tf.string)
      input_tensors_, output_tensors_, _, lengths_ = converter.tf_to_tensors(
          sequence)
      input_tensors, output_tensors, lengths = sess.run(
          [input_tensors_, output_tensors_, lengths_],
          feed_dict={sequence: self.sequence.SerializeToString()})
    actual_sliced_labels = [
        np.argmax(t, axis=-1)[:l] for t, l in zip(output_tensors, lengths)]
    # NOTE(review): unlike testTfUnsliced, inputs are compared without
    # length-trimming here -- presumably sliced tensors are all full length;
    # confirm against the converter implementation.
    self.assertArraySetsEqual(
        self.labels_to_inputs(self.expected_sliced_labels, converter),
        input_tensors)
    self.assertArraySetsEqual(self.expected_sliced_labels, actual_sliced_labels)
class BaseChordConditionedOneHotDataTest(BaseOneHotDataTest):
  """Shared tests for one-hot converters that also emit chord controls.

  Subclasses provide `self.converter_class`, `self.sequence`, and the
  expected label arrays (including `expected_*_chord_labels`).
  """

  def testUnslicedChordConditioned(self):
    """Unsliced conversion emits both event labels and chord controls."""
    converter = self.converter_class(
        steps_per_quarter=1, slice_bars=None,
        chord_encoding=mm.MajorMinorChordOneHotEncoding())
    tensors = converter.to_tensors(self.sequence)
    actual_unsliced_labels = [np.argmax(t, axis=-1) for t in tensors.outputs]
    # Chord labels come from the separate `controls` tensors.
    actual_unsliced_chord_labels = [
        np.argmax(t, axis=-1) for t in tensors.controls]
    self.assertArraySetsEqual(
        self.labels_to_inputs(self.expected_unsliced_labels, converter),
        tensors.inputs)
    self.assertArraySetsEqual(
        self.expected_unsliced_labels, actual_unsliced_labels)
    self.assertArraySetsEqual(
        self.expected_unsliced_chord_labels, actual_unsliced_chord_labels)

  def testTfUnslicedChordConditioned(self):
    """The TF op path matches `to_tensors` for unsliced chord conversion."""
    converter = self.converter_class(
        steps_per_quarter=1, slice_bars=None,
        chord_encoding=mm.MajorMinorChordOneHotEncoding())
    with self.test_session() as sess:
      sequence = tf.placeholder(tf.string)
      input_tensors_, output_tensors_, control_tensors_, lengths_ = (
          converter.tf_to_tensors(sequence))
      input_tensors, output_tensors, control_tensors, lengths = sess.run(
          [input_tensors_, output_tensors_, control_tensors_, lengths_],
          feed_dict={sequence: self.sequence.SerializeToString()})
    # Trim padded tensors to their true lengths before comparing.
    actual_input_tensors = [t[:l] for t, l in zip(input_tensors, lengths)]
    actual_unsliced_labels = [
        np.argmax(t, axis=-1)[:l] for t, l in zip(output_tensors, lengths)]
    actual_unsliced_chord_labels = [
        np.argmax(t, axis=-1)[:l] for t, l in zip(control_tensors, lengths)]
    self.assertArraySetsEqual(
        self.labels_to_inputs(self.expected_unsliced_labels, converter),
        actual_input_tensors)
    self.assertArraySetsEqual(
        self.expected_unsliced_labels, actual_unsliced_labels)
    self.assertArraySetsEqual(
        self.expected_unsliced_chord_labels, actual_unsliced_chord_labels)

  def testSlicedChordConditioned(self):
    """2-bar slicing emits matching sliced labels and chord controls."""
    converter = self.converter_class(
        steps_per_quarter=1, slice_bars=2, max_tensors_per_notesequence=None,
        chord_encoding=mm.MajorMinorChordOneHotEncoding())
    tensors = converter.to_tensors(self.sequence)
    actual_sliced_labels = [np.argmax(t, axis=-1) for t in tensors.outputs]
    actual_sliced_chord_labels = [
        np.argmax(t, axis=-1) for t in tensors.controls]
    self.assertArraySetsEqual(
        self.labels_to_inputs(self.expected_sliced_labels, converter),
        tensors.inputs)
    self.assertArraySetsEqual(self.expected_sliced_labels, actual_sliced_labels)
    self.assertArraySetsEqual(
        self.expected_sliced_chord_labels, actual_sliced_chord_labels)

  def testTfSlicedChordConditioned(self):
    """The TF op path matches `to_tensors` for sliced chord conversion."""
    converter = self.converter_class(
        steps_per_quarter=1, slice_bars=2, max_tensors_per_notesequence=None,
        chord_encoding=mm.MajorMinorChordOneHotEncoding())
    with self.test_session() as sess:
      sequence = tf.placeholder(tf.string)
      input_tensors_, output_tensors_, control_tensors_, lengths_ = (
          converter.tf_to_tensors(sequence))
      input_tensors, output_tensors, control_tensors, lengths = sess.run(
          [input_tensors_, output_tensors_, control_tensors_, lengths_],
          feed_dict={sequence: self.sequence.SerializeToString()})
    actual_sliced_labels = [
        np.argmax(t, axis=-1)[:l] for t, l in zip(output_tensors, lengths)]
    actual_sliced_chord_labels = [
        np.argmax(t, axis=-1)[:l] for t, l in zip(control_tensors, lengths)]
    self.assertArraySetsEqual(
        self.labels_to_inputs(self.expected_sliced_labels, converter),
        input_tensors)
    self.assertArraySetsEqual(self.expected_sliced_labels, actual_sliced_labels)
    self.assertArraySetsEqual(
        self.expected_sliced_chord_labels, actual_sliced_chord_labels)
class OneHotMelodyConverterTest(BaseChordConditionedOneHotDataTest,
                                tf.test.TestCase):
  """Tests OneHotMelodyConverter against hand-computed melody labels."""

  def setUp(self):
    """Builds a two-instrument melody fixture plus expected label arrays.

    The expected labels are event values shifted by +2 to make room for
    the two special events (see the `np.array(es) + 2` below); chord
    labels are encoded with MajorMinorChordOneHotEncoding.
    """
    sequence = music_pb2.NoteSequence()
    sequence.tempos.add(qpm=60)
    testing_lib.add_track_to_sequence(
        sequence, 0,
        [(32, 100, 2, 4), (33, 1, 6, 11), (34, 1, 11, 13),
         (35, 1, 17, 19)])
    testing_lib.add_track_to_sequence(
        sequence, 1,
        [(35, 127, 2, 4), (36, 50, 6, 8),
         (71, 100, 33, 37), (73, 100, 34, 37),
         (33, 1, 50, 55), (34, 1, 55, 56)])
    testing_lib.add_chords_to_sequence(
        sequence,
        [('F', 2), ('C', 8), ('Am', 16), ('N.C.', 20),
         ('Bb7', 32), ('G', 36), ('F', 48), ('C', 52)])
    self.sequence = sequence

    # Subtract min pitch (21).
    expected_unsliced_events = [
        (NO_EVENT, NO_EVENT, 11, NO_EVENT,
         NOTE_OFF, NO_EVENT, 12, NO_EVENT,
         NO_EVENT, NO_EVENT, NO_EVENT, 13,
         NO_EVENT, NOTE_OFF, NO_EVENT, NO_EVENT),
        (NO_EVENT, 14, NO_EVENT, NOTE_OFF),
        (NO_EVENT, NO_EVENT, 14, NO_EVENT,
         NOTE_OFF, NO_EVENT, 15, NO_EVENT),
        (NO_EVENT, 50, 52, NO_EVENT,
         NO_EVENT, NOTE_OFF, NO_EVENT, NO_EVENT),
        (NO_EVENT, NO_EVENT, 12, NO_EVENT,
         NO_EVENT, NO_EVENT, NO_EVENT, 13),
    ]
    # +2 shifts event values past the two special (negative) event labels.
    self.expected_unsliced_labels = [
        np.array(es) + 2 for es in expected_unsliced_events]

    expected_sliced_events = [
        (NO_EVENT, NO_EVENT, 11, NO_EVENT,
         NOTE_OFF, NO_EVENT, 12, NO_EVENT),
        (NO_EVENT, NO_EVENT, 12, NO_EVENT,
         NO_EVENT, NO_EVENT, NO_EVENT, 13),
        (NO_EVENT, NO_EVENT, NO_EVENT, 13,
         NO_EVENT, NOTE_OFF, NO_EVENT, NO_EVENT),
        (NO_EVENT, NO_EVENT, 14, NO_EVENT,
         NOTE_OFF, NO_EVENT, 15, NO_EVENT),
        (NO_EVENT, 50, 52, NO_EVENT,
         NO_EVENT, NOTE_OFF, NO_EVENT, NO_EVENT)
    ]
    self.expected_sliced_labels = [
        np.array(es) + 2 for es in expected_sliced_events]

    chord_encoding = mm.MajorMinorChordOneHotEncoding()

    expected_unsliced_chord_events = [
        (NO_CHORD, NO_CHORD, 'F', 'F',
         'F', 'F', 'F', 'F',
         'C', 'C', 'C', 'C',
         'C', 'C', 'C', 'C'),
        ('Am', 'Am', 'Am', 'Am'),
        (NO_CHORD, NO_CHORD, 'F', 'F',
         'F', 'F', 'F', 'F'),
        ('Bb7', 'Bb7', 'Bb7', 'Bb7',
         'G', 'G', 'G', 'G'),
        ('F', 'F', 'F', 'F',
         'C', 'C', 'C', 'C'),
    ]
    self.expected_unsliced_chord_labels = [
        np.array([chord_encoding.encode_event(e) for e in es])
        for es in expected_unsliced_chord_events]

    expected_sliced_chord_events = [
        (NO_CHORD, NO_CHORD, 'F', 'F',
         'F', 'F', 'F', 'F'),
        ('F', 'F', 'F', 'F',
         'C', 'C', 'C', 'C'),
        ('C', 'C', 'C', 'C',
         'C', 'C', 'C', 'C'),
        (NO_CHORD, NO_CHORD, 'F', 'F',
         'F', 'F', 'F', 'F'),
        ('Bb7', 'Bb7', 'Bb7', 'Bb7',
         'G', 'G', 'G', 'G'),
    ]
    self.expected_sliced_chord_labels = [
        np.array([chord_encoding.encode_event(e) for e in es])
        for es in expected_sliced_chord_events]

    self.converter_class = data.OneHotMelodyConverter

  def testMaxOutputsPerNoteSequence(self):
    """The per-sequence tensor cap is respected and can be raised."""
    converter = data.OneHotMelodyConverter(
        steps_per_quarter=1, slice_bars=2, max_tensors_per_notesequence=2)
    self.assertEqual(2, len(converter.to_tensors(self.sequence).inputs))

    converter.max_tensors_per_notesequence = 3
    self.assertEqual(3, len(converter.to_tensors(self.sequence).inputs))

    # Cap above the available 5 slices returns all of them.
    converter.max_tensors_per_notesequence = 100
    self.assertEqual(5, len(converter.to_tensors(self.sequence).inputs))

  def testIsTraining(self):
    """The cap applies in training mode; setting it to None disables it."""
    converter = data.OneHotMelodyConverter(
        steps_per_quarter=1, slice_bars=2, max_tensors_per_notesequence=2)
    self.is_training = True
    self.assertEqual(2, len(converter.to_tensors(self.sequence).inputs))

    converter.max_tensors_per_notesequence = None
    self.assertEqual(5, len(converter.to_tensors(self.sequence).inputs))

  def testToNoteSequence(self):
    """Output tensors decode back into the expected NoteSequence."""
    converter = data.OneHotMelodyConverter(
        steps_per_quarter=1, slice_bars=4, max_tensors_per_notesequence=1)
    tensors = converter.to_tensors(
        filter_instrument(self.sequence, 0))
    sequences = converter.to_notesequences(tensors.outputs)
    self.assertEqual(1, len(sequences))
    expected_sequence = music_pb2.NoteSequence(ticks_per_quarter=220)
    expected_sequence.tempos.add(qpm=120)
    testing_lib.add_track_to_sequence(
        expected_sequence, 0,
        [(32, 80, 1.0, 2.0), (33, 80, 3.0, 5.5), (34, 80, 5.5, 6.5)])
    self.assertProtoEquals(expected_sequence, sequences[0])

  def testToNoteSequenceChordConditioned(self):
    """Decoding with chord controls also restores the chord annotations."""
    converter = data.OneHotMelodyConverter(
        steps_per_quarter=1, slice_bars=4, max_tensors_per_notesequence=1,
        chord_encoding=mm.MajorMinorChordOneHotEncoding())
    tensors = converter.to_tensors(
        filter_instrument(self.sequence, 0))
    sequences = converter.to_notesequences(tensors.outputs, tensors.controls)
    self.assertEqual(1, len(sequences))
    expected_sequence = music_pb2.NoteSequence(ticks_per_quarter=220)
    expected_sequence.tempos.add(qpm=120)
    testing_lib.add_track_to_sequence(
        expected_sequence, 0,
        [(32, 80, 1.0, 2.0), (33, 80, 3.0, 5.5), (34, 80, 5.5, 6.5)])
    testing_lib.add_chords_to_sequence(
        expected_sequence, [('N.C.', 0), ('F', 1), ('C', 4)])
    self.assertProtoEquals(expected_sequence, sequences[0])
class OneHotDrumsConverterTest(BaseOneHotDataTest, tf.test.TestCase):
  """Tests DrumsConverter with one-hot (binary-coded) drum labels."""

  def setUp(self):
    """Builds two drum tracks plus the expected bitmask label arrays.

    Each expected event value is a bitmask over the drum classes, e.g.
    5 = classes 0 and 2 hit together, 160 = classes 5 and 7.
    """
    sequence = music_pb2.NoteSequence()
    sequence.tempos.add(qpm=60)
    testing_lib.add_track_to_sequence(
        sequence, 0,
        [(35, 100, 0, 10), (44, 55, 1, 2), (40, 45, 4, 5), (35, 45, 9, 10),
         (40, 45, 13, 13), (55, 120, 16, 18), (60, 100, 16, 17),
         (52, 99, 19, 20)],
        is_drum=True)
    testing_lib.add_track_to_sequence(
        sequence, 1,
        [(35, 55, 1, 2), (40, 45, 25, 26), (55, 120, 28, 30), (60, 100, 28, 29),
         (52, 99, 31, 33)],
        is_drum=True)
    self.sequence = sequence

    expected_unsliced_events = [
        (1, 5, NO_DRUMS, NO_DRUMS,
         2, NO_DRUMS, NO_DRUMS, NO_DRUMS),
        (NO_DRUMS, 1, NO_DRUMS, NO_DRUMS,
         NO_DRUMS, 2, NO_DRUMS, NO_DRUMS,
         160, NO_DRUMS, NO_DRUMS, 256),
        (NO_DRUMS, 2, NO_DRUMS, NO_DRUMS,
         160, NO_DRUMS, NO_DRUMS, 256)
    ]
    self.expected_unsliced_labels = [
        np.array(es) for es in expected_unsliced_events]

    expected_sliced_events = [
        (1, 5, NO_DRUMS, NO_DRUMS,
         2, NO_DRUMS, NO_DRUMS, NO_DRUMS),
        (NO_DRUMS, 1, NO_DRUMS, NO_DRUMS,
         NO_DRUMS, 2, NO_DRUMS, NO_DRUMS),
        (NO_DRUMS, 2, NO_DRUMS, NO_DRUMS,
         160, NO_DRUMS, NO_DRUMS, 256)
    ]
    self.expected_sliced_labels = [
        np.array(es) for es in expected_sliced_events]

    self.converter_class = data.DrumsConverter

  def testMaxOutputsPerNoteSequence(self):
    """The per-sequence tensor cap is respected and can be raised."""
    converter = data.DrumsConverter(
        steps_per_quarter=1, slice_bars=1, max_tensors_per_notesequence=2)
    self.assertEqual(2, len(converter.to_tensors(self.sequence).inputs))

    converter.max_tensors_per_notesequence = 3
    self.assertEqual(3, len(converter.to_tensors(self.sequence).inputs))

    # Cap above the available 5 slices returns all of them.
    converter.max_tensors_per_notesequence = 100
    self.assertEqual(5, len(converter.to_tensors(self.sequence).inputs))

  def testIsTraining(self):
    """The cap applies in training mode; setting it to None disables it."""
    converter = data.DrumsConverter(
        steps_per_quarter=1, slice_bars=1, max_tensors_per_notesequence=2)
    self.is_training = True
    self.assertEqual(2, len(converter.to_tensors(self.sequence).inputs))

    converter.max_tensors_per_notesequence = None
    self.assertEqual(5, len(converter.to_tensors(self.sequence).inputs))

  def testToNoteSequence(self):
    """Output tensors decode back to a drum track on channel 9."""
    converter = data.DrumsConverter(
        steps_per_quarter=1, slice_bars=2, max_tensors_per_notesequence=1)
    tensors = converter.to_tensors(
        filter_instrument(self.sequence, 1))
    sequences = converter.to_notesequences(tensors.outputs)
    self.assertEqual(1, len(sequences))
    expected_sequence = music_pb2.NoteSequence(ticks_per_quarter=220)
    expected_sequence.tempos.add(qpm=120)
    testing_lib.add_track_to_sequence(
        expected_sequence, 9,
        [(38, 80, 0.5, 1.0),
         (48, 80, 2.0, 2.5), (49, 80, 2.0, 2.5),
         (51, 80, 3.5, 4.0)],
        is_drum=True)
    self.assertProtoEquals(expected_sequence, sequences[0])
class RollInputsOneHotDrumsConverterTest(OneHotDrumsConverterTest):
  """Re-runs the one-hot drums tests with binary-roll inputs enabled."""

  def labels_to_inputs(self, labels, converter):
    """Converts bitmask drum labels into the roll (multi-hot) input format.

    Bit j of a label means drum class j hits at that step. The last two
    input slots are reserved: index -2 flags the end token and index -1
    flags silence (label 0).

    Args:
      labels: Sequence of 1-D integer label arrays.
      converter: Converter supplying `input_depth`, `input_dtype`, and
        `end_token`.

    Returns:
      A list of 2-D arrays of shape (len(label_arr), converter.input_depth).
    """
    inputs = []
    for label_arr in labels:
      input_ = np.zeros((len(label_arr), converter.input_depth),
                        converter.input_dtype)
      for i, l in enumerate(label_arr):
        if l == converter.end_token:
          input_[i, -2] = 1
        elif l == 0:
          input_[i, -1] = 1
        else:
          # Unpack the label bitmask into individual drum-class slots.
          j = 0
          while l:
            input_[i, j] = l % 2
            l >>= 1
            j += 1
          # `np.int` was removed in NumPy 1.24; the builtin is equivalent.
          assert np.any(input_[i]), label_arr.astype(int)
      inputs.append(input_)
    return inputs

  def setUp(self):
    super(RollInputsOneHotDrumsConverterTest, self).setUp()
    # Reuse all inherited tests but construct converters with roll inputs.
    self.converter_class = functools.partial(
        data.DrumsConverter, roll_input=True)
class RollOutputsDrumsConverterTest(BaseDataTest, tf.test.TestCase):
  """Tests DrumsConverter with roll (multi-hot) inputs and outputs."""

  def setUp(self):
    """Builds a drum track plus a non-drum track that should be ignored."""
    sequence = music_pb2.NoteSequence()
    sequence.tempos.add(qpm=60)
    testing_lib.add_track_to_sequence(
        sequence, 0,
        [(35, 100, 0, 10), (35, 55, 1, 2), (44, 55, 1, 2),
         (40, 45, 4, 5),
         (35, 45, 9, 10),
         (40, 45, 13, 13),
         (55, 120, 16, 18), (60, 100, 16, 17), (52, 99, 19, 20),
         (40, 45, 33, 34), (55, 120, 36, 37), (60, 100, 36, 37),
         (52, 99, 39, 42)],
        is_drum=True)
    testing_lib.add_track_to_sequence(
        sequence, 1,
        [(35, 100, 5, 10), (35, 55, 6, 8), (44, 55, 7, 9)],
        is_drum=False)
    self.sequence = sequence

  def testSliced(self):
    """2-bar slices produce the expected rolls plus a silence indicator."""
    # Per slice, per step: the list of drum classes that hit at that step.
    expected_sliced_events = [
        ([0], [0, 2], [], [],
         [1], [], [], []),
        ([], [0], [], [],
         [], [1], [], []),
        ([], [1], [], [],
         [5, 7], [], [], [8]),
    ]
    # 1 marks steps where no drum hits at all.
    expected_silent_array = np.array([
        [0, 0, 1, 1, 0, 1, 1, 1],
        [1, 0, 1, 1, 1, 0, 1, 1],
        [1, 0, 1, 1, 0, 1, 1, 0],
    ])
    # NOTE: `np.bool` was removed in NumPy 1.24; builtin `bool` is the
    # documented replacement and behaves identically here.
    expected_output_tensors = np.zeros(
        (len(expected_sliced_events), 8, len(data.REDUCED_DRUM_PITCH_CLASSES)),
        bool)
    for i, events in enumerate(expected_sliced_events):
      for j, e in enumerate(events):
        expected_output_tensors[i, j, e] = 1

    converter = data.DrumsConverter(
        pitch_classes=data.REDUCED_DRUM_PITCH_CLASSES,
        slice_bars=2,
        steps_per_quarter=1,
        roll_input=True,
        roll_output=True,
        max_tensors_per_notesequence=None)

    # 9 drum classes + 1 silence bit for inputs; 9 classes for outputs.
    self.assertEqual(10, converter.input_depth)
    self.assertEqual(9, converter.output_depth)

    tensors = converter.to_tensors(self.sequence)

    self.assertArraySetsEqual(
        np.append(
            expected_output_tensors,
            np.expand_dims(expected_silent_array, axis=2),
            axis=2),
        tensors.inputs)
    self.assertArraySetsEqual(expected_output_tensors, tensors.outputs)

  def testToNoteSequence(self):
    """Roll outputs decode back to the expected drum sequence on channel 9."""
    converter = data.DrumsConverter(
        pitch_classes=data.REDUCED_DRUM_PITCH_CLASSES,
        slice_bars=None,
        gap_bars=None,
        steps_per_quarter=1,
        roll_input=True,
        roll_output=True,
        max_tensors_per_notesequence=None)
    tensors = converter.to_tensors(self.sequence)
    sequences = converter.to_notesequences(tensors.outputs)
    self.assertEqual(1, len(sequences))

    expected_sequence = music_pb2.NoteSequence(ticks_per_quarter=220)
    expected_sequence.tempos.add(qpm=120)
    testing_lib.add_track_to_sequence(
        expected_sequence, 0,
        [(36, 80, 0, 0.5), (42, 80, 0.5, 1.0), (36, 80, 0.5, 1.0),
         (38, 80, 2.0, 2.5),
         (36, 80, 4.5, 5.0),
         (38, 80, 6.5, 7.0),
         (48, 80, 8.0, 8.5), (49, 80, 8.0, 8.5), (51, 80, 9.5, 10.0),
         (38, 80, 16.5, 17.0), (48, 80, 18.0, 18.5), (49, 80, 18.0, 18.5),
         (51, 80, 19.5, 20.0)],
        is_drum=True)
    for n in expected_sequence.notes:
      n.instrument = 9
    self.assertProtoEquals(expected_sequence, sequences[0])
class TrioConverterTest(BaseDataTest, tf.test.TestCase):
  """Tests TrioConverter (melody + bass + drums, optionally with chords)."""

  def setUp(self):
    """Builds a 4-track trio fixture and the expected per-slice labels."""
    sequence = music_pb2.NoteSequence()
    sequence.tempos.add(qpm=60)
    # Mel 1, coverage bars: [3, 9] / [2, 9]
    testing_lib.add_track_to_sequence(
        sequence, 1, [(51, 1, 13, 37)])
    # Mel 2, coverage bars: [1, 3] / [0, 4]
    testing_lib.add_track_to_sequence(
        sequence, 2, [(52, 1, 4, 16)])
    # Bass, coverage bars: [0, 1], [4, 6] / [0, 7]
    testing_lib.add_track_to_sequence(
        sequence, 3, [(50, 1, 2, 5), (49, 1, 16, 25)])
    # Drum, coverage bars: [0, 2], [6, 7] / [0, 3], [5, 8]
    testing_lib.add_track_to_sequence(
        sequence, 4,
        [(35, 1, 0, 1), (40, 1, 4, 5),
         (35, 1, 9, 9), (35, 1, 25, 25),
         (40, 1, 29, 29)],
        is_drum=True)
    # Chords.
    testing_lib.add_chords_to_sequence(
        sequence, [('C', 4), ('Am', 16), ('G', 32)])

    # Assign programs so the tracks classify as melody/melody/bass.
    for n in sequence.notes:
      if n.instrument == 1:
        n.program = 0
      elif n.instrument == 2:
        n.program = 10
      elif n.instrument == 3:
        n.program = 33

    self.sequence = sequence

    m1 = np.array(
        [NO_EVENT] * 13 + [30] + [NO_EVENT] * 23 + [NOTE_OFF] + [NO_EVENT] * 2,
        np.int32) + 2
    m2 = np.array(
        [NO_EVENT] * 4 + [31] + [NO_EVENT] * 11 + [NOTE_OFF] + [NO_EVENT] * 23,
        np.int32) + 2
    b = np.array(
        [NO_EVENT, NO_EVENT, 29, NO_EVENT, NO_EVENT, NOTE_OFF] +
        [NO_EVENT] * 10 + [28] + [NO_EVENT] * 8 + [NOTE_OFF] + [NO_EVENT] * 14,
        np.int32) + 2
    d = ([1, NO_DRUMS, NO_DRUMS, NO_DRUMS,
          2, NO_DRUMS, NO_DRUMS, NO_DRUMS,
          NO_DRUMS, 1, NO_DRUMS, NO_DRUMS] +
         [NO_DRUMS] * 12 +
         [NO_DRUMS, 1, NO_DRUMS, NO_DRUMS,
          NO_DRUMS, 2, NO_DRUMS, NO_DRUMS] +
         [NO_DRUMS] * 4)

    c = [NO_CHORD, NO_CHORD, NO_CHORD, NO_CHORD,
         'C', 'C', 'C', 'C',
         'C', 'C', 'C', 'C',
         'C', 'C', 'C', 'C',
         'Am', 'Am', 'Am', 'Am',
         'Am', 'Am', 'Am', 'Am',
         'Am', 'Am', 'Am', 'Am',
         'Am', 'Am', 'Am', 'Am',
         'G', 'G', 'G', 'G']

    # (start bar, end bar) windows and the tracks expected inside them.
    expected_sliced_sets = [
        ((2, 4), (m1, b, d)),
        ((5, 7), (m1, b, d)),
        ((6, 8), (m1, b, d)),
        ((0, 2), (m2, b, d)),
        ((1, 3), (m2, b, d)),
        ((2, 4), (m2, b, d)),
    ]

    self.expected_sliced_labels = [
        np.stack([l[i*4:j*4] for l in x]) for (i, j), x in expected_sliced_sets]

    chord_encoding = mm.MajorMinorChordOneHotEncoding()

    expected_sliced_chord_events = [
        c[i*4:j*4] for (i, j), _ in expected_sliced_sets]
    self.expected_sliced_chord_labels = [
        np.array([chord_encoding.encode_event(e) for e in es])
        for es in expected_sliced_chord_events]

  def testSliced(self):
    """2-bar slices of the trio yield the expected per-track labels."""
    converter = data.TrioConverter(
        steps_per_quarter=1, gap_bars=1, slice_bars=2,
        max_tensors_per_notesequence=None)
    tensors = converter.to_tensors(self.sequence)
    self.assertArraySetsEqual(tensors.inputs, tensors.outputs)
    # Split the depth axis into melody (90), bass (90), and drums (512).
    # NOTE: np.stack requires a sequence; passing a generator raises
    # TypeError on modern NumPy.
    actual_sliced_labels = [
        np.stack(
            [np.argmax(s, axis=-1) for s in np.split(t, [90, 180], axis=-1)])
        for t in tensors.outputs]
    self.assertArraySetsEqual(self.expected_sliced_labels, actual_sliced_labels)

  def testSlicedChordConditioned(self):
    """Chord-conditioned slicing also reproduces the chord labels."""
    converter = data.TrioConverter(
        steps_per_quarter=1, gap_bars=1, slice_bars=2,
        max_tensors_per_notesequence=None,
        chord_encoding=mm.MajorMinorChordOneHotEncoding())
    tensors = converter.to_tensors(self.sequence)
    self.assertArraySetsEqual(tensors.inputs, tensors.outputs)
    # NOTE: np.stack requires a sequence, not a generator (see testSliced).
    actual_sliced_labels = [
        np.stack(
            [np.argmax(s, axis=-1) for s in np.split(t, [90, 180], axis=-1)])
        for t in tensors.outputs]
    actual_sliced_chord_labels = [
        np.argmax(t, axis=-1) for t in tensors.controls]
    self.assertArraySetsEqual(self.expected_sliced_labels, actual_sliced_labels)
    self.assertArraySetsEqual(
        self.expected_sliced_chord_labels, actual_sliced_chord_labels)

  def testToNoteSequence(self):
    """Concatenated trio one-hots decode to the expected NoteSequence."""
    converter = data.TrioConverter(
        steps_per_quarter=1, slice_bars=2, max_tensors_per_notesequence=1)

    mel_oh = data.np_onehot(self.expected_sliced_labels[3][0], 90)
    bass_oh = data.np_onehot(self.expected_sliced_labels[3][1], 90)
    drums_oh = data.np_onehot(self.expected_sliced_labels[3][2], 512)
    output_tensors = np.concatenate([mel_oh, bass_oh, drums_oh], axis=-1)

    sequences = converter.to_notesequences([output_tensors])
    self.assertEqual(1, len(sequences))

    self.assertProtoEquals(
        """
        ticks_per_quarter: 220
        tempos < qpm: 120 >
        notes <
          instrument: 0 pitch: 52 start_time: 2.0 end_time: 4.0 program: 0
          velocity: 80
        >
        notes <
          instrument: 1 pitch: 50 start_time: 1.0 end_time: 2.5 program: 33
          velocity: 80
        >
        notes <
          instrument: 9 pitch: 36 start_time: 0.0 end_time: 0.5 velocity: 80
          is_drum: True
        >
        notes <
          instrument: 9 pitch: 38 start_time: 2.0 end_time: 2.5 velocity: 80
          is_drum: True
        >
        total_time: 4.0
        """,
        sequences[0])

  def testToNoteSequenceChordConditioned(self):
    """Decoding with chord controls restores the chord annotations too."""
    converter = data.TrioConverter(
        steps_per_quarter=1, slice_bars=2, max_tensors_per_notesequence=1,
        chord_encoding=mm.MajorMinorChordOneHotEncoding())

    mel_oh = data.np_onehot(self.expected_sliced_labels[3][0], 90)
    bass_oh = data.np_onehot(self.expected_sliced_labels[3][1], 90)
    drums_oh = data.np_onehot(self.expected_sliced_labels[3][2], 512)
    chords_oh = data.np_onehot(self.expected_sliced_chord_labels[3], 25)
    output_tensors = np.concatenate([mel_oh, bass_oh, drums_oh], axis=-1)

    sequences = converter.to_notesequences([output_tensors], [chords_oh])
    self.assertEqual(1, len(sequences))

    self.assertProtoEquals(
        """
        ticks_per_quarter: 220
        tempos < qpm: 120 >
        notes <
          instrument: 0 pitch: 52 start_time: 2.0 end_time: 4.0 program: 0
          velocity: 80
        >
        notes <
          instrument: 1 pitch: 50 start_time: 1.0 end_time: 2.5 program: 33
          velocity: 80
        >
        notes <
          instrument: 9 pitch: 36 start_time: 0.0 end_time: 0.5 velocity: 80
          is_drum: True
        >
        notes <
          instrument: 9 pitch: 38 start_time: 2.0 end_time: 2.5 velocity: 80
          is_drum: True
        >
        text_annotations <
          text: 'N.C.' annotation_type: CHORD_SYMBOL
        >
        text_annotations <
          time: 2.0 text: 'C' annotation_type: CHORD_SYMBOL
        >
        total_time: 4.0
        """,
        sequences[0])
class GrooveConverterTest(tf.test.TestCase):
def initialize_sequence(self):
sequence = music_pb2.NoteSequence()
sequence.ticks_per_quarter = 240
sequence.tempos.add(qpm=120)
sequence.time_signatures.add(numerator=4, denominator=4)
return sequence
  def setUp(self):
    """Builds groove fixtures: raw, tapped, quantized, and reduced variants.

    All fixtures are one or two bars of 4/4 at 120 qpm, so a sixteenth note
    lasts 0.125 s. The derived sequences (tap, quantized, no-closed-hh,
    no-snare) are the expected converter *inputs* for the corresponding
    GrooveConverter options applied to `one_bar_sequence`.
    """
    self.one_bar_sequence = self.initialize_sequence()
    self.one_bar_sequence.sequence_metadata.genre.extend(['Rock'])
    self.two_bar_sequence = self.initialize_sequence()
    self.tap_sequence = self.initialize_sequence()
    self.quantized_sequence = self.initialize_sequence()
    self.no_closed_hh_sequence = self.initialize_sequence()
    self.no_snare_sequence = self.initialize_sequence()

    # Representative MIDI pitch for each drum class used below.
    kick_pitch = data.REDUCED_DRUM_PITCH_CLASSES[0][0]
    snare_pitch = data.REDUCED_DRUM_PITCH_CLASSES[1][0]
    closed_hh_pitch = data.REDUCED_DRUM_PITCH_CLASSES[2][0]
    tap_pitch = data.REDUCED_DRUM_PITCH_CLASSES[3][0]

    testing_lib.add_track_to_sequence(
        self.tap_sequence,
        9,
        [
            # 0.125 is a sixteenth note at 120bpm
            (tap_pitch, 80, 0, 0.125),
            (tap_pitch, 127, 0.26125, 0.375),  # Not on the beat
            (tap_pitch, 107, 0.5, 0.625),
            (tap_pitch, 80, 0.75, 0.825),
            (tap_pitch, 80, 1, 1.125),
            (tap_pitch, 80, 1.25, 1.375),
            (tap_pitch, 82, 1.523, 1.625),  # Not on the beat
            (tap_pitch, 80, 1.75, 1.825)
        ])

    testing_lib.add_track_to_sequence(
        self.quantized_sequence,
        9,
        [
            # 0.125 is a sixteenth note at 120bpm
            (kick_pitch, 0, 0, 0.125),
            (closed_hh_pitch, 0, 0, 0.125),
            (closed_hh_pitch, 0, 0.25, 0.375),
            (snare_pitch, 0, 0.5, 0.625),
            (closed_hh_pitch, 0, 0.5, 0.625),
            (closed_hh_pitch, 0, 0.75, 0.825),
            (kick_pitch, 0, 1, 1.125),
            (closed_hh_pitch, 0, 1, 1.125),
            (closed_hh_pitch, 0, 1.25, 1.375),
            (snare_pitch, 0, 1.5, 1.625),
            (closed_hh_pitch, 0, 1.5, 1.625),
            (closed_hh_pitch, 0, 1.75, 1.825)
        ])

    testing_lib.add_track_to_sequence(
        self.no_closed_hh_sequence,
        9,
        [
            # 0.125 is a sixteenth note at 120bpm
            (kick_pitch, 80, 0, 0.125),
            (snare_pitch, 103, 0.5, 0.625),
            (kick_pitch, 80, 1, 1.125),
            (snare_pitch, 82, 1.523, 1.625),  # Not on the beat
        ])

    testing_lib.add_track_to_sequence(
        self.no_snare_sequence,
        9,
        [
            # 0.125 is a sixteenth note at 120bpm
            (kick_pitch, 80, 0, 0.125),
            (closed_hh_pitch, 72, 0, 0.125),
            (closed_hh_pitch, 127, 0.26125, 0.375),  # Not on the beat
            (closed_hh_pitch, 107, 0.5, 0.625),
            (closed_hh_pitch, 80, 0.75, 0.825),
            (kick_pitch, 80, 1, 1.125),
            (closed_hh_pitch, 80, 1, 1.125),
            (closed_hh_pitch, 80, 1.25, 1.375),
            (closed_hh_pitch, 80, 1.5, 1.625),
            (closed_hh_pitch, 80, 1.75, 1.825)
        ])

    testing_lib.add_track_to_sequence(
        self.one_bar_sequence,
        9,
        [
            # 0.125 is a sixteenth note at 120bpm
            (kick_pitch, 80, 0, 0.125),
            (closed_hh_pitch, 72, 0, 0.125),
            (closed_hh_pitch, 127, 0.26125, 0.375),  # Not on the beat
            (snare_pitch, 103, 0.5, 0.625),
            (closed_hh_pitch, 107, 0.5, 0.625),
            (closed_hh_pitch, 80, 0.75, 0.825),
            (kick_pitch, 80, 1, 1.125),
            (closed_hh_pitch, 80, 1, 1.125),
            (closed_hh_pitch, 80, 1.25, 1.375),
            (snare_pitch, 82, 1.523, 1.625),  # Not on the beat
            (closed_hh_pitch, 80, 1.5, 1.625),
            (closed_hh_pitch, 80, 1.75, 1.825)
        ])

    testing_lib.add_track_to_sequence(
        self.two_bar_sequence,
        9,
        [
            # 0.125 is a sixteenth note at 120bpm
            (kick_pitch, 80, 0, 0.125),
            (closed_hh_pitch, 72, 0, 0.125),
            (closed_hh_pitch, 127, 0.26, 0.375),  # Not on the beat
            (snare_pitch, 103, 0.5, 0.625),
            (closed_hh_pitch, 107, 0.5, 0.625),
            (closed_hh_pitch, 80, 0.75, 0.825),
            (kick_pitch, 80, 1, 1.125),
            (closed_hh_pitch, 80, 1, 1.125),
            (closed_hh_pitch, 80, 1.25, 1.375),
            (snare_pitch, 80, 1.5, 1.625),
            (closed_hh_pitch, 80, 1.5, 1.625),
            (closed_hh_pitch, 80, 1.75, 1.825),
            (kick_pitch, 80, 2, 2.125),
            (closed_hh_pitch, 72, 2, 2.125),
            (closed_hh_pitch, 127, 2.25, 2.375),
            (snare_pitch, 103, 2.5, 2.625),
            (closed_hh_pitch, 107, 2.5, 2.625),
            (closed_hh_pitch, 80, 2.75, 2.825),
            (kick_pitch, 80, 3.06, 3.125),  # Not on the beat
            (closed_hh_pitch, 109, 3, 3.125),
            (closed_hh_pitch, 80, 3.25, 3.375),
            (snare_pitch, 80, 3.5, 3.625),
            (closed_hh_pitch, 80, 3.50, 3.625),
            (closed_hh_pitch, 90, 3.75, 3.825)
        ])

    # Mark all notes as drums so the GrooveConverter extracts them.
    for seq in [self.one_bar_sequence, self.two_bar_sequence]:
      for n in seq.notes:
        n.is_drum = True
def compare_seqs(self, seq1, seq2, verbose=False, categorical=False):
self.compare_notes(seq1.notes, seq2.notes, verbose=verbose,
categorical=categorical)
def compare_notes(self, note_list1, note_list2, verbose=False,
categorical=False):
for n1, n2 in zip(note_list1, note_list2):
if verbose:
tf.logging.info((n1.pitch, n1.start_time, n1.velocity))
tf.logging.info((n2.pitch, n2.start_time, n2.velocity))
print()
else:
if categorical:
self.assertEqual(n1.pitch, n2.pitch)
assert np.abs(n1.start_time-n2.start_time) < 0.005
assert np.abs(n1.velocity-n2.velocity) <= 4
else:
self.assertEqual((n1.pitch, n1.start_time, n1.velocity),
(n2.pitch, n2.start_time, n2.velocity))
def testToTensorAndNoteSequence(self):
# Convert one or two measures to a tensor and back
# This example should yield basically a perfect reconstruction
converter = data.GrooveConverter(
split_bars=None, steps_per_quarter=4, quarters_per_bar=4,
max_tensors_per_notesequence=5)
# Test one bar sequence
tensors = converter.to_tensors(self.one_bar_sequence)
# Should output a tuple containing a tensor of shape (16,27)
self.assertEqual((16, 27), tensors.outputs[0].shape)
sequences = converter.to_items(tensors.outputs)
self.assertEqual(1, len(sequences))
self.compare_seqs(self.one_bar_sequence, sequences[0])
# Test two bar sequence
tensors = converter.to_tensors(self.two_bar_sequence)
# Should output a tuple containing a tensor of shape (32,27)
self.assertEqual((32, 27), tensors.outputs[0].shape)
sequences = converter.to_items(tensors.outputs)
self.assertEqual(1, len(sequences))
self.compare_seqs(self.two_bar_sequence, sequences[0])
def testToTensorAndNoteSequenceWithSlicing(self):
converter = data.GrooveConverter(
split_bars=1, steps_per_quarter=4, quarters_per_bar=4,
max_tensors_per_notesequence=5)
# Test one bar sequence
tensors = converter.to_tensors(self.one_bar_sequence)
# Should output a tuple containing a tensor of shape (16,27)
self.assertEqual(1, len(tensors.outputs))
self.assertEqual((16, 27), tensors.outputs[0].shape)
sequences = converter.to_items(tensors.outputs)
self.assertEqual(1, len(sequences))
self.compare_seqs(self.one_bar_sequence, sequences[0])
# Test two bar sequence
tensors = converter.to_tensors(self.two_bar_sequence)
# Should output a tuple containing 2 tensors of shape (16,27)
self.assertEqual((16, 27), tensors.outputs[0].shape)
self.assertEqual((16, 27), tensors.outputs[1].shape)
sequences = converter.to_items(tensors.outputs)
self.assertEqual(2, len(sequences))
# Get notes in first bar
sequence0 = sequences[0]
notes0 = [n for n in self.two_bar_sequence.notes if n.start_time < 2]
reconstructed_notes0 = [n for n in sequence0.notes]
# Get notes in second bar, back them up by 2 secs for comparison
sequence1 = sequences[1]
notes1 = [n for n in self.two_bar_sequence.notes if n.start_time >= 2]
for n in notes1:
n.start_time = n.start_time-2
n.end_time = n.end_time-2
reconstructed_notes1 = [n for n in sequence1.notes]
self.compare_notes(notes0, reconstructed_notes0)
self.compare_notes(notes1, reconstructed_notes1)
def testTapify(self):
converter = data.GrooveConverter(
split_bars=None, steps_per_quarter=4, quarters_per_bar=4,
max_tensors_per_notesequence=5, tapify=True)
tensors = converter.to_tensors(self.one_bar_sequence)
output_sequences = converter.to_items(tensors.outputs)
# Output sequence should match the initial input.
self.compare_seqs(self.one_bar_sequence, output_sequences[0])
# Input sequence should match the pre-defined tap_sequence.
input_sequences = converter.to_items(tensors.inputs)
self.compare_seqs(self.tap_sequence, input_sequences[0])
def testTapWithFixedVelocity(self):
converter = data.GrooveConverter(
split_bars=None, steps_per_quarter=4, quarters_per_bar=4,
max_tensors_per_notesequence=5, tapify=True, fixed_velocities=True)
tensors = converter.to_tensors(self.one_bar_sequence)
output_sequences = converter.to_items(tensors.outputs)
# Output sequence should match the initial input.
self.compare_seqs(self.one_bar_sequence, output_sequences[0])
# Input sequence should match the pre-defined tap_sequence but with 0 vels.
input_sequences = converter.to_items(tensors.inputs)
tap_notes = self.tap_sequence.notes
for note in tap_notes:
note.velocity = 0
self.compare_notes(tap_notes, input_sequences[0].notes)
def testHumanize(self):
converter = data.GrooveConverter(
split_bars=None, steps_per_quarter=4, quarters_per_bar=4,
max_tensors_per_notesequence=5, humanize=True)
tensors = converter.to_tensors(self.one_bar_sequence)
output_sequences = converter.to_items(tensors.outputs)
# Output sequence should match the initial input.
self.compare_seqs(self.one_bar_sequence, output_sequences[0])
# Input sequence should match the pre-defined quantized_sequence.
input_sequences = converter.to_items(tensors.inputs)
self.compare_seqs(self.quantized_sequence, input_sequences[0])
def testAddInstruments(self):
# Remove closed hi-hat from inputs.
converter = data.GrooveConverter(
split_bars=None, steps_per_quarter=4, quarters_per_bar=4,
max_tensors_per_notesequence=5, add_instruments=[2])
tensors = converter.to_tensors(self.one_bar_sequence)
output_sequences = converter.to_items(tensors.outputs)
# Output sequence should match the initial input.
self.compare_seqs(self.one_bar_sequence, output_sequences[0])
# Input sequence should match the pre-defined sequence.
input_sequences = converter.to_items(tensors.inputs)
self.compare_seqs(self.no_closed_hh_sequence, input_sequences[0])
# Remove snare from inputs.
converter = data.GrooveConverter(
split_bars=None, steps_per_quarter=4, quarters_per_bar=4,
max_tensors_per_notesequence=5, add_instruments=[1])
tensors = converter.to_tensors(self.one_bar_sequence)
output_sequences = converter.to_items(tensors.outputs)
# Output sequence should match the initial input.
self.compare_seqs(self.one_bar_sequence, output_sequences[0])
# Input sequence should match the pre-defined sequence.
input_sequences = converter.to_items(tensors.inputs)
self.compare_seqs(self.no_snare_sequence, input_sequences[0])
  def testCategorical(self):
    """Categorical (binned) encoding round-trips within quantization error."""
    # Drops drum class 3 from the inputs. NOTE(review): a previous comment
    # here said "closed hi-hat", but class 2 is the closed hi-hat in setUp;
    # class 3 is a different drum (setUp uses its pitch as `tap_pitch`).
    converter = data.GrooveConverter(
        split_bars=None, steps_per_quarter=4, quarters_per_bar=4,
        max_tensors_per_notesequence=5, add_instruments=[3],
        num_velocity_bins=32, num_offset_bins=32)
    tensors = converter.to_tensors(self.one_bar_sequence)
    # 585 = 9 drums * (1 hit + 32 velocity bins + 32 offset bins).
    self.assertEqual((16, 585), tensors.outputs[0].shape)
    output_sequences = converter.to_items(tensors.outputs)
    self.compare_seqs(self.one_bar_sequence, output_sequences[0],
                      categorical=True)
def testContinuousSplitInstruments(self):
converter = data.GrooveConverter(
split_bars=None, steps_per_quarter=4, quarters_per_bar=4,
max_tensors_per_notesequence=5, split_instruments=True)
tensors = converter.to_tensors(self.one_bar_sequence)
self.assertEqual((16 * 9, 3), tensors.outputs[0].shape)
self.assertEqual((16 * 9, 9), tensors.controls[0].shape)
for i, v in enumerate(np.argmax(tensors.controls[0], axis=-1)):
self.assertEqual(i % 9, v)
output_sequences = converter.to_items(tensors.outputs)
self.compare_seqs(self.one_bar_sequence, output_sequences[0],
categorical=True)
def testCategoricalSplitInstruments(self):
converter = data.GrooveConverter(
split_bars=None, steps_per_quarter=4, quarters_per_bar=4,
max_tensors_per_notesequence=5, num_velocity_bins=32,
num_offset_bins=32, split_instruments=True)
tensors = converter.to_tensors(self.one_bar_sequence)
self.assertEqual((16 * 9, 585 // 9), tensors.outputs[0].shape)
self.assertEqual((16 * 9, 9), tensors.controls[0].shape)
for i, v in enumerate(np.argmax(tensors.controls[0], axis=-1)):
self.assertEqual(i % 9, v)
output_sequences = converter.to_items(tensors.outputs)
self.compare_seqs(self.one_bar_sequence, output_sequences[0],
categorical=True)
def testCycleData(self):
converter = data.GrooveConverter(
split_bars=1, steps_per_quarter=4, quarters_per_bar=4,
max_tensors_per_notesequence=5, hop_size=4)
tensors = converter.to_tensors(self.two_bar_sequence)
outputs = tensors.outputs
for output in outputs:
self.assertEqual(output.shape, (16, 27))
output_sequences = converter.to_items(tensors.outputs)
self.assertEqual(len(output_sequences), 5)
def testCycleDataSplitInstruments(self):
converter = data.GrooveConverter(
split_bars=1, steps_per_quarter=4, quarters_per_bar=4,
max_tensors_per_notesequence=10, hop_size=4,
split_instruments=True)
tensors = converter.to_tensors(self.two_bar_sequence)
outputs = tensors.outputs
controls = tensors.controls
output_sequences = converter.to_items(outputs)
self.assertEqual(len(outputs), 5)
self.assertEqual(len(controls), 5)
for output in outputs:
self.assertEqual(output.shape, (16*9, 3))
for control in controls:
self.assertEqual(control.shape, (16*9, 9))
# This compares output_sequences[0] to the first bar of two_bar_sequence
# since they are not actually the same length.
self.compare_seqs(self.two_bar_sequence, output_sequences[0])
self.assertEqual(output_sequences[0].notes[-1].start_time, 1.75)
def testHitsAsControls(self):
converter = data.GrooveConverter(
split_bars=None, steps_per_quarter=4, quarters_per_bar=4,
max_tensors_per_notesequence=5, hits_as_controls=True)
tensors = converter.to_tensors(self.one_bar_sequence)
self.assertEqual((16, 9), tensors.controls[0].shape)
def testHitsAsControlsSplitInstruments(self):
converter = data.GrooveConverter(
split_bars=None, steps_per_quarter=4, quarters_per_bar=4,
max_tensors_per_notesequence=5, split_instruments=True,
hits_as_controls=True)
tensors = converter.to_tensors(self.two_bar_sequence)
controls = tensors.controls
self.assertEqual((32*9, 10), controls[0].shape)
def testSmallDrumSet(self):
# Convert one or two measures to a tensor and back
# This example should yield basically a perfect reconstruction
small_drum_set = [
data.REDUCED_DRUM_PITCH_CLASSES[0],
data.REDUCED_DRUM_PITCH_CLASSES[1] +
data.REDUCED_DRUM_PITCH_CLASSES[4] +
data.REDUCED_DRUM_PITCH_CLASSES[5] +
data.REDUCED_DRUM_PITCH_CLASSES[6],
data.REDUCED_DRUM_PITCH_CLASSES[2] + data.REDUCED_DRUM_PITCH_CLASSES[8],
data.REDUCED_DRUM_PITCH_CLASSES[3] + data.REDUCED_DRUM_PITCH_CLASSES[7]
]
converter = data.GrooveConverter(
split_bars=None, steps_per_quarter=4, quarters_per_bar=4,
max_tensors_per_notesequence=5, pitch_classes=small_drum_set)
# Test one bar sequence
tensors = converter.to_tensors(self.one_bar_sequence)
# Should output a tuple containing a tensor of shape (16, 12)
self.assertEqual((16, 12), tensors.outputs[0].shape)
sequences = converter.to_items(tensors.outputs)
self.assertEqual(1, len(sequences))
self.compare_seqs(self.one_bar_sequence, sequences[0])
# Run the test suite when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| |
# -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import os.path
import socket
from pip._vendor.urllib3.poolmanager import PoolManager, proxy_from_url
from pip._vendor.urllib3.response import HTTPResponse
from pip._vendor.urllib3.util import parse_url
from pip._vendor.urllib3.util import Timeout as TimeoutSauce
from pip._vendor.urllib3.util.retry import Retry
from pip._vendor.urllib3.exceptions import ClosedPoolError
from pip._vendor.urllib3.exceptions import ConnectTimeoutError
from pip._vendor.urllib3.exceptions import HTTPError as _HTTPError
from pip._vendor.urllib3.exceptions import MaxRetryError
from pip._vendor.urllib3.exceptions import NewConnectionError
from pip._vendor.urllib3.exceptions import ProxyError as _ProxyError
from pip._vendor.urllib3.exceptions import ProtocolError
from pip._vendor.urllib3.exceptions import ReadTimeoutError
from pip._vendor.urllib3.exceptions import SSLError as _SSLError
from pip._vendor.urllib3.exceptions import ResponseError
from .models import Response
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, extract_zipped_paths,
get_encoding_from_headers, prepend_scheme_if_needed,
get_auth_from_url, urldefragauth, select_proxy)
from .structures import CaseInsensitiveDict
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError, InvalidSchema, InvalidProxyURL)
from .auth import _basic_auth_str
try:
    from pip._vendor.urllib3.contrib.socks import SOCKSProxyManager
except ImportError:
    # Fallback stub: keeps the name importable when SOCKS support is not
    # vendored, and raises a clear error only if a SOCKS proxy is actually
    # requested (see HTTPAdapter.proxy_manager_for).
    def SOCKSProxyManager(*args, **kwargs):
        raise InvalidSchema("Missing dependencies for SOCKS support.")
# Default: connection pools do not block when exhausted.
DEFAULT_POOLBLOCK = False
# Default number of cached connection pools / connections per pool.
DEFAULT_POOLSIZE = 10
# Default retry count: no retries.
DEFAULT_RETRIES = 0
# Default timeout passed to the pool when checking out a low-level
# connection for a chunked send.
DEFAULT_POOL_TIMEOUT = None
class BaseAdapter(object):
    """Abstract transport adapter.
    Subclasses implement the actual wire protocol; a Session only relies on
    the :meth:`send`/:meth:`close` interface defined here.
    """
    def __init__(self):
        super(BaseAdapter, self).__init__()
    def send(self, request, stream=False, timeout=None, verify=True,
             cert=None, proxies=None):
        """Transmit a :class:`PreparedRequest <PreparedRequest>` and return a
        :class:`Response <Response>` object.
        :param request: the prepared request to transmit.
        :param stream: (optional) stream the response body instead of loading
            it eagerly.
        :param timeout: (optional) seconds to wait for the server, either a
            float or a ``(connect timeout, read timeout)`` tuple.
        :type timeout: float or tuple
        :param verify: (optional) ``True``/``False`` to toggle TLS certificate
            verification, or a string path to a CA bundle to use.
        :param cert: (optional) any user-provided SSL certificate to trust.
        :param proxies: (optional) mapping of scheme to proxy URL for this
            request.
        """
        raise NotImplementedError
    def close(self):
        """Release any adapter-specific resources."""
        raise NotImplementedError
class HTTPAdapter(BaseAdapter):
    """The built-in HTTP Adapter for urllib3.
    Provides a general-case interface for Requests sessions to contact HTTP and
    HTTPS urls by implementing the Transport Adapter interface. This class will
    usually be created by the :class:`Session <Session>` class under the
    covers.
    :param pool_connections: The number of urllib3 connection pools to cache.
    :param pool_maxsize: The maximum number of connections to save in the pool.
    :param max_retries: The maximum number of retries each connection
        should attempt. Note, this applies only to failed DNS lookups, socket
        connections and connection timeouts, never to requests where data has
        made it to the server. By default, Requests does not retry failed
        connections. If you need granular control over the conditions under
        which we retry a request, import urllib3's ``Retry`` class and pass
        that instead.
    :param pool_block: Whether the connection pool should block for connections.
    Usage::
      >>> import requests
      >>> s = requests.Session()
      >>> a = requests.adapters.HTTPAdapter(max_retries=3)
      >>> s.mount('http://', a)
    """
    # Attributes that survive pickling (see __getstate__/__setstate__).
    __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
                 '_pool_block']
    def __init__(self, pool_connections=DEFAULT_POOLSIZE,
                 pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
                 pool_block=DEFAULT_POOLBLOCK):
        if max_retries == DEFAULT_RETRIES:
            # read=False keeps the historical behavior of raising read errors
            # immediately rather than counting them as retries.
            self.max_retries = Retry(0, read=False)
        else:
            self.max_retries = Retry.from_int(max_retries)
        self.config = {}
        self.proxy_manager = {}
        super(HTTPAdapter, self).__init__()
        self._pool_connections = pool_connections
        self._pool_maxsize = pool_maxsize
        self._pool_block = pool_block
        self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
    def __getstate__(self):
        # Only pickle the attributes in __attrs__; the pool manager itself is
        # rebuilt on unpickle.
        return dict((attr, getattr(self, attr, None)) for attr in
                    self.__attrs__)
    def __setstate__(self, state):
        # Can't handle by adding 'proxy_manager' to self.__attrs__ because
        # self.poolmanager uses a lambda function, which isn't pickleable.
        self.proxy_manager = {}
        self.config = {}
        for attr, value in state.items():
            setattr(self, attr, value)
        self.init_poolmanager(self._pool_connections, self._pool_maxsize,
                              block=self._pool_block)
    def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
        """Initializes a urllib3 PoolManager.
        This method should not be called from user code, and is only
        exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param connections: The number of urllib3 connection pools to cache.
        :param maxsize: The maximum number of connections to save in the pool.
        :param block: Block when no free connections are available.
        :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
        """
        # save these values for pickling
        self._pool_connections = connections
        self._pool_maxsize = maxsize
        self._pool_block = block
        # NOTE(review): strict=True is a urllib3 1.x keyword — confirm against
        # the vendored urllib3 version before upgrading.
        self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
                                       block=block, strict=True, **pool_kwargs)
    def proxy_manager_for(self, proxy, **proxy_kwargs):
        """Return urllib3 ProxyManager for the given proxy.
        This method should not be called from user code, and is only
        exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param proxy: The proxy to return a urllib3 ProxyManager for.
        :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
        :returns: ProxyManager
        :rtype: urllib3.ProxyManager
        """
        # Managers are cached per proxy URL; SOCKS proxies get the (possibly
        # stubbed) SOCKSProxyManager, everything else a regular proxy manager.
        if proxy in self.proxy_manager:
            manager = self.proxy_manager[proxy]
        elif proxy.lower().startswith('socks'):
            username, password = get_auth_from_url(proxy)
            manager = self.proxy_manager[proxy] = SOCKSProxyManager(
                proxy,
                username=username,
                password=password,
                num_pools=self._pool_connections,
                maxsize=self._pool_maxsize,
                block=self._pool_block,
                **proxy_kwargs
            )
        else:
            proxy_headers = self.proxy_headers(proxy)
            manager = self.proxy_manager[proxy] = proxy_from_url(
                proxy,
                proxy_headers=proxy_headers,
                num_pools=self._pool_connections,
                maxsize=self._pool_maxsize,
                block=self._pool_block,
                **proxy_kwargs)
        return manager
    def cert_verify(self, conn, url, verify, cert):
        """Verify a SSL certificate. This method should not be called from user
        code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param conn: The urllib3 connection object associated with the cert.
        :param url: The requested URL.
        :param verify: Either a boolean, in which case it controls whether we verify
            the server's TLS certificate, or a string, in which case it must be a path
            to a CA bundle to use
        :param cert: The SSL certificate to verify.
        """
        if url.lower().startswith('https') and verify:
            cert_loc = None
            # Allow self-specified cert location (verify given as a path).
            if verify is not True:
                cert_loc = verify
            if not cert_loc:
                cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH)
            if not cert_loc or not os.path.exists(cert_loc):
                raise IOError("Could not find a suitable TLS CA certificate bundle, "
                              "invalid path: {0}".format(cert_loc))
            conn.cert_reqs = 'CERT_REQUIRED'
            # A path may be a bundle file or a directory of certificates.
            if not os.path.isdir(cert_loc):
                conn.ca_certs = cert_loc
            else:
                conn.ca_cert_dir = cert_loc
        else:
            conn.cert_reqs = 'CERT_NONE'
            conn.ca_certs = None
            conn.ca_cert_dir = None
        if cert:
            # cert is either a (cert_file, key_file) pair or a single path.
            if not isinstance(cert, basestring):
                conn.cert_file = cert[0]
                conn.key_file = cert[1]
            else:
                conn.cert_file = cert
                conn.key_file = None
            if conn.cert_file and not os.path.exists(conn.cert_file):
                raise IOError("Could not find the TLS certificate file, "
                              "invalid path: {0}".format(conn.cert_file))
            if conn.key_file and not os.path.exists(conn.key_file):
                raise IOError("Could not find the TLS key file, "
                              "invalid path: {0}".format(conn.key_file))
    def build_response(self, req, resp):
        """Builds a :class:`Response <requests.Response>` object from a urllib3
        response. This should not be called from user code, and is only exposed
        for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
        :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
        :param resp: The urllib3 response object.
        :rtype: requests.Response
        """
        response = Response()
        # Fallback to None if there's no status_code, for whatever reason.
        response.status_code = getattr(resp, 'status', None)
        # Make headers case-insensitive.
        response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
        # Set encoding.
        response.encoding = get_encoding_from_headers(response.headers)
        response.raw = resp
        response.reason = response.raw.reason
        if isinstance(req.url, bytes):
            response.url = req.url.decode('utf-8')
        else:
            response.url = req.url
        # Add new cookies from the server.
        extract_cookies_to_jar(response.cookies, req, resp)
        # Give the Response some context.
        response.request = req
        response.connection = self
        return response
    def get_connection(self, url, proxies=None):
        """Returns a urllib3 connection for the given URL. This should not be
        called from user code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param url: The URL to connect to.
        :param proxies: (optional) A Requests-style dictionary of proxies used on this request.
        :rtype: urllib3.ConnectionPool
        """
        proxy = select_proxy(url, proxies)
        if proxy:
            proxy = prepend_scheme_if_needed(proxy, 'http')
            proxy_url = parse_url(proxy)
            if not proxy_url.host:
                raise InvalidProxyURL("Please check proxy URL. It is malformed"
                                      " and could be missing the host.")
            proxy_manager = self.proxy_manager_for(proxy)
            conn = proxy_manager.connection_from_url(url)
        else:
            # Only scheme should be lower case
            parsed = urlparse(url)
            url = parsed.geturl()
            conn = self.poolmanager.connection_from_url(url)
        return conn
    def close(self):
        """Disposes of any internal state.
        Currently, this closes the PoolManager and any active ProxyManager,
        which closes any pooled connections.
        """
        self.poolmanager.clear()
        for proxy in self.proxy_manager.values():
            proxy.clear()
    def request_url(self, request, proxies):
        """Obtain the url to use when making the final request.
        If the message is being sent through a HTTP proxy, the full URL has to
        be used. Otherwise, we should only use the path portion of the URL.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
        :rtype: str
        """
        proxy = select_proxy(request.url, proxies)
        scheme = urlparse(request.url).scheme
        # HTTPS through a proxy tunnels via CONNECT, so the path form is used;
        # SOCKS proxies also take the path form.
        is_proxied_http_request = (proxy and scheme != 'https')
        using_socks_proxy = False
        if proxy:
            proxy_scheme = urlparse(proxy).scheme.lower()
            using_socks_proxy = proxy_scheme.startswith('socks')
        url = request.path_url
        if is_proxied_http_request and not using_socks_proxy:
            url = urldefragauth(request.url)
        return url
    def add_headers(self, request, **kwargs):
        """Add any headers needed by the connection. As of v2.0 this does
        nothing by default, but is left for overriding by users that subclass
        the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
        :param kwargs: The keyword arguments from the call to send().
        """
        pass
    def proxy_headers(self, proxy):
        """Returns a dictionary of the headers to add to any request sent
        through a proxy. This works with urllib3 magic to ensure that they are
        correctly sent to the proxy, rather than in a tunnelled request if
        CONNECT is being used.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param proxies: The url of the proxy being used for this request.
        :rtype: dict
        """
        headers = {}
        username, password = get_auth_from_url(proxy)
        if username:
            headers['Proxy-Authorization'] = _basic_auth_str(username,
                                                             password)
        return headers
    def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
        """Sends PreparedRequest object. Returns Response object.
        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param stream: (optional) Whether to stream the request content.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a :ref:`(connect timeout,
            read timeout) <timeouts>` tuple.
        :type timeout: float or tuple or urllib3 Timeout object
        :param verify: (optional) Either a boolean, in which case it controls whether
            we verify the server's TLS certificate, or a string, in which case it
            must be a path to a CA bundle to use
        :param cert: (optional) Any user-provided SSL certificate to be trusted.
        :param proxies: (optional) The proxies dictionary to apply to the request.
        :rtype: requests.Response
        """
        conn = self.get_connection(request.url, proxies)
        self.cert_verify(conn, request.url, verify, cert)
        url = self.request_url(request, proxies)
        self.add_headers(request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies)
        # A body without a Content-Length header means chunked transfer
        # encoding is required.
        chunked = not (request.body is None or 'Content-Length' in request.headers)
        # Normalize the user's timeout into a urllib3 Timeout object.
        if isinstance(timeout, tuple):
            try:
                connect, read = timeout
                timeout = TimeoutSauce(connect=connect, read=read)
            except ValueError as e:
                # this may raise a string formatting error.
                err = ("Invalid timeout {0}. Pass a (connect, read) "
                       "timeout tuple, or a single float to set "
                       "both timeouts to the same value".format(timeout))
                raise ValueError(err)
        elif isinstance(timeout, TimeoutSauce):
            pass
        else:
            timeout = TimeoutSauce(connect=timeout, read=timeout)
        try:
            if not chunked:
                resp = conn.urlopen(
                    method=request.method,
                    url=url,
                    body=request.body,
                    headers=request.headers,
                    redirect=False,
                    assert_same_host=False,
                    preload_content=False,
                    decode_content=False,
                    retries=self.max_retries,
                    timeout=timeout
                )
            # Send the request.
            else:
                # Chunked uploads bypass urlopen and drive the raw httplib
                # connection directly so each chunk can be framed by hand.
                if hasattr(conn, 'proxy_pool'):
                    conn = conn.proxy_pool
                low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
                try:
                    low_conn.putrequest(request.method,
                                        url,
                                        skip_accept_encoding=True)
                    for header, value in request.headers.items():
                        low_conn.putheader(header, value)
                    low_conn.endheaders()
                    # Emit chunked transfer-encoding framing: hex length,
                    # CRLF, chunk bytes, CRLF, and a terminating zero chunk.
                    for i in request.body:
                        low_conn.send(hex(len(i))[2:].encode('utf-8'))
                        low_conn.send(b'\r\n')
                        low_conn.send(i)
                        low_conn.send(b'\r\n')
                    low_conn.send(b'0\r\n\r\n')
                    # Receive the response from the server
                    try:
                        # For Python 2.7+ versions, use buffering of HTTP
                        # responses
                        r = low_conn.getresponse(buffering=True)
                    except TypeError:
                        # For compatibility with Python 2.6 versions and back
                        r = low_conn.getresponse()
                    resp = HTTPResponse.from_httplib(
                        r,
                        pool=conn,
                        connection=low_conn,
                        preload_content=False,
                        decode_content=False
                    )
                except:
                    # If we hit any problems here, clean up the connection.
                    # Then, reraise so that we can handle the actual exception.
                    low_conn.close()
                    raise
        # Translate urllib3/socket failures into requests' exception types.
        except (ProtocolError, socket.error) as err:
            raise ConnectionError(err, request=request)
        except MaxRetryError as e:
            if isinstance(e.reason, ConnectTimeoutError):
                # TODO: Remove this in 3.0.0: see #2811
                if not isinstance(e.reason, NewConnectionError):
                    raise ConnectTimeout(e, request=request)
            if isinstance(e.reason, ResponseError):
                raise RetryError(e, request=request)
            if isinstance(e.reason, _ProxyError):
                raise ProxyError(e, request=request)
            if isinstance(e.reason, _SSLError):
                # This branch is for urllib3 v1.22 and later.
                raise SSLError(e, request=request)
            raise ConnectionError(e, request=request)
        except ClosedPoolError as e:
            raise ConnectionError(e, request=request)
        except _ProxyError as e:
            raise ProxyError(e)
        except (_SSLError, _HTTPError) as e:
            if isinstance(e, _SSLError):
                # This branch is for urllib3 versions earlier than v1.22
                raise SSLError(e, request=request)
            elif isinstance(e, ReadTimeoutError):
                raise ReadTimeout(e, request=request)
            else:
                raise
        return self.build_response(request, resp)
| |
# --------------------------------------------------------
# Dragon
# Copyright(c) 2017 SeetaTech
# Written by Ting Pan
# --------------------------------------------------------
import lmdb
import os
import sys
def wrapper_str(raw_str):
    """Return ``raw_str`` encoded to bytes on Python 3, unchanged on Python 2."""
    if sys.version_info[0] < 3:
        return raw_str
    return raw_str.encode()
class LMDB(object):
    """A wrapper of ``LMDB`` package.
    We exploit more Key-Value specifics than Caffe's naive usages,
    such as: ``Distributed Prefetching``, ``Chunk Shuffling``, and ``Cache Dropping``,
    which provides splendid I/O efficiency comparing to ``MXNet`` or ``TensorFlow``.
    Examples
    --------
    >>> db = LMDB()
    >>> db.open('/xxx/yyy_lmdb', mode='w')
    >>> db.put('000001', 'A')
    >>> db.commit()
    >>> db.close()
    >>> db = LMDB()
    >>> db.open('/xxx/yyy_lmdb', mode='r')
    >>> db.set('000001')
    >>> print(db.value())
    >>> 'A'
    """
    def __init__(self, max_commit=10000):
        """Construct a ``LMDB``.
        Parameters
        ----------
        max_commit : int
            The max buffer size before committing automatically.
        Returns
        -------
        LMDB
            The database instance.
        """
        self._max_commit = max_commit   # auto-commit threshold for put()
        self._cur_put = 0               # number of puts buffered since last flush
        self._total_size = 0            # map_size read from the env (read mode only)
        self._buffer = []               # pending (key, value) pairs not yet in the txn
    def open(self, database_path, mode='r'):
        """Open the database.
        Parameters
        ----------
        database_path : str
            The path of the LMDB database.
        mode : str
            The mode. ``r`` or ``w``.
        Returns
        -------
        None
        """
        if mode == 'r':
            assert os.path.exists(database_path), 'database path is not exist'
            self.env = lmdb.open(database_path, readonly=True, lock=False)
            self._total_size = self.env.info()['map_size']
        if mode == 'w':
            # NOTE(review): the assert message reads oddly ("is not invalid");
            # the intent appears to be refusing to overwrite an existing
            # database directory — confirm.
            assert not os.path.isdir(database_path), 'database path is not invalid'
            self.env = lmdb.open(database_path, writemap=True)
        self.txn = self.env.begin(write=(mode == 'w'))
        self.cursor = self.txn.cursor()
    def _try_put(self):
        """Try to commit the buffers.
        This is a trick to prevent ``1TB`` disk space required on ``NTFS`` partition.
        Returns
        -------
        None
        """
        # On MapFullError the map size is doubled, the transaction restarted,
        # and the whole buffer re-put recursively. NOTE(review): the outer
        # loop then continues over its (stale) list and re-puts the remaining
        # pairs into the new txn; this looks idempotent because the same
        # key/value is overwritten — confirm.
        for pair in self._buffer:
            key, value = pair
            try: self.txn.put(key, value)
            except lmdb.MapFullError as e:
                new_size = self.env.info()['map_size'] * 2
                print('doubling LMDB map size to %d MB' % (new_size >> 20))
                self.txn.abort()
                self.env.set_mapsize(new_size)
                self.txn = self.env.begin(write=True)
                self._try_put()
        self._cur_put = 0
        self._buffer = []
    def put(self, key, value):
        """Put the item.
        Parameters
        ----------
        key : str
            The key.
        value : str
            The value.
        Returns
        -------
        None
        """
        # Writes are buffered and flushed once _max_commit puts accumulate.
        self._buffer.append((wrapper_str(key), value))
        self._cur_put += 1
        if (self._cur_put >= self._max_commit): self._try_put()
    def commit(self):
        """Commit all items that have been put.
        Returns
        -------
        None
        """
        self._try_put()
        self.txn.commit()
        # A committed txn cannot be reused; start a fresh write txn.
        self.txn = self.env.begin(write=True)
    def set(self, key):
        """Set the cursor to the specific key.
        Parameters
        ----------
        key : str
            The key to set.
        Returns
        -------
        None
        """
        self.cursor.set_key(wrapper_str(key))
    def get(self, key):
        """Get the value of the specific key.
        Parameters
        ----------
        key : str
            The key.
        Returns
        -------
        str
            The value.
        """
        # Uses a fresh cursor so the shared self.cursor position is untouched.
        cursor = self.txn.cursor()
        return cursor.get(wrapper_str(key))
    def next(self):
        """Set the cursor to the next.
        Returns
        -------
        None
        """
        # Wraps around to the first record when the end is reached, skipping
        # the bookkeeping entries. NOTE(review): on Python 3, cursor.key()
        # returns bytes, so comparing against the str literals 'size'/'zfill'
        # may never match — verify.
        if not self.cursor.next():
            self.cursor.first()
        if self.key() == 'size' or self.key() == 'zfill':
            self.next()
    def key(self):
        """Get the key under the current cursor.
        Returns
        -------
        str
            The key.
        """
        return self.cursor.key()
    def value(self):
        """Get the value under the current cursor.
        Returns
        -------
        str
            The value.
        """
        return self.cursor.value()
    def close(self):
        """Close the database.
        Returns
        -------
        None
        """
        self.env.close()
| |
""" Python Character Mapping Codec tis_620 generated from 'python-mappings/TIS-620.TXT' with gencodec.py.
""" # "
import codecs
# ## Codec APIs
class Codec(codecs.Codec):
    """Stateless TIS-620 codec backed by the module-level charmap tables."""
    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_table)
    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental TIS-620 encoder (charmap codecs keep no state between calls)."""
    def encode(self, input, final=False):
        return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental TIS-620 decoder (charmap codecs keep no state between calls)."""
    def decode(self, input, final=False):
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
    # Stream support comes entirely from Codec.encode + codecs.StreamWriter.
    pass
class StreamReader(Codec, codecs.StreamReader):
    # Stream support comes entirely from Codec.decode + codecs.StreamReader.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry consumed by the ``encodings`` registry."""
    return codecs.CodecInfo(
        name='tis-620',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
# 256-entry string: decoding_table[byte] is the Unicode character for that
# TIS-620 byte; U+FFFE marks bytes with no mapping (generated by gencodec.py).
decoding_table = (
    u'\x00'     #  0x00 -> NULL
    u'\x01'     #  0x01 -> START OF HEADING
    u'\x02'     #  0x02 -> START OF TEXT
    u'\x03'     #  0x03 -> END OF TEXT
    u'\x04'     #  0x04 -> END OF TRANSMISSION
    u'\x05'     #  0x05 -> ENQUIRY
    u'\x06'     #  0x06 -> ACKNOWLEDGE
    u'\x07'     #  0x07 -> BELL
    u'\x08'     #  0x08 -> BACKSPACE
    u'\t'       #  0x09 -> HORIZONTAL TABULATION
    u'\n'       #  0x0A -> LINE FEED
    u'\x0b'     #  0x0B -> VERTICAL TABULATION
    u'\x0c'     #  0x0C -> FORM FEED
    u'\r'       #  0x0D -> CARRIAGE RETURN
    u'\x0e'     #  0x0E -> SHIFT OUT
    u'\x0f'     #  0x0F -> SHIFT IN
    u'\x10'     #  0x10 -> DATA LINK ESCAPE
    u'\x11'     #  0x11 -> DEVICE CONTROL ONE
    u'\x12'     #  0x12 -> DEVICE CONTROL TWO
    u'\x13'     #  0x13 -> DEVICE CONTROL THREE
    u'\x14'     #  0x14 -> DEVICE CONTROL FOUR
    u'\x15'     #  0x15 -> NEGATIVE ACKNOWLEDGE
    u'\x16'     #  0x16 -> SYNCHRONOUS IDLE
    u'\x17'     #  0x17 -> END OF TRANSMISSION BLOCK
    u'\x18'     #  0x18 -> CANCEL
    u'\x19'     #  0x19 -> END OF MEDIUM
    u'\x1a'     #  0x1A -> SUBSTITUTE
    u'\x1b'     #  0x1B -> ESCAPE
    u'\x1c'     #  0x1C -> FILE SEPARATOR
    u'\x1d'     #  0x1D -> GROUP SEPARATOR
    u'\x1e'     #  0x1E -> RECORD SEPARATOR
    u'\x1f'     #  0x1F -> UNIT SEPARATOR
    u' '        #  0x20 -> SPACE
    u'!'        #  0x21 -> EXCLAMATION MARK
    u'"'        #  0x22 -> QUOTATION MARK
    u'#'        #  0x23 -> NUMBER SIGN
    u'$'        #  0x24 -> DOLLAR SIGN
    u'%'        #  0x25 -> PERCENT SIGN
    u'&'        #  0x26 -> AMPERSAND
    u"'"        #  0x27 -> APOSTROPHE
    u'('        #  0x28 -> LEFT PARENTHESIS
    u')'        #  0x29 -> RIGHT PARENTHESIS
    u'*'        #  0x2A -> ASTERISK
    u'+'        #  0x2B -> PLUS SIGN
    u','        #  0x2C -> COMMA
    u'-'        #  0x2D -> HYPHEN-MINUS
    u'.'        #  0x2E -> FULL STOP
    u'/'        #  0x2F -> SOLIDUS
    u'0'        #  0x30 -> DIGIT ZERO
    u'1'        #  0x31 -> DIGIT ONE
    u'2'        #  0x32 -> DIGIT TWO
    u'3'        #  0x33 -> DIGIT THREE
    u'4'        #  0x34 -> DIGIT FOUR
    u'5'        #  0x35 -> DIGIT FIVE
    u'6'        #  0x36 -> DIGIT SIX
    u'7'        #  0x37 -> DIGIT SEVEN
    u'8'        #  0x38 -> DIGIT EIGHT
    u'9'        #  0x39 -> DIGIT NINE
    u':'        #  0x3A -> COLON
    u';'        #  0x3B -> SEMICOLON
    u'<'        #  0x3C -> LESS-THAN SIGN
    u'='        #  0x3D -> EQUALS SIGN
    u'>'        #  0x3E -> GREATER-THAN SIGN
    u'?'        #  0x3F -> QUESTION MARK
    u'@'        #  0x40 -> COMMERCIAL AT
    u'A'        #  0x41 -> LATIN CAPITAL LETTER A
    u'B'        #  0x42 -> LATIN CAPITAL LETTER B
    u'C'        #  0x43 -> LATIN CAPITAL LETTER C
    u'D'        #  0x44 -> LATIN CAPITAL LETTER D
    u'E'        #  0x45 -> LATIN CAPITAL LETTER E
    u'F'        #  0x46 -> LATIN CAPITAL LETTER F
    u'G'        #  0x47 -> LATIN CAPITAL LETTER G
    u'H'        #  0x48 -> LATIN CAPITAL LETTER H
    u'I'        #  0x49 -> LATIN CAPITAL LETTER I
    u'J'        #  0x4A -> LATIN CAPITAL LETTER J
    u'K'        #  0x4B -> LATIN CAPITAL LETTER K
    u'L'        #  0x4C -> LATIN CAPITAL LETTER L
    u'M'        #  0x4D -> LATIN CAPITAL LETTER M
    u'N'        #  0x4E -> LATIN CAPITAL LETTER N
    u'O'        #  0x4F -> LATIN CAPITAL LETTER O
    u'P'        #  0x50 -> LATIN CAPITAL LETTER P
    u'Q'        #  0x51 -> LATIN CAPITAL LETTER Q
    u'R'        #  0x52 -> LATIN CAPITAL LETTER R
    u'S'        #  0x53 -> LATIN CAPITAL LETTER S
    u'T'        #  0x54 -> LATIN CAPITAL LETTER T
    u'U'        #  0x55 -> LATIN CAPITAL LETTER U
    u'V'        #  0x56 -> LATIN CAPITAL LETTER V
    u'W'        #  0x57 -> LATIN CAPITAL LETTER W
    u'X'        #  0x58 -> LATIN CAPITAL LETTER X
    u'Y'        #  0x59 -> LATIN CAPITAL LETTER Y
    u'Z'        #  0x5A -> LATIN CAPITAL LETTER Z
    u'['        #  0x5B -> LEFT SQUARE BRACKET
    u'\\'       #  0x5C -> REVERSE SOLIDUS
    u']'        #  0x5D -> RIGHT SQUARE BRACKET
    u'^'        #  0x5E -> CIRCUMFLEX ACCENT
    u'_'        #  0x5F -> LOW LINE
    u'`'        #  0x60 -> GRAVE ACCENT
    u'a'        #  0x61 -> LATIN SMALL LETTER A
    u'b'        #  0x62 -> LATIN SMALL LETTER B
    u'c'        #  0x63 -> LATIN SMALL LETTER C
    u'd'        #  0x64 -> LATIN SMALL LETTER D
    u'e'        #  0x65 -> LATIN SMALL LETTER E
    u'f'        #  0x66 -> LATIN SMALL LETTER F
    u'g'        #  0x67 -> LATIN SMALL LETTER G
    u'h'        #  0x68 -> LATIN SMALL LETTER H
    u'i'        #  0x69 -> LATIN SMALL LETTER I
    u'j'        #  0x6A -> LATIN SMALL LETTER J
    u'k'        #  0x6B -> LATIN SMALL LETTER K
    u'l'        #  0x6C -> LATIN SMALL LETTER L
    u'm'        #  0x6D -> LATIN SMALL LETTER M
    u'n'        #  0x6E -> LATIN SMALL LETTER N
    u'o'        #  0x6F -> LATIN SMALL LETTER O
    u'p'        #  0x70 -> LATIN SMALL LETTER P
    u'q'        #  0x71 -> LATIN SMALL LETTER Q
    u'r'        #  0x72 -> LATIN SMALL LETTER R
    u's'        #  0x73 -> LATIN SMALL LETTER S
    u't'        #  0x74 -> LATIN SMALL LETTER T
    u'u'        #  0x75 -> LATIN SMALL LETTER U
    u'v'        #  0x76 -> LATIN SMALL LETTER V
    u'w'        #  0x77 -> LATIN SMALL LETTER W
    u'x'        #  0x78 -> LATIN SMALL LETTER X
    u'y'        #  0x79 -> LATIN SMALL LETTER Y
    u'z'        #  0x7A -> LATIN SMALL LETTER Z
    u'{'        #  0x7B -> LEFT CURLY BRACKET
    u'|'        #  0x7C -> VERTICAL LINE
    u'}'        #  0x7D -> RIGHT CURLY BRACKET
    u'~'        #  0x7E -> TILDE
    u'\x7f'     #  0x7F -> DELETE
    u'\x80'     #  0x80 -> <control>
    u'\x81'     #  0x81 -> <control>
    u'\x82'     #  0x82 -> <control>
    u'\x83'     #  0x83 -> <control>
    u'\x84'     #  0x84 -> <control>
    u'\x85'     #  0x85 -> <control>
    u'\x86'     #  0x86 -> <control>
    u'\x87'     #  0x87 -> <control>
    u'\x88'     #  0x88 -> <control>
    u'\x89'     #  0x89 -> <control>
    u'\x8a'     #  0x8A -> <control>
    u'\x8b'     #  0x8B -> <control>
    u'\x8c'     #  0x8C -> <control>
    u'\x8d'     #  0x8D -> <control>
    u'\x8e'     #  0x8E -> <control>
    u'\x8f'     #  0x8F -> <control>
    u'\x90'     #  0x90 -> <control>
    u'\x91'     #  0x91 -> <control>
    u'\x92'     #  0x92 -> <control>
    u'\x93'     #  0x93 -> <control>
    u'\x94'     #  0x94 -> <control>
    u'\x95'     #  0x95 -> <control>
    u'\x96'     #  0x96 -> <control>
    u'\x97'     #  0x97 -> <control>
    u'\x98'     #  0x98 -> <control>
    u'\x99'     #  0x99 -> <control>
    u'\x9a'     #  0x9A -> <control>
    u'\x9b'     #  0x9B -> <control>
    u'\x9c'     #  0x9C -> <control>
    u'\x9d'     #  0x9D -> <control>
    u'\x9e'     #  0x9E -> <control>
    u'\x9f'     #  0x9F -> <control>
    u'\ufffe'   #  0xA0 -> UNDEFINED
    u'\u0e01'   #  0xA1 -> THAI CHARACTER KO KAI
    u'\u0e02'   #  0xA2 -> THAI CHARACTER KHO KHAI
    u'\u0e03'   #  0xA3 -> THAI CHARACTER KHO KHUAT
    u'\u0e04'   #  0xA4 -> THAI CHARACTER KHO KHWAI
    u'\u0e05'   #  0xA5 -> THAI CHARACTER KHO KHON
    u'\u0e06'   #  0xA6 -> THAI CHARACTER KHO RAKHANG
    u'\u0e07'   #  0xA7 -> THAI CHARACTER NGO NGU
    u'\u0e08'   #  0xA8 -> THAI CHARACTER CHO CHAN
    u'\u0e09'   #  0xA9 -> THAI CHARACTER CHO CHING
    u'\u0e0a'   #  0xAA -> THAI CHARACTER CHO CHANG
    u'\u0e0b'   #  0xAB -> THAI CHARACTER SO SO
    u'\u0e0c'   #  0xAC -> THAI CHARACTER CHO CHOE
    u'\u0e0d'   #  0xAD -> THAI CHARACTER YO YING
    u'\u0e0e'   #  0xAE -> THAI CHARACTER DO CHADA
    u'\u0e0f'   #  0xAF -> THAI CHARACTER TO PATAK
    u'\u0e10'   #  0xB0 -> THAI CHARACTER THO THAN
    u'\u0e11'   #  0xB1 -> THAI CHARACTER THO NANGMONTHO
    u'\u0e12'   #  0xB2 -> THAI CHARACTER THO PHUTHAO
    u'\u0e13'   #  0xB3 -> THAI CHARACTER NO NEN
    u'\u0e14'   #  0xB4 -> THAI CHARACTER DO DEK
    u'\u0e15'   #  0xB5 -> THAI CHARACTER TO TAO
    u'\u0e16'   #  0xB6 -> THAI CHARACTER THO THUNG
    u'\u0e17'   #  0xB7 -> THAI CHARACTER THO THAHAN
    u'\u0e18'   #  0xB8 -> THAI CHARACTER THO THONG
    u'\u0e19'   #  0xB9 -> THAI CHARACTER NO NU
    u'\u0e1a'   #  0xBA -> THAI CHARACTER BO BAIMAI
    u'\u0e1b'   #  0xBB -> THAI CHARACTER PO PLA
    u'\u0e1c'   #  0xBC -> THAI CHARACTER PHO PHUNG
    u'\u0e1d'   #  0xBD -> THAI CHARACTER FO FA
    u'\u0e1e'   #  0xBE -> THAI CHARACTER PHO PHAN
    u'\u0e1f'   #  0xBF -> THAI CHARACTER FO FAN
    u'\u0e20'   #  0xC0 -> THAI CHARACTER PHO SAMPHAO
    u'\u0e21'   #  0xC1 -> THAI CHARACTER MO MA
    u'\u0e22'   #  0xC2 -> THAI CHARACTER YO YAK
    u'\u0e23'   #  0xC3 -> THAI CHARACTER RO RUA
    u'\u0e24'   #  0xC4 -> THAI CHARACTER RU
    u'\u0e25'   #  0xC5 -> THAI CHARACTER LO LING
    u'\u0e26'   #  0xC6 -> THAI CHARACTER LU
    u'\u0e27'   #  0xC7 -> THAI CHARACTER WO WAEN
    u'\u0e28'   #  0xC8 -> THAI CHARACTER SO SALA
    u'\u0e29'   #  0xC9 -> THAI CHARACTER SO RUSI
    u'\u0e2a'   #  0xCA -> THAI CHARACTER SO SUA
    u'\u0e2b'   #  0xCB -> THAI CHARACTER HO HIP
    u'\u0e2c'   #  0xCC -> THAI CHARACTER LO CHULA
    u'\u0e2d'   #  0xCD -> THAI CHARACTER O ANG
    u'\u0e2e'   #  0xCE -> THAI CHARACTER HO NOKHUK
    u'\u0e2f'   #  0xCF -> THAI CHARACTER PAIYANNOI
    u'\u0e30'   #  0xD0 -> THAI CHARACTER SARA A
    u'\u0e31'   #  0xD1 -> THAI CHARACTER MAI HAN-AKAT
    u'\u0e32'   #  0xD2 -> THAI CHARACTER SARA AA
    u'\u0e33'   #  0xD3 -> THAI CHARACTER SARA AM
    u'\u0e34'   #  0xD4 -> THAI CHARACTER SARA I
    u'\u0e35'   #  0xD5 -> THAI CHARACTER SARA II
    u'\u0e36'   #  0xD6 -> THAI CHARACTER SARA UE
    u'\u0e37'   #  0xD7 -> THAI CHARACTER SARA UEE
    u'\u0e38'   #  0xD8 -> THAI CHARACTER SARA U
    u'\u0e39'   #  0xD9 -> THAI CHARACTER SARA UU
    u'\u0e3a'   #  0xDA -> THAI CHARACTER PHINTHU
    u'\ufffe'   #  0xDB -> UNDEFINED
    u'\ufffe'   #  0xDC -> UNDEFINED
    u'\ufffe'   #  0xDD -> UNDEFINED
    u'\ufffe'   #  0xDE -> UNDEFINED
    u'\u0e3f'   #  0xDF -> THAI CURRENCY SYMBOL BAHT
    u'\u0e40'   #  0xE0 -> THAI CHARACTER SARA E
    u'\u0e41'   #  0xE1 -> THAI CHARACTER SARA AE
    u'\u0e42'   #  0xE2 -> THAI CHARACTER SARA O
    u'\u0e43'   #  0xE3 -> THAI CHARACTER SARA AI MAIMUAN
    u'\u0e44'   #  0xE4 -> THAI CHARACTER SARA AI MAIMALAI
    u'\u0e45'   #  0xE5 -> THAI CHARACTER LAKKHANGYAO
    u'\u0e46'   #  0xE6 -> THAI CHARACTER MAIYAMOK
    u'\u0e47'   #  0xE7 -> THAI CHARACTER MAITAIKHU
    u'\u0e48'   #  0xE8 -> THAI CHARACTER MAI EK
    u'\u0e49'   #  0xE9 -> THAI CHARACTER MAI THO
    u'\u0e4a'   #  0xEA -> THAI CHARACTER MAI TRI
    u'\u0e4b'   #  0xEB -> THAI CHARACTER MAI CHATTAWA
    u'\u0e4c'   #  0xEC -> THAI CHARACTER THANTHAKHAT
    u'\u0e4d'   #  0xED -> THAI CHARACTER NIKHAHIT
    u'\u0e4e'   #  0xEE -> THAI CHARACTER YAMAKKAN
    u'\u0e4f'   #  0xEF -> THAI CHARACTER FONGMAN
    u'\u0e50'   #  0xF0 -> THAI DIGIT ZERO
    u'\u0e51'   #  0xF1 -> THAI DIGIT ONE
    u'\u0e52'   #  0xF2 -> THAI DIGIT TWO
    u'\u0e53'   #  0xF3 -> THAI DIGIT THREE
    u'\u0e54'   #  0xF4 -> THAI DIGIT FOUR
    u'\u0e55'   #  0xF5 -> THAI DIGIT FIVE
    u'\u0e56'   #  0xF6 -> THAI DIGIT SIX
    u'\u0e57'   #  0xF7 -> THAI DIGIT SEVEN
    u'\u0e58'   #  0xF8 -> THAI DIGIT EIGHT
    u'\u0e59'   #  0xF9 -> THAI DIGIT NINE
    u'\u0e5a'   #  0xFA -> THAI CHARACTER ANGKHANKHU
    u'\u0e5b'   #  0xFB -> THAI CHARACTER KHOMUT
    u'\ufffe'   #  0xFC -> UNDEFINED
    u'\ufffe'   #  0xFD -> UNDEFINED
    u'\ufffe'   #  0xFE -> UNDEFINED
    u'\ufffe'   #  0xFF -> UNDEFINED
)
### Encoding table
encoding_table = codecs.charmap_build(decoding_table)
| |
from __future__ import with_statement
import operator
from nose import SkipTest
from ..util import decorator
from . import config
from .. import util
import contextlib
class skip_if(object):
    """Decorator that skips a test when *predicate* evaluates true.

    May additionally carry a nested "fails on" condition (see
    :meth:`fails_on`) that is applied when the skip predicate does
    not fire.  Python 2 syntax (``except E, ex``) is intentional:
    this module still supports ancient interpreters (note the
    ``with_statement`` future import at the top of the file).
    """
    def __init__(self, predicate, reason=None):
        # predicate may be a string spec, tuple, callable, list, etc.;
        # coerce it into a Predicate object
        self.predicate = _as_predicate(predicate)
        self.reason = reason
    # optional secondary skip_if installed by fails_on(); when set,
    # the decorated test is expected to fail under that condition
    _fails_on = None
    @property
    def enabled(self):
        # the check is "enabled" (test should run) when the skip
        # predicate does NOT fire
        return not self.predicate()
    @contextlib.contextmanager
    def fail_if(self, name='block'):
        """Run a block, tolerating failure iff the predicate holds.

        An exception is swallowed (and reported) when the predicate is
        true; unexpected *success* under a true predicate raises
        AssertionError.
        """
        try:
            yield
        except Exception, ex:
            if self.predicate():
                # expected failure: report and swallow
                print ("%s failed as expected (%s): %s " % (
                    name, self.predicate, str(ex)))
            else:
                raise
        else:
            if self.predicate():
                raise AssertionError(
                    "Unexpected success for '%s' (%s)" %
                    (name, self.predicate))
    def __call__(self, fn):
        # decorate *fn* so it raises SkipTest when the predicate holds
        @decorator
        def decorate(fn, *args, **kw):
            if self.predicate():
                if self.reason:
                    msg = "'%s' : %s" % (
                        fn.__name__,
                        self.reason
                    )
                else:
                    msg = "'%s': %s" % (
                        fn.__name__, self.predicate
                    )
                raise SkipTest(msg)
            else:
                if self._fails_on:
                    # not skipped, but expected to fail under the
                    # secondary condition
                    with self._fails_on.fail_if(name=fn.__name__):
                        return fn(*args, **kw)
                else:
                    return fn(*args, **kw)
        return decorate(fn)
    def fails_on(self, other, reason=None):
        """Attach an expected-failure condition; returns self (fluent)."""
        self._fails_on = skip_if(other, reason)
        return self
class fails_if(skip_if):
    """Variant of :class:`skip_if`: instead of skipping, run the test
    and expect it to fail while the predicate holds."""
    def __call__(self, fn):
        @decorator
        def wrapped(fn, *args, **kw):
            # the inherited fail_if() context manager swallows expected
            # failures and flags unexpected successes
            with self.fail_if(name=fn.__name__):
                return fn(*args, **kw)
        return wrapped(fn)
def only_if(predicate, reason=None):
    """Skip a test unless *predicate* holds (skip on its negation)."""
    return skip_if(NotPredicate(_as_predicate(predicate)), reason)
def succeeds_if(predicate, reason=None):
    """Expect a test to fail unless *predicate* holds."""
    return fails_if(NotPredicate(_as_predicate(predicate)), reason)
class Predicate(object):
    """Base class for test-exclusion predicates."""
    @classmethod
    def as_predicate(cls, predicate):
        """Coerce *predicate* into a :class:`Predicate` instance.

        Accepts: an existing skip_if (unwrapped to its predicate), a
        Predicate (returned as-is), a list (OR of coerced elements),
        a tuple (positional args for SpecPredicate), a database spec
        string, or a bare callable.
        """
        if isinstance(predicate, skip_if):
            return predicate.predicate
        elif isinstance(predicate, Predicate):
            return predicate
        elif isinstance(predicate, list):
            return OrPredicate([cls.as_predicate(pred) for pred in predicate])
        elif isinstance(predicate, tuple):
            return SpecPredicate(*predicate)
        elif isinstance(predicate, basestring):
            # Python 2 idiom: basestring covers str and unicode.
            # A bare string names a database/driver spec, e.g. "mysql+mysqldb"
            return SpecPredicate(predicate, None, None)
        elif util.callable(predicate):
            return LambdaPredicate(predicate)
        else:
            assert False, "unknown predicate type: %s" % predicate
class BooleanPredicate(Predicate):
    """Predicate with a fixed truth value and a human-readable label."""
    def __init__(self, value, description=None):
        self.value = value
        # keep the original falsy-check semantics: any falsy
        # description (None, "") falls back to the generic label
        self.description = description or "boolean %s" % value
    def __call__(self):
        return self.value
    def _as_string(self, negate=False):
        prefix = "not " if negate else ""
        return prefix + self.description
    def __str__(self):
        return self._as_string()
class SpecPredicate(Predicate):
    """Predicate keyed to a database dialect/driver spec, optionally
    with a server-version comparison.

    :param db: spec string such as ``"mysql"`` or ``"mysql+mysqldb"``
    :param op: comparison operator name from ``_ops`` (``"<"``,
      ``">="``, ...) or a callable ``(version, spec) -> bool``
    :param spec: version value compared against the server version
    :param description: optional override for the string form
    """
    def __init__(self, db, op=None, spec=None, description=None):
        self.db = db
        self.op = op
        self.spec = spec
        self.description = description
    _ops = {
        '<': operator.lt,
        '>': operator.gt,
        '==': operator.eq,
        '!=': operator.ne,
        '<=': operator.le,
        '>=': operator.ge,
        # NOTE(review): operator.contains(a, b) tests ``b in a``, so
        # 'in' here checks "spec in version" -- confirm intended.
        'in': operator.contains,
        'between': lambda val, pair: val >= pair[0] and val <= pair[1],
    }
    def __call__(self, engine=None):
        """Return True if *engine* (default: config.db) matches."""
        if engine is None:
            engine = config.db
        # a spec of the form "dialect+driver" constrains both parts
        if "+" in self.db:
            dialect, driver = self.db.split('+')
        else:
            dialect, driver = self.db, None
        if dialect and engine.name != dialect:
            return False
        if driver is not None and engine.driver != driver:
            return False
        if self.op is not None:
            assert driver is None, "DBAPI version specs not supported yet"
            version = _server_version(engine)
            # A callable op is used directly; otherwise look up the
            # named operator.  (Replaces the old ``cond and a or b``
            # idiom, which would mis-dispatch for a falsy callable.)
            if hasattr(self.op, '__call__'):
                oper = self.op
            else:
                oper = self._ops[self.op]
            return oper(version, self.spec)
        else:
            return True
    def _as_string(self, negate=False):
        if self.description is not None:
            return self.description
        elif self.op is None:
            if negate:
                return "not %s" % self.db
            else:
                return "%s" % self.db
        else:
            if negate:
                return "not %s %s %s" % (
                    self.db,
                    self.op,
                    self.spec
                )
            else:
                return "%s %s %s" % (
                    self.db,
                    self.op,
                    self.spec
                )
    def __str__(self):
        return self._as_string()
class LambdaPredicate(Predicate):
    """Predicate that defers to an arbitrary callable."""
    def __init__(self, lambda_, description=None, args=None, kw=None):
        self.lambda_ = lambda_
        self.args = args or ()
        self.kw = kw or {}
        # description precedence: explicit > callable docstring > generic
        self.description = (description
                            or lambda_.__doc__
                            or "custom function")
    def __call__(self):
        return self.lambda_(*self.args, **self.kw)
    def _as_string(self, negate=False):
        if negate:
            return "not " + self.description
        return self.description
    def __str__(self):
        return self._as_string()
class NotPredicate(Predicate):
    """Logical negation of a wrapped predicate."""
    def __init__(self, predicate):
        self.predicate = predicate
    def __call__(self, *arg, **kw):
        result = self.predicate(*arg, **kw)
        return not result
    def __str__(self):
        # render the wrapped predicate's negated description
        return self.predicate._as_string(True)
class OrPredicate(Predicate):
    """Logical OR over a sequence of predicates.

    Note: ``predicates`` is iterated in ``__call__`` and again when
    rendering the string form, so it must be a reusable sequence, not
    a one-shot iterator.
    """
    def __init__(self, predicates, description=None):
        self.predicates = predicates
        self.description = description
    def __call__(self, *arg, **kw):
        for pred in self.predicates:
            if pred(*arg, **kw):
                # remember which predicate matched, for reporting
                self._str = pred
                return True
        return False
    # set by __call__ to the predicate that most recently matched;
    # None until a match occurs
    _str = None
    def _eval_str(self, negate=False):
        # no match recorded yet: describe the whole disjunction
        # (conjunction when negated, per De Morgan)
        if self._str is None:
            if negate:
                conjunction = " and "
            else:
                conjunction = " or "
            return conjunction.join(p._as_string(negate=negate)
                                    for p in self.predicates)
        else:
            return self._str._as_string(negate=negate)
    def _negation_str(self):
        if self.description is not None:
            # description may contain a %(spec)s placeholder
            return "Not " + (self.description % {"spec": self._str})
        else:
            return self._eval_str(negate=True)
    def _as_string(self, negate=False):
        if negate:
            return self._negation_str()
        else:
            if self.description is not None:
                return self.description % {"spec": self._str}
            else:
                return self._eval_str()
    def __str__(self):
        return self._as_string()
# module-level alias used throughout the decorators above
_as_predicate = Predicate.as_predicate
def _is_excluded(db, op, spec):
    # build a version-spec predicate and evaluate it immediately
    # against the currently configured engine (config.db)
    return SpecPredicate(db, op, spec)()
def _server_version(engine):
"""Return a server_version_info tuple."""
# force metadata to be retrieved
conn = engine.connect()
version = getattr(engine.dialect, 'server_version_info', ())
conn.close()
return version
def db_spec(*dbs):
    """Return an OrPredicate matching any of the given database specs.

    The coerced predicates are materialized into a list because
    OrPredicate iterates its predicates more than once (in __call__
    and again when rendering its string form); the previous generator
    argument would be exhausted after the first pass, silently
    breaking subsequent evaluations.
    """
    return OrPredicate(
        [Predicate.as_predicate(db) for db in dbs]
    )
def open():
    """Return a check that never skips (predicate is always false)."""
    return skip_if(BooleanPredicate(False, "mark as execute"))
def closed():
    """Return a check that always skips (predicate is always true)."""
    return skip_if(BooleanPredicate(True, "marked as skip"))
@decorator
def future(fn, *args, **kw):
    # NOTE(review): wraps the test function itself in a
    # LambdaPredicate and marks it as an expected failure
    # ("Future feature") via fails_if.  The extra *args/**kw are
    # forwarded as LambdaPredicate's description/args/kw parameters
    # -- confirm that forwarding is intentional.
    return fails_if(LambdaPredicate(fn, *args, **kw), "Future feature")
def fails_on(db, reason=None):
    """Expect failure on database *db* (a spec string like 'mysql')."""
    pred = SpecPredicate(db)
    return fails_if(pred, reason)
def fails_on_everything_except(*dbs):
    """Expect failure on every backend except the named ones."""
    matching = [SpecPredicate(db) for db in dbs]
    return succeeds_if(OrPredicate(matching))
def skip(db, reason=None):
    """Unconditionally skip tests on database *db*."""
    pred = SpecPredicate(db)
    return skip_if(pred, reason)
def only_on(dbs, reason=None):
    """Skip a test unless running on one of the named databases.

    :param dbs: a database spec or list of specs (normalized with
      util.to_list)
    :param reason: optional human-readable reason, reported when the
      test is skipped

    Previously ``reason`` was accepted but silently dropped; it is now
    forwarded to only_if, consistent with skip()/exclude()/fails_on().
    """
    return only_if(
        OrPredicate([SpecPredicate(db) for db in util.to_list(dbs)]),
        reason
    )
def exclude(db, op, spec, reason=None):
    """Skip on *db* when its server version matches ``op``/``spec``."""
    versioned = SpecPredicate(db, op, spec)
    return skip_if(versioned, reason)
def against(*queries):
    """Evaluate immediately: True when any query matches the current db."""
    preds = [Predicate.as_predicate(query) for query in queries]
    return OrPredicate(preds)()
| |
#!/usr/bin/env python
'''Configuration and execution of the actual pipeline.
.. This software is released under an MIT/X11 open source license.
Copyright 2012-2015 Diffeo, Inc.
The :class:`Pipeline` itself consists of a series of
:mod:`~streamcorpus_pipeline.stages`. These are broken into several
categories:
* Exactly one *reader* runs first, producing a sequence of
:class:`streamcorpus.StreamItem` objects.
* The stream items are fed through *incremental transforms*, which take
one stream item in and produce one stream item out.
* All of the stream items are written to a file, and *batch transforms*
can operate on the entire collection of stream items at once.
* *Post-batch incremental transforms* again operate on individual
stream items.
* Some number of *writers* send the final output somewhere.
Configuration
=============
The :command:`streamcorpus_pipeline` tool expects configuration in
a YAML file. The configuration resulting from this can be passed
through :class:`PipelineFactory` to create :class:`Pipeline` objects.
A typical configuration looks like:
.. code-block:: yaml
streamcorpus_pipeline:
# Lists of stages
reader: from_local_chunks
incremental_transforms: [clean_html, language]
batch_transforms: []
post_batch_incremental_transforms: []
# to_local_chunks must be the last writer if it is used
writers: [to_local_chunks]
# Configuration for specific stages
clean_html:
include_language_codes: [en]
The ``streamcorpus_pipeline`` block can additionally be configured
with:
.. code-block:: yaml
root_path: /home/user/diffeo
Any configuration variable whose name ends in "path" whose value is
not an absolute path is considered relative to this directory. If
omitted, use the current directory.
.. code-block:: yaml
tmp_dir_path: directory
Intermediate files are stored in a subdirectory of :file:`directory`.
The pipeline execution will make an effort to clean this up. If
omitted, defaults to :file:`/tmp`.
.. code-block:: yaml
third_dir_path: directory
External packages such as NLP taggers are stored in subdirectories
of :file:`directory`; these are typically individually configured
with `path_in_third` options relative to this directory.
.. code-block:: yaml
rate_log_interval: 500
When this many items have been processed, log an INFO-level log
message giving the current progress.
.. code-block:: yaml
input_item_limit: 500
Stop processing after reading this many stream items. (Default:
process the entire stream)
.. code-block:: yaml
cleanup_tmp_files: true
After execution finishes, delete the per-execution subdirectory of
``tmp_dir_path``.
.. code-block:: yaml
assert_single_source: false
Normally a set of stream items has a consistent
:attr:`streamcorpus.StreamItem.source` value, and the pipeline
framework will stop if it sees different values here. Set this to
``false`` to disable this check. (Default: do assert a single source
value)
.. code-block:: yaml
output_chunk_max_count: 500
After this many items have been written, close and re-open the output.
(Default: 500 items)
.. code-block:: yaml
output_max_clean_visible_bytes: 1000000
After this much :attr:`streamcorpus.StreamItem.clean_visible` content
has been written, close and re-open the output. (Default: write
entire output in one batch)
.. code-block:: yaml
external_stages_path: stages.py
The file :file:`stages.py` is a Python module that declares a
top-level dictionary named `Stages`, a map from stage name to
implementing class. Stages defined in this file can be used in any of
the appropriate stage lists.
.. code-block:: yaml
external_stages_modules: [ example.stages ]
The Python module :mod:`example.stages` declares a top-level
dictionary named `Stages`, a map from stage name to implementing
class. The named modules must be on :data:`sys.path` so that the
Python interpreter can find it. Stages defined in these modules can
be used in any of the appropriate stage lists.
API
===
The standard execution path is to pass the
:mod:`streamcorpus_pipeline` module to
:func:`yakonfig.parse_args`, then use :class:`PipelineFactory` to
create :class:`Pipeline` objects from the resulting configuration.
.. todo:: Make the top-level configuration point
:class:`PipelineFactory`, not the
:mod:`streamcorpus_pipeline` module
.. autoclass:: PipelineFactory
:members:
.. autoclass:: Pipeline
:members:
'''
from __future__ import absolute_import
import logging
import os
import threading
import time
import uuid
try:
import gevent
except ImportError:
gevent = None
import streamcorpus
from streamcorpus_pipeline._exceptions import TransformGivingUp, \
InvalidStreamItem
from streamcorpus_pipeline.util import rmtree
logger = logging.getLogger(__name__)
class PipelineFactory(object):
    '''Factory to create :class:`Pipeline` objects from configuration.

    Call this to get a :class:`Pipeline` object. Typical programmatic
    use:

    .. code-block:: python

        parser = argparse.ArgumentParser()
        args = yakonfig.parse_args([yakonfig, streamcorpus_pipeline])
        factory = PipelineFactory(StageRegistry())
        pipeline = factory(yakonfig.get_global_config('streamcorpus_pipeline'))

    This factory class will instantiate all of the stages named in the
    `streamcorpus_pipeline` configuration.  These stages will be created
    with their corresponding configuration, except that they have two
    keys added, ``tmp_dir_path`` and ``third_dir_path``, from the
    top-level configuration.

    .. automethod:: __init__
    .. automethod:: __call__

    .. attribute:: registry

        The :class:`streamcorpus_pipeline.stages.StageRegistry` used
        to find pipeline stages.

    .. attribute:: tmp_dir_suffix

        A string value that is appended to ``tmp_dir_path`` when
        creating pipeline stages.  If :const:`None`, use the top-level
        ``tmp_dir_path`` configuration directly.

    .. attribute:: lock

        A :class:`threading.Lock` to protect against concurrent
        modification of `tmp_dir_suffix`.
    '''
    def __init__(self, registry):
        '''Create a pipeline factory.

        :param dict config: top-level "streamcorpus_pipeline" configuration
        :param registry: registry of stages
        :type registry: :class:`~streamcorpus_pipeline.stages.StageRegistry`
        '''
        super(PipelineFactory, self).__init__()
        self.registry = registry
        # guards tmp_dir_suffix, which __call__ temporarily sets
        self.lock = threading.Lock()
        self.tmp_dir_suffix = None
    def create(self, stage, scp_config, config=None):
        '''Create a pipeline stage.

        Instantiates `stage` with `config`.  This essentially
        translates to ``stage(config)``, except that two keys from
        `scp_config` are injected into the configuration:
        ``tmp_dir_path`` is an execution-specific directory from
        combining the top-level ``tmp_dir_path`` configuration with
        :attr:`tmp_dir_suffix`; and ``third_dir_path`` is the same
        path from the top-level configuration.  `stage` may be either
        a callable returning the stage (e.g. its class), or its name
        in the configuration.

        `scp_config` is the configuration for the pipeline as a
        whole, and is required.  `config` is the configuration for
        the stage; if it is :const:`None` then it is extracted
        from `scp_config`.

        If you already have a fully formed configuration block
        and want to create a stage, you can call

        .. code-block:: python

            factory.registry[stage](stage_config)

        In most cases if you have a stage class object and want to
        instantiate it with its defaults you can call

        .. code-block:: python

            stage = stage_cls(stage_cls.default_config)

        .. note:: This mirrors
                  :meth:`yakonfig.factory.AutoFactory.create`, with
                  some thought that this factory class might migrate
                  to using that as a base in the future.

        :param stage: pipeline stage class, or its name in the registry
        :param dict scp_config: configuration block for the pipeline
        :param dict config: configuration block for the stage, or
          :const:`None` to get it from `scp_config`
        '''
        # Figure out what we have for a stage and its name
        # (Python 2: basestring covers str and unicode names)
        if isinstance(stage, basestring):
            stage_name = stage
            stage_obj = self.registry[stage_name]
        else:
            stage_name = getattr(stage, 'config_name', stage.__name__)
            stage_obj = stage
        # Find the configuration; get a copy we can mutate
        if config is None:
            config = scp_config.get(stage_name, None)
        if config is None:
            config = getattr(stage_obj, 'default_config', {})
        config = dict(config)
        # Fill in more values: per-execution tmp dir (with suffix when
        # one is active) and the shared third-party directory
        if self.tmp_dir_suffix is None:
            config['tmp_dir_path'] = scp_config['tmp_dir_path']
        else:
            config['tmp_dir_path'] = os.path.join(scp_config['tmp_dir_path'],
                                                  self.tmp_dir_suffix)
        config['third_dir_path'] = scp_config['third_dir_path']
        return stage_obj(config)
    def _init_stage(self, config, name):
        '''Create a single indirect stage.

        `name` should be the name of a config item that holds the
        name of a stage, for instance, ``reader``.  This looks up
        the name of that stage, then creates and returns the
        stage named.  For instance, if the config says

        .. code-block:: yaml

            reader: from_local_chunks

        then calling ``self._init_stage(scp_config, 'reader')`` will
        return a new instance of the
        :class:`~streamcorpus_pipeline._local_storage.from_local_chunks`
        stage.

        :param dict config: `streamcorpus_pipeline` configuration block
        :param str name: name of stage name entry
        :return: new instance of the stage
        '''
        return self.create(config[name], config)
    def _init_stages(self, config, name):
        '''Create a list of indirect stages.

        `name` should be the name of a config item that holds a list
        of names of stages, for instance, ``writers``.  This looks up
        the names of those stages, then creates and returns the
        corresponding list of stage objects.  For instance, if the
        config says

        .. code-block:: yaml

            incremental_transforms: [clean_html, clean_visible]

        then calling ``self._init_stages(scp_config,
        'incremental_transforms')`` will return a list of the two
        named stage instances.

        :param dict config: `streamcorpus_pipeline` configuration block
        :param str name: name of the stage name list entry
        :return: list of new stage instances
        '''
        if name not in config:
            return []
        return [self.create(stage, config) for stage in config[name]]
    def _init_all_stages(self, config):
        '''Create stages that are used for the pipeline.

        :param dict config: `streamcorpus_pipeline` configuration
        :return: tuple of (reader, incremental transforms, batch
          transforms, post-batch incremental transforms, writers,
          temporary directory)
        '''
        reader = self._init_stage(config, 'reader')
        incremental_transforms = self._init_stages(
            config, 'incremental_transforms')
        batch_transforms = self._init_stages(config, 'batch_transforms')
        post_batch_incremental_transforms = self._init_stages(
            config, 'post_batch_incremental_transforms')
        writers = self._init_stages(config, 'writers')
        # NOTE: assumes tmp_dir_suffix has been set by __call__ before
        # this is invoked; os.path.join would fail on None
        tmp_dir_path = os.path.join(config['tmp_dir_path'],
                                    self.tmp_dir_suffix)
        return (reader, incremental_transforms, batch_transforms,
                post_batch_incremental_transforms, writers, tmp_dir_path)
    def __call__(self, config):
        '''Create a :class:`Pipeline`.

        Pass in the configuration under the ``streamcorpus_pipeline``
        block, not the top-level configuration that contains it.

        If :attr:`tmp_dir_suffix` is :const:`None`, then locks the
        factory and creates stages with a temporary (UUID) value.  If
        the configuration has `cleanup_tmp_files` set to :const:`True`
        (the default) then executing the resulting pipeline will clean
        up the directory afterwards.

        :param dict config: `streamcorpus_pipeline` configuration
        :return: new pipeline instance
        '''
        tmp_dir_suffix = self.tmp_dir_suffix
        if tmp_dir_suffix is None:
            # serialize stage creation so the per-execution UUID suffix
            # is not clobbered by a concurrent caller
            with self.lock:
                self.tmp_dir_suffix = str(uuid.uuid4())
                try:
                    (reader, incremental_transforms, batch_transforms,
                     pbi_transforms, writers,
                     tmp_dir_path) = self._init_all_stages(config)
                finally:
                    # always restore the "no suffix" state
                    self.tmp_dir_suffix = None
        else:
            (reader, incremental_transforms, batch_transforms,
             pbi_transforms, writers,
             tmp_dir_path) = self._init_all_stages(config)
        return Pipeline(
            rate_log_interval=config['rate_log_interval'],
            input_item_limit=config.get('input_item_limit'),
            cleanup_tmp_files=config['cleanup_tmp_files'],
            tmp_dir_path=tmp_dir_path,
            assert_single_source=config['assert_single_source'],
            output_chunk_max_count=config.get('output_chunk_max_count'),
            output_max_clean_visible_bytes=config.get(
                'output_max_clean_visible_bytes'),
            reader=reader,
            incremental_transforms=incremental_transforms,
            batch_transforms=batch_transforms,
            post_batch_incremental_transforms=pbi_transforms,
            writers=writers,
        )
class Pipeline(object):
    '''Pipeline for extracting data into StreamItem instances.

    The pipeline has five sets of stages.  The *reader* stage reads
    from some input source and produces a series of StreamItem objects
    out.  *Incremental transforms* take single StreamItem objects in
    and produce single StreamItem objects out.  *Batch transforms* run
    on the entire set of StreamItem objects together.  There is a
    further set of *post-batch incremental transforms* which again run
    on individual StreamItem objects.  Finally, any number of *writers*
    send output somewhere, usually a streamcorpus.Chunk file.

    .. automethod:: __init__
    .. automethod:: run
    .. automethod:: _process_task
    '''
    def __init__(self, rate_log_interval, input_item_limit,
                 cleanup_tmp_files, tmp_dir_path, assert_single_source,
                 output_chunk_max_count, output_max_clean_visible_bytes,
                 reader, incremental_transforms, batch_transforms,
                 post_batch_incremental_transforms, writers):
        '''Create a new pipeline object.

        .. todo:: make this callable with just the lists of stages
                  and give sane (language-level) defaults for the rest

        :param int rate_log_interval: print progress every time this
          many input items have been processed
        :param int input_item_limit: stop after this many items
        :param bool cleanup_tmp_files: delete `tmp_dir_path` after
          execution if true
        :param str tmp_dir_path: path for intermediate files
        :param bool assert_single_source: require all items to have
          the same source value if true
        :param int output_chunk_max_count: restart output after
          writing this many items
        :param int output_max_clean_visible_bytes: restart output after
          writing this much content
        :param callable reader: reader stage object
        :param incremental_transforms: single-item transformation stages
        :paramtype incremental_transforms: list of callable
        :param batch_transforms: chunk-file transformation stages
        :paramtype batch_transforms: list of callable
        :param post_batch_incremental_transforms: single-item transformation
          stages
        :paramtype post_batch_incremental_transforms: list of callable
        :param writers: output stages
        :paramtype writers: list of callable
        '''
        self.rate_log_interval = rate_log_interval
        self.input_item_limit = input_item_limit
        self.cleanup_tmp_files = cleanup_tmp_files
        self.tmp_dir_path = tmp_dir_path
        self.assert_single_source = assert_single_source
        self.output_chunk_max_count = output_chunk_max_count
        self.output_max_clean_visible_bytes = output_max_clean_visible_bytes
        # stages that get passed in:
        self.reader = reader
        self.incremental_transforms = incremental_transforms
        self.batch_transforms = batch_transforms
        self.pbi_stages = post_batch_incremental_transforms
        self.writers = writers
        # current Chunk output file for incremental transforms
        self.t_chunk = None
        # context allows stages to communicate with later stages
        self.context = dict(
            i_str=None,
            data=None,
        )
        self.work_unit = None
    def _process_task(self, work_unit):
        '''Process a :class:`coordinate.WorkUnit`.

        The work unit's key is taken as the input file name.  The
        data should have ``start_count`` and ``start_chunk_time``
        values, which are passed on to :meth:`run`.

        :param work_unit: work unit to process
        :paramtype work_unit: :class:`coordinate.WorkUnit`
        :return: number of stream items processed
        '''
        self.work_unit = work_unit
        i_str = work_unit.key
        start_count = work_unit.data['start_count']
        start_chunk_time = work_unit.data['start_chunk_time']
        # propagate run()'s count so this method's documented return
        # value is actually produced (previously it was discarded)
        return self.run(i_str, start_count, start_chunk_time)
    def run(self, i_str, start_count=0, start_chunk_time=None):
        '''Run the pipeline.

        This runs all of the steps described in the pipeline constructor,
        reading from some input and writing to some output.

        :param str i_str: name of the input file, or other reader-specific
          description of where to get input
        :param int start_count: index of the first stream item
        :param int start_chunk_time: timestamp for the first stream item
        :return: number of stream items processed
        '''
        try:
            if not os.path.exists(self.tmp_dir_path):
                os.makedirs(self.tmp_dir_path)
            if start_chunk_time is None:
                start_chunk_time = time.time()
            ## the reader returns generators of StreamItems
            i_chunk = self.reader(i_str)
            ## t_path points to the currently in-progress temp chunk
            t_path = None
            ## loop over all docs in the chunk processing and cutting
            ## smaller chunks if needed
            len_clean_visible = 0
            sources = set()
            next_idx = 0
            ## how many have we input and actually done processing on?
            input_item_count = 0
            for si in i_chunk:
                # TODO: break out a _process_stream_item function?
                next_idx += 1
                ## yield to the gevent hub to allow other things to run
                if gevent:
                    gevent.sleep(0)
                ## skip forward until we reach start_count
                if next_idx <= start_count:
                    continue
                if next_idx % self.rate_log_interval == 0:
                    ## indexing is zero-based, so next_idx corresponds
                    ## to length of list of SIs processed so far
                    elapsed = time.time() - start_chunk_time
                    if elapsed > 0:
                        rate = float(next_idx) / elapsed
                        logger.info('%d in %.1f --> %.1f per sec on '
                                    '(pre-partial_commit) %s',
                                    next_idx - start_count, elapsed, rate,
                                    i_str)
                if not self.t_chunk:
                    ## make a temporary chunk at a temporary path
                    # (Lazy allocation after we've read an item that might get processed out to the new chunk file)
                    # TODO: make this EVEN LAZIER by not opening the t_chunk until inside _run_incremental_transforms when the first output si is ready
                    t_path = os.path.join(self.tmp_dir_path,
                                          't_chunk-%s' % uuid.uuid4().hex)
                    self.t_chunk = streamcorpus.Chunk(path=t_path, mode='wb')
                    assert self.t_chunk.message == streamcorpus.StreamItem_v0_3_0, self.t_chunk.message
                # TODO: a set of incremental transforms is equivalent
                # to a batch transform.  Make the pipeline explicitly
                # configurable as such:
                #
                # batch_transforms: [[incr set 1], batch op, [incr set 2], ...]
                #
                # OR: for some list of transforms (mixed incremental
                # and batch) pipeline can detect and batchify as needed
                ## incremental transforms populate t_chunk
                ## let the incremental transforms destroy the si by
                ## returning None
                si = self._run_incremental_transforms(
                    si, self.incremental_transforms)
                ## insist that every chunk has only one source string
                if si:
                    sources.add(si.source)
                    if self.assert_single_source and len(sources) != 1:
                        raise InvalidStreamItem(
                            'stream item %r had source %r, not %r '
                            '(set assert_single_source: false to suppress)' %
                            (si.stream_id, si.source, sources))
                if si and si.body and si.body.clean_visible:
                    len_clean_visible += len(si.body.clean_visible)
                    ## log binned clean_visible lengths, for quick stats estimates
                    #logger.debug('len(si.body.clean_visible)=%d' % int(10 * int(math.floor(float(len(si.body.clean_visible)) / 2**10)/10)))
                    #logger.debug('len(si.body.clean_visible)=%d' % len(si.body.clean_visible))
                if ((self.output_chunk_max_count is not None and
                     len(self.t_chunk) == self.output_chunk_max_count)):
                    logger.info('reached output_chunk_max_count (%d) at: %d',
                                len(self.t_chunk), next_idx)
                    self._process_output_chunk(
                        start_count, next_idx, sources, i_str, t_path)
                    start_count = next_idx
                # bug fix: this previously read the nonexistent
                # attribute output_chunk_max_clean_visible_bytes
                # (__init__ stores output_max_clean_visible_bytes),
                # raising AttributeError whenever the bytes threshold
                # was configured and reached
                elif (self.output_max_clean_visible_bytes is not None and
                      len_clean_visible >=
                      self.output_max_clean_visible_bytes):
                    logger.info(
                        'reached output_max_clean_visible_bytes '
                        '(%d) at: %d',
                        self.output_max_clean_visible_bytes,
                        len_clean_visible)
                    len_clean_visible = 0
                    self._process_output_chunk(
                        start_count, next_idx, sources, i_str, t_path)
                    start_count = next_idx
                input_item_count += 1
                if (((self.input_item_limit is not None) and
                     (input_item_count > self.input_item_limit))):
                    break
            if self.t_chunk is not None:
                self._process_output_chunk(
                    start_count, next_idx, sources, i_str, t_path)
            ## return how many stream items we processed
            return next_idx
        finally:
            if self.t_chunk is not None:
                self.t_chunk.close()
            for transform in self.batch_transforms:
                transform.shutdown()
            if self.cleanup_tmp_files:
                rmtree(self.tmp_dir_path)
    def _process_output_chunk(self, start_count, next_idx, sources, i_str,
                              t_path):
        '''
        for the current output chunk (which should be open):
          1. run batch transforms
          2. run post-batch incremental transforms
          3. run 'writers' to load-out the data to files or other storage
        return list of paths that writers wrote to
        '''
        if not self.t_chunk:
            # nothing to do
            return []
        self.t_chunk.close()
        # gather the paths as the writers run
        o_paths = None
        if len(self.t_chunk) > 0:
            # only batch transform and load if the chunk
            # isn't empty, which can happen when filtering
            # with stages like "find"
            # batch transforms act on the whole chunk in-place
            logger.info('running batch transforms on %d StreamItems',
                        len(self.t_chunk))
            self._run_batch_transforms(t_path)
            self._maybe_run_post_batch_incremental_transforms(t_path)
            # only proceed if above transforms left us with something
            # NOTE(review): len(...) >= 0 is always true, so this
            # condition only checks self.t_chunk -- the comment above
            # suggests "> 0" was intended; left unchanged to preserve
            # existing behavior
            if (self.t_chunk) and (len(self.t_chunk) >= 0):
                o_paths = self._run_writers(start_count, next_idx, sources,
                                            i_str, t_path)
        # we're now officially done with the chunk
        self.t_chunk = None
        # If we wrote some paths, update the data dictionary of outputs
        if self.work_unit and o_paths:
            old_o_paths = self.work_unit.data.get('output', [])
            o_paths = old_o_paths + o_paths
            self.work_unit.data['start_count'] = next_idx
            self.work_unit.data['output'] = o_paths
            self.work_unit.update()
    def _run_batch_transforms(self, chunk_path):
        '''Run all of the batch transforms over some intermediate chunk.'''
        for transform in self.batch_transforms:
            transform.process_path(chunk_path)
    def _maybe_run_post_batch_incremental_transforms(self, t_path):
        ## Run post batch incremental (pbi) transform stages.
        ## These exist because certain batch transforms have
        ## to run before certain incremental stages.
        if self.pbi_stages:
            t_path2 = os.path.join(self.tmp_dir_path, 'trec-kba-pipeline-tmp-%s' % str(uuid.uuid1()))
            # open destination for _run_incremental_transforms to write to
            self.t_chunk = streamcorpus.Chunk(path=t_path2, mode='wb')
            input_t_chunk = streamcorpus.Chunk(path=t_path, mode='rb')
            for si in input_t_chunk:
                self._run_incremental_transforms(si, self.pbi_stages)
            self.t_chunk.close()
            # atomically replace the input chunk with the transformed one
            os.rename(t_path2, t_path)
    def _run_writers(self, start_count, next_idx, sources, i_str, t_path):
        '''Run all of the writers over some intermediate chunk.

        :param int start_count: index of the first item
        :param int next_idx: index of the next item (after the last
          item in this chunk)
        :param list sources: source strings included in this chunk
          (usually only one source)
        :param str i_str: name of input file or other input
        :param str t_path: location of intermediate chunk on disk
        :return: list of output file paths or other outputs
        '''
        # writers put the chunk somewhere, and could delete it
        name_info = dict(
            first=start_count,
            # num and md5 computed in each writers
            source=sources.pop(),
        )
        all_o_paths = []
        for writer in self.writers:
            logger.debug('running %r on %r: %r', writer, i_str, name_info)
            o_paths = writer(t_path, name_info, i_str)
            logger.debug('loaded (%d, %d) of %r into %r',
                         start_count, next_idx - 1, i_str, o_paths)
            all_o_paths += o_paths
        return all_o_paths
    def _run_incremental_transforms(self, si, transforms):
        '''
        Run transforms on stream item.
        Item may be discarded by some transform.
        Writes successful items out to current self.t_chunk
        Returns transformed item or None.
        '''
        ## operate each transform on this one StreamItem
        for transform in transforms:
            try:
                stream_id = si.stream_id
                si_new = transform(si, context=self.context)
                if si_new is None:
                    logger.warn('transform %r deleted %s abs_url=%r',
                                transform, stream_id, si and si.abs_url)
                    return None
                si = si_new
            except TransformGivingUp:
                ## do nothing
                logger.info('transform %r giving up on %r',
                            transform, si.stream_id)
            except Exception as exc:
                # log and continue with the (untransformed) item;
                # a failing transform is not fatal to the pipeline
                logger.critical(
                    'transform %r failed on %r from i_str=%r abs_url=%r',
                    transform, si and si.stream_id, self.context.get('i_str'),
                    si and si.abs_url, exc_info=True)
        assert si is not None
        ## expect to always have a stream_time
        if not si.stream_time:
            raise InvalidStreamItem('empty stream_time: %s' % si)
        if si.stream_id is None:
            raise InvalidStreamItem('empty stream_id: %r' % si)
        ## put the StreamItem into the output
        if type(si) != streamcorpus.StreamItem_v0_3_0:
            raise InvalidStreamItem('incorrect stream item object %r' %
                                    type(si))
        self.t_chunk.add(si)
        return si
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.volume_types.qos_specs \
import views
from openstack_dashboard.test import helpers as test
class QosSpecsTests(test.BaseAdminViewTests):
    """Admin dashboard tests for the Cinder QoS spec views.

    Each test records the expected ``api.cinder`` calls with mox
    (via ``@test.create_stubs``), replays them, then drives the view
    through the Django test client.  mox verifies the calls on teardown,
    so the recorded expectation order must match the view's call order.
    """

    @test.create_stubs({api.cinder: ('qos_spec_get',), })
    def test_manage_qos_spec(self):
        """Index view renders one table row per key/value of the spec."""
        qos_spec = self.cinder_qos_specs.first()
        index_url = reverse(
            'horizon:admin:volume_types:qos_specs:index',
            args=[qos_spec.id])
        api.cinder.qos_spec_get(IsA(http.HttpRequest),
                                qos_spec.id)\
            .AndReturn(qos_spec)
        self.mox.ReplayAll()
        res = self.client.get(index_url)
        self.assertTemplateUsed(
            res, 'admin/volume_types/qos_specs/index.html')
        # Every rendered row must correspond to a spec key with its value.
        rows = res.context['table'].get_rows()
        specs = self.cinder_qos_specs.first().specs
        for row in rows:
            key = row.cells['key'].data
            self.assertIn(key, specs)
            self.assertEqual(row.cells['value'].data,
                             specs.get(key))

    @test.create_stubs({api.cinder: ('qos_spec_create',)})
    def test_create_qos_spec(self):
        """Creating a QoS spec succeeds and redirects to the index."""
        formData = {'name': 'qos-spec-1',
                    'consumer': 'back-end'}
        api.cinder.qos_spec_create(IsA(http.HttpRequest),
                                   formData['name'],
                                   {'consumer': formData['consumer']}).\
            AndReturn(self.cinder_qos_specs.first())
        self.mox.ReplayAll()
        res = self.client.post(
            reverse('horizon:admin:volume_types:create_qos_spec'),
            formData)
        redirect = reverse('horizon:admin:volume_types:index')
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, redirect)
        self.assertMessageCount(success=1)

    @test.create_stubs({api.cinder: ('volume_type_list_with_qos_associations',
                                     'volume_encryption_type_list',
                                     'qos_spec_list',
                                     'qos_spec_delete',)})
    def test_delete_qos_spec(self):
        """Deleting a QoS spec from the volume types index succeeds."""
        qos_spec = self.cinder_qos_specs.first()
        formData = {'action': 'qos_specs__delete__%s' % qos_spec.id}
        # The index view first lists volume types (with encryption info)
        # and QoS specs, then the delete action runs.
        api.cinder.volume_type_list_with_qos_associations(
            IsA(http.HttpRequest)).\
            AndReturn(self.cinder_volume_types.list())
        api.cinder.volume_encryption_type_list(IsA(http.HttpRequest))\
            .AndReturn(self.cinder_volume_encryption_types.list()[0:1])
        api.cinder.qos_spec_list(IsA(http.HttpRequest)).\
            AndReturn(self.cinder_qos_specs.list())
        api.cinder.qos_spec_delete(IsA(http.HttpRequest),
                                   str(qos_spec.id))
        self.mox.ReplayAll()
        res = self.client.post(
            reverse('horizon:admin:volume_types:index'),
            formData)
        redirect = reverse('horizon:admin:volume_types:index')
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, redirect)
        self.assertMessageCount(success=1)

    @test.create_stubs({api.cinder: ('qos_spec_get',
                                     'qos_spec_get_keys',
                                     'qos_spec_set_keys',), })
    def test_spec_edit(self):
        """Editing one spec key re-posts the full, updated key set."""
        qos_spec = self.cinder_qos_specs.first()
        key = 'minIOPS'
        edit_url = reverse('horizon:admin:volume_types:qos_specs:edit',
                           args=[qos_spec.id, key])
        index_url = reverse('horizon:admin:volume_types:index')
        data = {'value': '9999'}
        # The form submits the whole specs dict including the new value.
        qos_spec.specs[key] = data['value']
        api.cinder.qos_spec_get(IsA(http.HttpRequest),
                                qos_spec.id)\
            .AndReturn(qos_spec)
        api.cinder.qos_spec_get_keys(IsA(http.HttpRequest),
                                     qos_spec.id, raw=True)\
            .AndReturn(qos_spec)
        api.cinder.qos_spec_set_keys(IsA(http.HttpRequest),
                                     qos_spec.id,
                                     qos_spec.specs)
        self.mox.ReplayAll()
        resp = self.client.post(edit_url, data)
        self.assertEqual('admin/volume_types/qos_specs/edit.html',
                         views.EditKeyValuePairView.template_name)
        self.assertEqual('horizon:admin:volume_types:qos_specs:edit',
                         views.EditKeyValuePairView.submit_url)
        self.assertNoFormErrors(resp)
        self.assertMessageCount(success=1)
        self.assertRedirectsNoFollow(resp, index_url)

    @test.create_stubs({api.cinder: ('qos_spec_get',
                                     'qos_spec_set_keys',), })
    def test_edit_consumer(self):
        """Changing the consumer posts the spec with the new value."""
        qos_spec = self.cinder_qos_specs.first()
        # modify consumer to 'front-end'
        formData = {'consumer_choice': 'front-end'}
        edit_url = reverse(
            'horizon:admin:volume_types:edit_qos_spec_consumer',
            args=[qos_spec.id])
        api.cinder.qos_spec_get(IsA(http.HttpRequest),
                                qos_spec.id).AndReturn(qos_spec)
        api.cinder.qos_spec_set_keys(IsA(http.HttpRequest),
                                     qos_spec.id,
                                     {'consumer': formData['consumer_choice']})
        self.mox.ReplayAll()
        resp = self.client.post(edit_url, formData)
        redirect = reverse('horizon:admin:volume_types:index')
        self.assertNoFormErrors(resp)
        self.assertMessageCount(success=1)
        self.assertRedirectsNoFollow(resp, redirect)

    @test.create_stubs({api.cinder: ('qos_spec_list',
                                     'qos_spec_get',
                                     'qos_spec_get_associations',
                                     'volume_type_get',
                                     'qos_spec_associate',
                                     'qos_spec_disassociate'), })
    def test_associate_qos_spec(self):
        """Swapping the association disassociates the old spec first."""
        volume_type = self.cinder_volume_types.first()
        volume_types = self.cinder_volume_types.list()
        qos_specs = self.cinder_qos_specs.list()
        # associate qos spec with volume type
        formData = {'qos_spec_choice': qos_specs[0].id}
        edit_url = reverse(
            'horizon:admin:volume_types:manage_qos_spec_association',
            args=[volume_type.id])
        # for maximum code coverage, this test swaps the QoS association
        # on one volume type moving the QoS assigned from 1 to 0
        api.cinder.volume_type_get(IsA(http.HttpRequest),
                                   volume_type.id) \
            .AndReturn(volume_type)
        api.cinder.qos_spec_list(IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(qos_specs)
        api.cinder.qos_spec_get_associations(IsA(http.HttpRequest),
                                             qos_specs[0].id) \
            .AndReturn([])
        api.cinder.qos_spec_get_associations(IsA(http.HttpRequest),
                                             qos_specs[1].id) \
            .AndReturn(volume_types)
        # The currently-associated spec (index 1) is removed first ...
        api.cinder.qos_spec_get(IsA(http.HttpRequest),
                                qos_specs[1].id).AndReturn(qos_specs[1])
        api.cinder.qos_spec_disassociate(IsA(http.HttpRequest),
                                         qos_specs[1],
                                         volume_type.id)
        # ... then the newly-chosen spec (index 0) is associated.
        api.cinder.qos_spec_get(IsA(http.HttpRequest),
                                qos_specs[0].id).AndReturn(qos_specs[0])
        api.cinder.qos_spec_associate(IsA(http.HttpRequest),
                                      qos_specs[0],
                                      volume_type.id)
        self.mox.ReplayAll()
        resp = self.client.post(edit_url, formData)
        redirect = reverse('horizon:admin:volume_types:index')
        self.assertNoFormErrors(resp)
        self.assertMessageCount(success=1)
        self.assertRedirectsNoFollow(resp, redirect)
| |
"""Test the cloud.iot module."""
from unittest.mock import patch, MagicMock
from aiohttp import web
import pytest
from homeassistant.core import State
from homeassistant.setup import async_setup_component
from homeassistant.components.cloud import DOMAIN
from homeassistant.components.cloud.const import (
PREF_ENABLE_ALEXA, PREF_ENABLE_GOOGLE)
from tests.components.alexa import test_smart_home as test_alexa
from tests.common import mock_coro
from . import mock_cloud_prefs, mock_cloud
@pytest.fixture
def mock_cloud_inst():
    """Return a mocked cloud object whose subscription is still active."""
    cloud = MagicMock(subscription_expired=False)
    return cloud
async def test_handler_alexa(hass):
    """Alexa discovery honors the exclude filter and entity_config."""
    hass.states.async_set(
        'switch.test', 'on', {'friendly_name': "Test switch"})
    hass.states.async_set(
        'switch.test2', 'on', {'friendly_name': "Test switch 2"})

    # switch.test2 is filtered out; switch.test carries explicit config.
    alexa_config = {
        'filter': {
            'exclude_entities': 'switch.test2'
        },
        'entity_config': {
            'switch.test': {
                'name': 'Config name',
                'description': 'Config description',
                'display_categories': 'LIGHT'
            }
        }
    }
    await mock_cloud(hass, {'alexa': alexa_config})
    mock_cloud_prefs(hass)
    cloud = hass.data['cloud']

    response = await cloud.client.async_alexa_message(
        test_alexa.get_new_request('Alexa.Discovery', 'Discover'))

    endpoints = response['event']['payload']['endpoints']
    assert len(endpoints) == 1

    endpoint = endpoints[0]
    assert endpoint['description'] == 'Config description'
    assert endpoint['friendlyName'] == 'Config name'
    assert endpoint['displayCategories'] == ['LIGHT']
    assert endpoint['manufacturerName'] == 'Home Assistant'
async def test_handler_alexa_disabled(hass, mock_cloud_fixture):
    """Alexa messages get a BRIDGE_UNREACHABLE error when disabled."""
    mock_cloud_fixture[PREF_ENABLE_ALEXA] = False
    cloud = hass.data['cloud']

    response = await cloud.client.async_alexa_message(
        test_alexa.get_new_request('Alexa.Discovery', 'Discover'))

    header = response['event']['header']
    assert header['namespace'] == 'Alexa'
    assert header['name'] == 'ErrorResponse'
    assert response['event']['payload']['type'] == 'BRIDGE_UNREACHABLE'
async def test_handler_google_actions(hass):
    """Google SYNC honors the exclude filter and entity_config."""
    hass.states.async_set(
        'switch.test', 'on', {'friendly_name': "Test switch"})
    hass.states.async_set(
        'switch.test2', 'on', {'friendly_name': "Test switch 2"})
    hass.states.async_set(
        'group.all_locks', 'on', {'friendly_name': "Evil locks"})

    # switch.test2 is filtered out; switch.test carries explicit config.
    google_config = {
        'filter': {
            'exclude_entities': 'switch.test2'
        },
        'entity_config': {
            'switch.test': {
                'name': 'Config name',
                'aliases': 'Config alias',
                'room': 'living room'
            }
        }
    }
    await mock_cloud(hass, {'google_actions': google_config})
    mock_cloud_prefs(hass)
    cloud = hass.data['cloud']

    reqid = '5711642932632160983'
    request_data = {'requestId': reqid,
                    'inputs': [{'intent': 'action.devices.SYNC'}]}

    with patch(
        'hass_nabucasa.Cloud._decode_claims',
        return_value={'cognito:username': 'myUserName'}
    ):
        response = await cloud.client.async_google_message(request_data)

    assert response['requestId'] == reqid
    payload = response['payload']
    assert payload['agentUserId'] == 'myUserName'

    synced_devices = payload['devices']
    assert len(synced_devices) == 1

    synced = synced_devices[0]
    assert synced['id'] == 'switch.test'
    assert synced['name']['name'] == 'Config name'
    assert synced['name']['nicknames'] == ['Config alias']
    assert synced['type'] == 'action.devices.types.SWITCH'
    assert synced['roomHint'] == 'living room'
async def test_handler_google_actions_disabled(hass, mock_cloud_fixture):
    """Google messages report deviceTurnedOff when the user disabled it."""
    mock_cloud_fixture[PREF_ENABLE_GOOGLE] = False

    with patch('hass_nabucasa.Cloud.start', return_value=mock_coro()):
        assert await async_setup_component(hass, 'cloud', {})

    reqid = '5711642932632160983'
    request_data = {'requestId': reqid,
                    'inputs': [{'intent': 'action.devices.SYNC'}]}

    cloud = hass.data['cloud']
    response = await cloud.client.async_google_message(request_data)

    assert response['requestId'] == reqid
    assert response['payload']['errorCode'] == 'deviceTurnedOff'
async def test_webhook_msg(hass):
    """Cloudhook messages are routed to the registered webhook handler."""
    with patch('hass_nabucasa.Cloud.start', return_value=mock_coro()):
        setup = await async_setup_component(hass, 'cloud', {
            'cloud': {}
        })
        assert setup
    cloud = hass.data['cloud']

    # Map a cloudhook id onto a locally registered webhook id.
    await cloud.client.prefs.async_initialize()
    await cloud.client.prefs.async_update(cloudhooks={
        'hello': {
            'webhook_id': 'mock-webhook-id',
            'cloudhook_id': 'mock-cloud-id'
        }
    })

    captured_requests = []

    async def handler(hass, webhook_id, request):
        """Handle a webhook."""
        captured_requests.append(request)
        return web.json_response({'from': 'handler'})

    hass.components.webhook.async_register(
        'test', 'Test', 'mock-webhook-id', handler)

    response = await cloud.client.async_webhook_message({
        'cloudhook_id': 'mock-cloud-id',
        'body': '{"hello": "world"}',
        'headers': {
            'content-type': 'application/json'
        },
        'method': 'POST',
        'query': None,
    })

    assert response == {
        'status': 200,
        'body': '{"from": "handler"}',
        'headers': {
            'Content-Type': 'application/json'
        }
    }

    assert len(captured_requests) == 1
    assert await captured_requests[0].json() == {
        'hello': 'world'
    }
async def test_google_config_expose_entity(
        hass, mock_cloud_setup, mock_cloud_login):
    """Google config exposing entity method uses latest config."""
    cloud_client = hass.data[DOMAIN].client
    kitchen_light = State('light.kitchen', 'on')

    # Exposed by default ...
    assert cloud_client.google_config.should_expose(kitchen_light)

    # ... and hidden once the preference is flipped.
    await cloud_client.prefs.async_update_google_entity_config(
        entity_id='light.kitchen',
        should_expose=False,
    )
    assert not cloud_client.google_config.should_expose(kitchen_light)
async def test_google_config_should_2fa(
        hass, mock_cloud_setup, mock_cloud_login):
    """Google config disabling 2FA method uses latest config."""
    cloud_client = hass.data[DOMAIN].client
    kitchen_light = State('light.kitchen', 'on')

    # 2FA required by default ...
    assert cloud_client.google_config.should_2fa(kitchen_light)

    # ... and skipped once disabled for this entity.
    await cloud_client.prefs.async_update_google_entity_config(
        entity_id='light.kitchen',
        disable_2fa=True,
    )
    assert not cloud_client.google_config.should_2fa(kitchen_light)
| |
# Copyright 2012 SINA Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova.api.openstack.compute.contrib import attach_interfaces
from nova.compute import api as compute_api
from nova import context
from nova import exception
from nova.network import api as network_api
from nova.openstack.common import jsonutils
from nova import test
from nova.tests import fake_network_cache_model
import webob
from webob import exc
CONF = cfg.CONF

# Stable identifiers reused by the fake ports and tests below.
FAKE_UUID1 = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUID2 = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
FAKE_PORT_ID1 = '11111111-1111-1111-1111-111111111111'
FAKE_PORT_ID2 = '22222222-2222-2222-2222-222222222222'
FAKE_PORT_ID3 = '33333333-3333-3333-3333-333333333333'
FAKE_NET_ID1 = '44444444-4444-4444-4444-444444444444'
FAKE_NET_ID2 = '55555555-5555-5555-5555-555555555555'
FAKE_NET_ID3 = '66666666-6666-6666-6666-666666666666'

# Neutron-style port dict: attached to instance FAKE_UUID1 on net 1.
port_data1 = {
    "id": FAKE_PORT_ID1,
    "network_id": FAKE_NET_ID1,
    "admin_state_up": True,
    "status": "ACTIVE",
    "mac_address": "aa:aa:aa:aa:aa:aa",
    "fixed_ips": ["10.0.1.2"],
    "device_id": FAKE_UUID1,
    }

# Second port attached to the same instance, on net 2.
port_data2 = {
    "id": FAKE_PORT_ID2,
    "network_id": FAKE_NET_ID2,
    "admin_state_up": True,
    "status": "ACTIVE",
    "mac_address": "bb:bb:bb:bb:bb:bb",
    "fixed_ips": ["10.0.2.2"],
    "device_id": FAKE_UUID1,
    }

# Unattached port (empty device_id) on net 3.
port_data3 = {
    "id": FAKE_PORT_ID3,
    "network_id": FAKE_NET_ID3,
    "admin_state_up": True,
    "status": "ACTIVE",
    "mac_address": "bb:bb:bb:bb:bb:bb",
    "fixed_ips": ["10.0.2.2"],
    "device_id": '',
    }

# Default networks (indexed by fake_attach_interface) and the port set
# the fake network API functions below operate on.
fake_networks = [FAKE_NET_ID1, FAKE_NET_ID2]
ports = [port_data1, port_data2, port_data3]
def fake_list_ports(self, *args, **kwargs):
    """Stub for the network API list_ports: filter ports by device_id."""
    wanted_device = kwargs['device_id']
    matching = [port for port in ports
                if port['device_id'] == wanted_device]
    return {'ports': matching}
def fake_show_port(self, context, port_id, **kwargs):
    """Stub for the network API show_port: look a port up by its id.

    Returns ``{'port': <port dict>}`` or None when the id is unknown.
    """
    found = next((port for port in ports if port['id'] == port_id), None)
    if found is not None:
        return {'port': found}
def fake_attach_interface(self, context, instance, network_id, port_id,
                          requested_ip='192.168.1.3'):
    """Stub for the compute API attach_interface returning a fake VIF.

    When no network is given the first default network is used; when no
    port is given the port paired with that network is used.
    """
    if not network_id:
        # if no network_id is given when add a port to an instance, use the
        # first default network.
        network_id = fake_networks[0]
    if not port_id:
        port_id = ports[fake_networks.index(network_id)]['id']

    fake_vif = fake_network_cache_model.new_vif()
    fake_vif['id'] = port_id
    fake_vif['network']['id'] = network_id
    fake_vif['network']['subnets'][0]['ips'][0]['address'] = requested_ip
    return fake_vif
def fake_detach_interface(self, context, instance, port_id):
    """Stub for the compute API detach_interface.

    Succeeds silently for a known port, raises PortNotFound otherwise.
    """
    if any(port['id'] == port_id for port in ports):
        return
    raise exception.PortNotFound(port_id=port_id)
def fake_get_instance(self, context, instance_id):
    """Stub for the compute API get: always return an empty instance dict.

    Fixes the misspelled ``intance_id`` parameter name; callers stubbed
    via ``stubs.Set`` invoke it positionally, so the rename is safe.

    :param context: request context (unused)
    :param instance_id: UUID of the instance being looked up (unused)
    :returns: an empty dict standing in for an instance
    """
    return {}
class InterfaceAttachTests(test.NoDBTestCase):
    """Tests for the os-interfaces (attach_interfaces) API extension."""

    def setUp(self):
        super(InterfaceAttachTests, self).setUp()
        self.flags(neutron_auth_strategy=None)
        self.flags(neutron_url='http://anyhost/')
        self.flags(neutron_url_timeout=30)
        # Route all network/compute lookups through the module-level fakes.
        self.stubs.Set(network_api.API, 'show_port', fake_show_port)
        self.stubs.Set(network_api.API, 'list_ports', fake_list_ports)
        self.stubs.Set(compute_api.API, 'get', fake_get_instance)
        self.context = context.get_admin_context()
        # What show() should return for port 1 attached to instance 1.
        self.expected_show = {'interfaceAttachment':
                              {'net_id': FAKE_NET_ID1,
                               'port_id': FAKE_PORT_ID1,
                               'mac_addr': port_data1['mac_address'],
                               'port_state': port_data1['status'],
                               'fixed_ips': port_data1['fixed_ips'],
                               }}

    def _build_request(self, path, body=None):
        """Build a JSON POST request carrying the admin context."""
        req = webob.Request.blank(path)
        req.method = 'POST'
        req.body = jsonutils.dumps(body if body is not None else {})
        req.headers['content-type'] = 'application/json'
        req.environ['nova.context'] = self.context
        return req

    def test_show(self):
        controller = attach_interfaces.InterfaceAttachmentController()
        req = self._build_request('/v2/fake/os-interfaces/show')
        result = controller.show(req, FAKE_UUID1, FAKE_PORT_ID1)
        self.assertEqual(self.expected_show, result)

    def test_show_invalid(self):
        controller = attach_interfaces.InterfaceAttachmentController()
        req = self._build_request('/v2/fake/os-interfaces/show')
        # Port 1 belongs to instance 1, so asking via instance 2 is a 404.
        self.assertRaises(exc.HTTPNotFound,
                          controller.show, req, FAKE_UUID2, FAKE_PORT_ID1)

    def test_delete(self):
        self.stubs.Set(compute_api.API, 'detach_interface',
                       fake_detach_interface)
        controller = attach_interfaces.InterfaceAttachmentController()
        req = self._build_request('/v2/fake/os-interfaces/delete')
        result = controller.delete(req, FAKE_UUID1, FAKE_PORT_ID1)
        self.assertEqual('202 Accepted', result.status)

    def test_delete_interface_not_found(self):
        self.stubs.Set(compute_api.API, 'detach_interface',
                       fake_detach_interface)
        controller = attach_interfaces.InterfaceAttachmentController()
        req = self._build_request('/v2/fake/os-interfaces/delete')
        self.assertRaises(exc.HTTPNotFound,
                          controller.delete,
                          req,
                          FAKE_UUID1,
                          'invaid-port-id')

    def test_attach_interface_without_network_id(self):
        self.stubs.Set(compute_api.API, 'attach_interface',
                       fake_attach_interface)
        controller = attach_interfaces.InterfaceAttachmentController()
        req = self._build_request('/v2/fake/os-interfaces/attach')
        # With no net_id in the body, the first default network is used.
        result = controller.create(req, FAKE_UUID1, jsonutils.loads(req.body))
        self.assertEqual(result['interfaceAttachment']['net_id'],
                         FAKE_NET_ID1)

    def test_attach_interface_with_network_id(self):
        self.stubs.Set(compute_api.API, 'attach_interface',
                       fake_attach_interface)
        controller = attach_interfaces.InterfaceAttachmentController()
        req = self._build_request(
            '/v2/fake/os-interfaces/attach',
            {'interfaceAttachment': {'net_id': FAKE_NET_ID2}})
        result = controller.create(req, FAKE_UUID1, jsonutils.loads(req.body))
        self.assertEqual(result['interfaceAttachment']['net_id'],
                         FAKE_NET_ID2)

    def test_attach_interface_with_port_and_network_id(self):
        self.stubs.Set(compute_api.API, 'attach_interface',
                       fake_attach_interface)
        controller = attach_interfaces.InterfaceAttachmentController()
        # Supplying both a port and a network is rejected as ambiguous.
        req = self._build_request(
            '/v2/fake/os-interfaces/attach',
            {'interfaceAttachment': {'port_id': FAKE_PORT_ID1,
                                     'net_id': FAKE_NET_ID2}})
        self.assertRaises(exc.HTTPBadRequest,
                          controller.create, req, FAKE_UUID1,
                          jsonutils.loads(req.body))
| |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.sync.v1.service.document import DocumentList
from twilio.rest.sync.v1.service.sync_list import SyncListList
from twilio.rest.sync.v1.service.sync_map import SyncMapList
from twilio.rest.sync.v1.service.sync_stream import SyncStreamList
class ServiceList(ListResource):
    """ PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution. """

    def __init__(self, version):
        """
        Build the list resource for Sync Services.

        :param Version version: Version that contains the resource

        :returns: twilio.rest.sync.v1.service.ServiceList
        :rtype: twilio.rest.sync.v1.service.ServiceList
        """
        super(ServiceList, self).__init__(version)

        # Path Solution -- the collection endpoint has no path parameters.
        self._solution = {}
        self._uri = '/Services'.format(**self._solution)

    def create(self, friendly_name=values.unset, webhook_url=values.unset,
               reachability_webhooks_enabled=values.unset, acl_enabled=values.unset,
               reachability_debouncing_enabled=values.unset,
               reachability_debouncing_window=values.unset,
               webhooks_from_rest_enabled=values.unset):
        """
        Create a new ServiceInstance by POSTing to the collection.

        :param unicode friendly_name: A string that you assign to describe the resource
        :param unicode webhook_url: The URL we should call when Sync objects are manipulated
        :param bool reachability_webhooks_enabled: Whether the service instance should call webhook_url when client endpoints connect to Sync
        :param bool acl_enabled: Whether token identities in the Service must be granted access to Sync objects by using the Permissions resource
        :param bool reachability_debouncing_enabled: Whether every endpoint_disconnected event occurs after a configurable delay
        :param unicode reachability_debouncing_window: The reachability event delay in milliseconds
        :param bool webhooks_from_rest_enabled: Whether the Service instance should call webhook_url when the REST API is used to update Sync objects

        :returns: Newly created ServiceInstance
        :rtype: twilio.rest.sync.v1.service.ServiceInstance
        """
        # values.of drops any parameter left at values.unset.
        form_params = values.of({
            'FriendlyName': friendly_name,
            'WebhookUrl': webhook_url,
            'ReachabilityWebhooksEnabled': reachability_webhooks_enabled,
            'AclEnabled': acl_enabled,
            'ReachabilityDebouncingEnabled': reachability_debouncing_enabled,
            'ReachabilityDebouncingWindow': reachability_debouncing_window,
            'WebhooksFromRestEnabled': webhooks_from_rest_enabled,
        })

        created_payload = self._version.create(
            'POST',
            self._uri,
            data=form_params,
        )

        return ServiceInstance(self._version, created_payload, )

    def stream(self, limit=None, page_size=None):
        """
        Stream ServiceInstance records lazily from the API.

        Records are loaded page by page as the generator is consumed, so
        memory use stays low and never more than ``limit`` are returned.

        :param int limit: Upper limit for the number of records to return. stream()
                          guarantees to never return more than limit.  Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records.  If no page_size is defined
                              but a limit is defined, stream() will attempt to read the
                              limit with the most efficient page size, i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.sync.v1.service.ServiceInstance]
        """
        fetch_limits = self._version.read_limits(limit, page_size)

        first_page = self.page(page_size=fetch_limits['page_size'], )

        return self._version.stream(
            first_page, fetch_limits['limit'], fetch_limits['page_limit'])

    def list(self, limit=None, page_size=None):
        """
        Eagerly read ServiceInstance records into a list.

        Unlike stream(), every matching record (up to ``limit``) is loaded
        into memory before this returns.

        :param int limit: Upper limit for the number of records to return. list() guarantees
                          never to return more than limit.  Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records.  If no page_size is defined
                              but a limit is defined, list() will attempt to read the limit
                              with the most efficient page size, i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.sync.v1.service.ServiceInstance]
        """
        return [record for record in
                self.stream(limit=limit, page_size=page_size, )]

    def page(self, page_token=values.unset, page_number=values.unset,
             page_size=values.unset):
        """
        Fetch exactly one page of ServiceInstance records immediately.

        :param str page_token: PageToken provided by the API
        :param int page_number: Page Number, this value is simply for client state
        :param int page_size: Number of records to return, defaults to 50

        :returns: Page of ServiceInstance
        :rtype: twilio.rest.sync.v1.service.ServicePage
        """
        query_params = values.of({
            'PageToken': page_token,
            'Page': page_number,
            'PageSize': page_size,
        })

        response = self._version.page(
            'GET',
            self._uri,
            params=query_params,
        )

        return ServicePage(self._version, response, self._solution)

    def get_page(self, target_url):
        """
        Fetch a specific page of ServiceInstance records by absolute URL.

        :param str target_url: API-generated URL for the requested results page

        :returns: Page of ServiceInstance
        :rtype: twilio.rest.sync.v1.service.ServicePage
        """
        raw_response = self._version.domain.twilio.request(
            'GET',
            target_url,
        )

        return ServicePage(self._version, raw_response, self._solution)

    def get(self, sid):
        """
        Construct a context addressing a single Service resource.

        :param sid: The SID of the Service resource to fetch

        :returns: twilio.rest.sync.v1.service.ServiceContext
        :rtype: twilio.rest.sync.v1.service.ServiceContext
        """
        return ServiceContext(self._version, sid=sid, )

    def __call__(self, sid):
        """
        Shorthand for :meth:`get`.

        :param sid: The SID of the Service resource to fetch

        :returns: twilio.rest.sync.v1.service.ServiceContext
        :rtype: twilio.rest.sync.v1.service.ServiceContext
        """
        return ServiceContext(self._version, sid=sid, )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Sync.V1.ServiceList>'
class ServicePage(Page):
    """ PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution. """

    def __init__(self, version, response, solution):
        """
        Build one page of Service results.

        :param Version version: Version that contains the resource
        :param Response response: Response from the API

        :returns: twilio.rest.sync.v1.service.ServicePage
        :rtype: twilio.rest.sync.v1.service.ServicePage
        """
        super(ServicePage, self).__init__(version, response)

        # Path solution carried over from the list resource that paged.
        self._solution = solution

    def get_instance(self, payload):
        """
        Turn one raw API record into a ServiceInstance.

        :param dict payload: Payload response from the API

        :returns: twilio.rest.sync.v1.service.ServiceInstance
        :rtype: twilio.rest.sync.v1.service.ServiceInstance
        """
        return ServiceInstance(self._version, payload, )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Sync.V1.ServicePage>'
class ServiceContext(InstanceContext):
    """ PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution. """

    def __init__(self, version, sid):
        """
        Build a context addressing one Service resource.

        :param Version version: Version that contains the resource
        :param sid: The SID of the Service resource to fetch

        :returns: twilio.rest.sync.v1.service.ServiceContext
        :rtype: twilio.rest.sync.v1.service.ServiceContext
        """
        super(ServiceContext, self).__init__(version)

        # Path Solution
        self._solution = {'sid': sid, }
        self._uri = '/Services/{sid}'.format(**self._solution)

        # Dependent subresource lists, created lazily on first access.
        self._documents = None
        self._sync_lists = None
        self._sync_maps = None
        self._sync_streams = None

    def fetch(self):
        """
        Fetch the Service resource from the API.

        :returns: Fetched ServiceInstance
        :rtype: twilio.rest.sync.v1.service.ServiceInstance
        """
        request_params = values.of({})

        fetched_payload = self._version.fetch(
            'GET',
            self._uri,
            params=request_params,
        )

        return ServiceInstance(
            self._version, fetched_payload, sid=self._solution['sid'], )

    def delete(self):
        """
        Delete the Service resource.

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._version.delete('delete', self._uri)

    def update(self, webhook_url=values.unset, friendly_name=values.unset,
               reachability_webhooks_enabled=values.unset, acl_enabled=values.unset,
               reachability_debouncing_enabled=values.unset,
               reachability_debouncing_window=values.unset,
               webhooks_from_rest_enabled=values.unset):
        """
        Update the Service resource with the given (non-unset) fields.

        :param unicode webhook_url: The URL we should call when Sync objects are manipulated
        :param unicode friendly_name: A string that you assign to describe the resource
        :param bool reachability_webhooks_enabled: Whether the service instance should call webhook_url when client endpoints connect to Sync
        :param bool acl_enabled: Whether token identities in the Service must be granted access to Sync objects by using the Permissions resource
        :param bool reachability_debouncing_enabled: Whether every endpoint_disconnected event occurs after a configurable delay
        :param unicode reachability_debouncing_window: The reachability event delay in milliseconds
        :param bool webhooks_from_rest_enabled: Whether the Service instance should call webhook_url when the REST API is used to update Sync objects

        :returns: Updated ServiceInstance
        :rtype: twilio.rest.sync.v1.service.ServiceInstance
        """
        # values.of drops any parameter left at values.unset.
        form_params = values.of({
            'WebhookUrl': webhook_url,
            'FriendlyName': friendly_name,
            'ReachabilityWebhooksEnabled': reachability_webhooks_enabled,
            'AclEnabled': acl_enabled,
            'ReachabilityDebouncingEnabled': reachability_debouncing_enabled,
            'ReachabilityDebouncingWindow': reachability_debouncing_window,
            'WebhooksFromRestEnabled': webhooks_from_rest_enabled,
        })

        updated_payload = self._version.update(
            'POST',
            self._uri,
            data=form_params,
        )

        return ServiceInstance(
            self._version, updated_payload, sid=self._solution['sid'], )

    @property
    def documents(self):
        """
        Access the documents subresource.

        :returns: twilio.rest.sync.v1.service.document.DocumentList
        :rtype: twilio.rest.sync.v1.service.document.DocumentList
        """
        if self._documents is None:
            self._documents = DocumentList(
                self._version, service_sid=self._solution['sid'], )
        return self._documents

    @property
    def sync_lists(self):
        """
        Access the sync_lists subresource.

        :returns: twilio.rest.sync.v1.service.sync_list.SyncListList
        :rtype: twilio.rest.sync.v1.service.sync_list.SyncListList
        """
        if self._sync_lists is None:
            self._sync_lists = SyncListList(
                self._version, service_sid=self._solution['sid'], )
        return self._sync_lists

    @property
    def sync_maps(self):
        """
        Access the sync_maps subresource.

        :returns: twilio.rest.sync.v1.service.sync_map.SyncMapList
        :rtype: twilio.rest.sync.v1.service.sync_map.SyncMapList
        """
        if self._sync_maps is None:
            self._sync_maps = SyncMapList(
                self._version, service_sid=self._solution['sid'], )
        return self._sync_maps

    @property
    def sync_streams(self):
        """
        Access the sync_streams subresource.

        :returns: twilio.rest.sync.v1.service.sync_stream.SyncStreamList
        :rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamList
        """
        if self._sync_streams is None:
            self._sync_streams = SyncStreamList(
                self._version, service_sid=self._solution['sid'], )
        return self._sync_streams

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        details = ['{}={}'.format(key, value)
                   for key, value in self._solution.items()]
        return '<Twilio.Sync.V1.ServiceContext {}>'.format(' '.join(details))
class ServiceInstance(InstanceResource):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, payload, sid=None):
"""
Initialize the ServiceInstance
:returns: twilio.rest.sync.v1.service.ServiceInstance
:rtype: twilio.rest.sync.v1.service.ServiceInstance
"""
super(ServiceInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload.get('sid'),
'unique_name': payload.get('unique_name'),
'account_sid': payload.get('account_sid'),
'friendly_name': payload.get('friendly_name'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'url': payload.get('url'),
'webhook_url': payload.get('webhook_url'),
'webhooks_from_rest_enabled': payload.get('webhooks_from_rest_enabled'),
'reachability_webhooks_enabled': payload.get('reachability_webhooks_enabled'),
'acl_enabled': payload.get('acl_enabled'),
'reachability_debouncing_enabled': payload.get('reachability_debouncing_enabled'),
'reachability_debouncing_window': deserialize.integer(payload.get('reachability_debouncing_window')),
'links': payload.get('links'),
}
# Context
self._context = None
self._solution = {'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ServiceContext for this ServiceInstance
:rtype: twilio.rest.sync.v1.service.ServiceContext
"""
if self._context is None:
self._context = ServiceContext(self._version, sid=self._solution['sid'], )
return self._context
@property
def sid(self):
"""
:returns: The unique string that identifies the resource
:rtype: unicode
"""
return self._properties['sid']
@property
def unique_name(self):
"""
:returns: An application-defined string that uniquely identifies the resource
:rtype: unicode
"""
return self._properties['unique_name']
@property
def account_sid(self):
"""
:returns: The SID of the Account that created the resource
:rtype: unicode
"""
return self._properties['account_sid']
@property
def friendly_name(self):
"""
:returns: The string that you assigned to describe the resource
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def date_created(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The ISO 8601 date and time in GMT when the resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def url(self):
"""
:returns: The absolute URL of the Service resource
:rtype: unicode
"""
return self._properties['url']
@property
def webhook_url(self):
"""
:returns: The URL we call when Sync objects are manipulated
:rtype: unicode
"""
return self._properties['webhook_url']
@property
def webhooks_from_rest_enabled(self):
"""
:returns: Whether the Service instance should call webhook_url when the REST API is used to update Sync objects
:rtype: bool
"""
return self._properties['webhooks_from_rest_enabled']
@property
def reachability_webhooks_enabled(self):
"""
:returns: Whether the service instance calls webhook_url when client endpoints connect to Sync
:rtype: bool
"""
return self._properties['reachability_webhooks_enabled']
@property
def acl_enabled(self):
"""
:returns: Whether token identities in the Service must be granted access to Sync objects by using the Permissions resource
:rtype: bool
"""
return self._properties['acl_enabled']
@property
def reachability_debouncing_enabled(self):
"""
:returns: Whether every endpoint_disconnected event occurs after a configurable delay
:rtype: bool
"""
return self._properties['reachability_debouncing_enabled']
@property
def reachability_debouncing_window(self):
"""
:returns: The reachability event delay in milliseconds
:rtype: unicode
"""
return self._properties['reachability_debouncing_window']
@property
def links(self):
"""
:returns: The URLs of related resources
:rtype: unicode
"""
return self._properties['links']
def fetch(self):
"""
Fetch a ServiceInstance
:returns: Fetched ServiceInstance
:rtype: twilio.rest.sync.v1.service.ServiceInstance
"""
return self._proxy.fetch()
def delete(self):
"""
Deletes the ServiceInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def update(self, webhook_url=values.unset, friendly_name=values.unset,
reachability_webhooks_enabled=values.unset, acl_enabled=values.unset,
reachability_debouncing_enabled=values.unset,
reachability_debouncing_window=values.unset,
webhooks_from_rest_enabled=values.unset):
"""
Update the ServiceInstance
:param unicode webhook_url: The URL we should call when Sync objects are manipulated
:param unicode friendly_name: A string that you assign to describe the resource
:param bool reachability_webhooks_enabled: Whether the service instance should call webhook_url when client endpoints connect to Sync
:param bool acl_enabled: Whether token identities in the Service must be granted access to Sync objects by using the Permissions resource
:param bool reachability_debouncing_enabled: Whether every endpoint_disconnected event occurs after a configurable delay
:param unicode reachability_debouncing_window: The reachability event delay in milliseconds
:param bool webhooks_from_rest_enabled: Whether the Service instance should call webhook_url when the REST API is used to update Sync objects
:returns: Updated ServiceInstance
:rtype: twilio.rest.sync.v1.service.ServiceInstance
"""
return self._proxy.update(
webhook_url=webhook_url,
friendly_name=friendly_name,
reachability_webhooks_enabled=reachability_webhooks_enabled,
acl_enabled=acl_enabled,
reachability_debouncing_enabled=reachability_debouncing_enabled,
reachability_debouncing_window=reachability_debouncing_window,
webhooks_from_rest_enabled=webhooks_from_rest_enabled,
)
@property
def documents(self):
"""
Access the documents
:returns: twilio.rest.sync.v1.service.document.DocumentList
:rtype: twilio.rest.sync.v1.service.document.DocumentList
"""
return self._proxy.documents
@property
def sync_lists(self):
"""
Access the sync_lists
:returns: twilio.rest.sync.v1.service.sync_list.SyncListList
:rtype: twilio.rest.sync.v1.service.sync_list.SyncListList
"""
return self._proxy.sync_lists
@property
def sync_maps(self):
"""
Access the sync_maps
:returns: twilio.rest.sync.v1.service.sync_map.SyncMapList
:rtype: twilio.rest.sync.v1.service.sync_map.SyncMapList
"""
return self._proxy.sync_maps
@property
def sync_streams(self):
"""
Access the sync_streams
:returns: twilio.rest.sync.v1.service.sync_stream.SyncStreamList
:rtype: twilio.rest.sync.v1.service.sync_stream.SyncStreamList
"""
return self._proxy.sync_streams
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Sync.V1.ServiceInstance {}>'.format(context)
| |
"""Constants for the Renault integration tests."""
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_BATTERY_CHARGING,
DEVICE_CLASS_PLUG,
DOMAIN as BINARY_SENSOR_DOMAIN,
)
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN
from homeassistant.components.renault.const import (
CONF_KAMEREON_ACCOUNT_ID,
CONF_LOCALE,
DEVICE_CLASS_CHARGE_MODE,
DEVICE_CLASS_CHARGE_STATE,
DEVICE_CLASS_PLUG_STATE,
DOMAIN,
)
from homeassistant.components.select import DOMAIN as SELECT_DOMAIN
from homeassistant.components.select.const import ATTR_OPTIONS
from homeassistant.components.sensor import (
ATTR_STATE_CLASS,
DOMAIN as SENSOR_DOMAIN,
STATE_CLASS_MEASUREMENT,
STATE_CLASS_TOTAL_INCREASING,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_ICON,
ATTR_IDENTIFIERS,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_NAME,
ATTR_STATE,
ATTR_SW_VERSION,
ATTR_UNIT_OF_MEASUREMENT,
CONF_PASSWORD,
CONF_USERNAME,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_CURRENT,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_TIMESTAMP,
ELECTRIC_CURRENT_AMPERE,
ENERGY_KILO_WATT_HOUR,
LENGTH_KILOMETERS,
PERCENTAGE,
POWER_KILO_WATT,
STATE_NOT_HOME,
STATE_OFF,
STATE_ON,
STATE_UNKNOWN,
TEMP_CELSIUS,
TIME_MINUTES,
VOLUME_LITERS,
)
# Extra attribute keys used only by this test suite (not Home Assistant constants).
ATTR_DEFAULT_DISABLED = "default_disabled"  # entity expected to be disabled by default
ATTR_UNIQUE_ID = "unique_id"
# Attribute keys grouped for test comparisons; the names suggest a split
# between attributes that stay constant and ones that change with state —
# TODO confirm exact usage against the test modules.
FIXED_ATTRIBUTES = (
    ATTR_DEVICE_CLASS,
    ATTR_OPTIONS,
    ATTR_STATE_CLASS,
    ATTR_UNIT_OF_MEASUREMENT,
)
DYNAMIC_ATTRIBUTES = (ATTR_ICON,)
# Expected icons for entities whose underlying value is empty/unavailable.
ICON_FOR_EMPTY_VALUES = {
    "select.reg_number_charge_mode": "mdi:calendar-remove",
    "sensor.reg_number_charge_state": "mdi:flash-off",
    "sensor.reg_number_plug_state": "mdi:power-plug-off",
}
# Matches CONF_KAMEREON_ACCOUNT_ID in MOCK_CONFIG below.
MOCK_ACCOUNT_ID = "account_id_1"
# Mock config data to be used across multiple tests
MOCK_CONFIG = {
    CONF_USERNAME: "email@test.com",
    CONF_PASSWORD: "test",
    CONF_KAMEREON_ACCOUNT_ID: "account_id_1",
    CONF_LOCALE: "fr_FR",
}
# Expected fixtures per mocked vehicle type, keyed by fixture name. Each entry
# holds the expected device-registry data, which API endpoints the mock makes
# available (ordered: cockpit, hvac-status, location, battery-status,
# charge-mode), the JSON fixture file backing each endpoint, and the expected
# entities per platform domain.
MOCK_VEHICLES = {
    # Zoe 40: EV fixtures; hvac-status available, no location endpoint.
    "zoe_40": {
        "expected_device": {
            ATTR_IDENTIFIERS: {(DOMAIN, "VF1AAAAA555777999")},
            ATTR_MANUFACTURER: "Renault",
            ATTR_MODEL: "Zoe",
            ATTR_NAME: "REG-NUMBER",
            ATTR_SW_VERSION: "X101VE",
        },
        "endpoints_available": [
            True,  # cockpit
            True,  # hvac-status
            False,  # location
            True,  # battery-status
            True,  # charge-mode
        ],
        "endpoints": {
            "battery_status": "battery_status_charging.json",
            "charge_mode": "charge_mode_always.json",
            "cockpit": "cockpit_ev.json",
            "hvac_status": "hvac_status.json",
        },
        BINARY_SENSOR_DOMAIN: [
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_PLUG,
                ATTR_ENTITY_ID: "binary_sensor.reg_number_plugged_in",
                ATTR_STATE: STATE_ON,
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_plugged_in",
            },
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_BATTERY_CHARGING,
                ATTR_ENTITY_ID: "binary_sensor.reg_number_charging",
                ATTR_STATE: STATE_ON,
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_charging",
            },
        ],
        DEVICE_TRACKER_DOMAIN: [],
        SELECT_DOMAIN: [
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_CHARGE_MODE,
                ATTR_ENTITY_ID: "select.reg_number_charge_mode",
                ATTR_ICON: "mdi:calendar-remove",
                ATTR_OPTIONS: ["always", "always_charging", "schedule_mode"],
                ATTR_STATE: "always",
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_charge_mode",
            },
        ],
        SENSOR_DOMAIN: [
            {
                ATTR_ENTITY_ID: "sensor.reg_number_battery_autonomy",
                ATTR_ICON: "mdi:ev-station",
                ATTR_STATE: "141",
                ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_battery_autonomy",
                ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
            },
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
                ATTR_ENTITY_ID: "sensor.reg_number_battery_available_energy",
                ATTR_STATE: "31",
                ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_battery_available_energy",
                ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
            },
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_BATTERY,
                ATTR_ENTITY_ID: "sensor.reg_number_battery_level",
                ATTR_STATE: "60",
                ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_battery_level",
                ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
            },
            {
                ATTR_DEFAULT_DISABLED: True,
                ATTR_DEVICE_CLASS: DEVICE_CLASS_TIMESTAMP,
                ATTR_ENTITY_ID: "sensor.reg_number_battery_last_activity",
                ATTR_STATE: "2020-01-12T21:40:16+00:00",
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_battery_last_activity",
            },
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
                ATTR_ENTITY_ID: "sensor.reg_number_battery_temperature",
                ATTR_STATE: "20",
                ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_battery_temperature",
                ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
            },
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_CHARGE_STATE,
                ATTR_ENTITY_ID: "sensor.reg_number_charge_state",
                ATTR_ICON: "mdi:flash",
                ATTR_STATE: "charge_in_progress",
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_charge_state",
            },
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_POWER,
                ATTR_ENTITY_ID: "sensor.reg_number_charging_power",
                ATTR_STATE: "0.027",
                ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_charging_power",
                ATTR_UNIT_OF_MEASUREMENT: POWER_KILO_WATT,
            },
            {
                ATTR_ENTITY_ID: "sensor.reg_number_charging_remaining_time",
                ATTR_ICON: "mdi:timer",
                ATTR_STATE: "145",
                ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_charging_remaining_time",
                ATTR_UNIT_OF_MEASUREMENT: TIME_MINUTES,
            },
            {
                ATTR_ENTITY_ID: "sensor.reg_number_mileage",
                ATTR_ICON: "mdi:sign-direction",
                ATTR_STATE: "49114",
                ATTR_STATE_CLASS: STATE_CLASS_TOTAL_INCREASING,
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_mileage",
                ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
            },
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
                ATTR_ENTITY_ID: "sensor.reg_number_outside_temperature",
                ATTR_STATE: "8.0",
                ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_outside_temperature",
                ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
            },
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_PLUG_STATE,
                ATTR_ENTITY_ID: "sensor.reg_number_plug_state",
                ATTR_ICON: "mdi:power-plug",
                ATTR_STATE: "plugged",
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_plug_state",
            },
        ],
    },
    # Zoe 50: EV fixtures; location available, hvac-status unavailable,
    # battery fixtures use the "not charging" variants.
    "zoe_50": {
        "expected_device": {
            ATTR_IDENTIFIERS: {(DOMAIN, "VF1AAAAA555777999")},
            ATTR_MANUFACTURER: "Renault",
            ATTR_MODEL: "Zoe",
            ATTR_NAME: "REG-NUMBER",
            ATTR_SW_VERSION: "X102VE",
        },
        "endpoints_available": [
            True,  # cockpit
            False,  # hvac-status
            True,  # location
            True,  # battery-status
            True,  # charge-mode
        ],
        "endpoints": {
            "battery_status": "battery_status_not_charging.json",
            "charge_mode": "charge_mode_schedule.json",
            "cockpit": "cockpit_ev.json",
            "location": "location.json",
        },
        BINARY_SENSOR_DOMAIN: [
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_PLUG,
                ATTR_ENTITY_ID: "binary_sensor.reg_number_plugged_in",
                ATTR_STATE: STATE_OFF,
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_plugged_in",
            },
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_BATTERY_CHARGING,
                ATTR_ENTITY_ID: "binary_sensor.reg_number_charging",
                ATTR_STATE: STATE_OFF,
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_charging",
            },
        ],
        DEVICE_TRACKER_DOMAIN: [
            {
                ATTR_ENTITY_ID: "device_tracker.reg_number_location",
                ATTR_ICON: "mdi:car",
                ATTR_STATE: STATE_NOT_HOME,
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_location",
            }
        ],
        SELECT_DOMAIN: [
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_CHARGE_MODE,
                ATTR_ENTITY_ID: "select.reg_number_charge_mode",
                ATTR_ICON: "mdi:calendar-clock",
                ATTR_OPTIONS: ["always", "always_charging", "schedule_mode"],
                ATTR_STATE: "schedule_mode",
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_charge_mode",
            },
        ],
        SENSOR_DOMAIN: [
            {
                ATTR_ENTITY_ID: "sensor.reg_number_battery_autonomy",
                ATTR_ICON: "mdi:ev-station",
                ATTR_STATE: "128",
                ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_battery_autonomy",
                ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
            },
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
                ATTR_ENTITY_ID: "sensor.reg_number_battery_available_energy",
                ATTR_STATE: "0",
                ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_battery_available_energy",
                ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
            },
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_BATTERY,
                ATTR_ENTITY_ID: "sensor.reg_number_battery_level",
                ATTR_STATE: "50",
                ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_battery_level",
                ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
            },
            {
                ATTR_DEFAULT_DISABLED: True,
                ATTR_DEVICE_CLASS: DEVICE_CLASS_TIMESTAMP,
                ATTR_ENTITY_ID: "sensor.reg_number_battery_last_activity",
                ATTR_STATE: "2020-11-17T08:06:48+00:00",
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_battery_last_activity",
            },
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
                ATTR_ENTITY_ID: "sensor.reg_number_battery_temperature",
                ATTR_STATE: STATE_UNKNOWN,
                ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_battery_temperature",
                ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
            },
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_CHARGE_STATE,
                ATTR_ENTITY_ID: "sensor.reg_number_charge_state",
                ATTR_ICON: "mdi:flash-off",
                ATTR_STATE: "charge_error",
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_charge_state",
            },
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_CURRENT,
                ATTR_ENTITY_ID: "sensor.reg_number_charging_power",
                ATTR_STATE: STATE_UNKNOWN,
                ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_charging_power",
                ATTR_UNIT_OF_MEASUREMENT: ELECTRIC_CURRENT_AMPERE,
            },
            {
                ATTR_ENTITY_ID: "sensor.reg_number_charging_remaining_time",
                ATTR_ICON: "mdi:timer",
                ATTR_STATE: STATE_UNKNOWN,
                ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_charging_remaining_time",
                ATTR_UNIT_OF_MEASUREMENT: TIME_MINUTES,
            },
            {
                ATTR_ENTITY_ID: "sensor.reg_number_mileage",
                ATTR_ICON: "mdi:sign-direction",
                ATTR_STATE: "49114",
                ATTR_STATE_CLASS: STATE_CLASS_TOTAL_INCREASING,
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_mileage",
                ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
            },
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_PLUG_STATE,
                ATTR_ENTITY_ID: "sensor.reg_number_plug_state",
                ATTR_ICON: "mdi:power-plug-off",
                ATTR_STATE: "unplugged",
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_plug_state",
            },
            {
                ATTR_DEFAULT_DISABLED: True,
                ATTR_DEVICE_CLASS: DEVICE_CLASS_TIMESTAMP,
                ATTR_ENTITY_ID: "sensor.reg_number_location_last_activity",
                ATTR_STATE: "2020-02-18T16:58:38+00:00",
                ATTR_UNIQUE_ID: "vf1aaaaa555777999_location_last_activity",
            },
        ],
    },
    # Captur PHEV: both battery/charge fixtures and fuel fixtures apply.
    "captur_phev": {
        "expected_device": {
            ATTR_IDENTIFIERS: {(DOMAIN, "VF1AAAAA555777123")},
            ATTR_MANUFACTURER: "Renault",
            ATTR_MODEL: "Captur ii",
            ATTR_NAME: "REG-NUMBER",
            ATTR_SW_VERSION: "XJB1SU",
        },
        "endpoints_available": [
            True,  # cockpit
            False,  # hvac-status
            True,  # location
            True,  # battery-status
            True,  # charge-mode
        ],
        "endpoints": {
            "battery_status": "battery_status_charging.json",
            "charge_mode": "charge_mode_always.json",
            "cockpit": "cockpit_fuel.json",
            "location": "location.json",
        },
        BINARY_SENSOR_DOMAIN: [
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_PLUG,
                ATTR_ENTITY_ID: "binary_sensor.reg_number_plugged_in",
                ATTR_STATE: STATE_ON,
                ATTR_UNIQUE_ID: "vf1aaaaa555777123_plugged_in",
            },
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_BATTERY_CHARGING,
                ATTR_ENTITY_ID: "binary_sensor.reg_number_charging",
                ATTR_STATE: STATE_ON,
                ATTR_UNIQUE_ID: "vf1aaaaa555777123_charging",
            },
        ],
        DEVICE_TRACKER_DOMAIN: [
            {
                ATTR_ENTITY_ID: "device_tracker.reg_number_location",
                ATTR_ICON: "mdi:car",
                ATTR_STATE: STATE_NOT_HOME,
                ATTR_UNIQUE_ID: "vf1aaaaa555777123_location",
            }
        ],
        SELECT_DOMAIN: [
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_CHARGE_MODE,
                ATTR_ENTITY_ID: "select.reg_number_charge_mode",
                ATTR_ICON: "mdi:calendar-remove",
                ATTR_OPTIONS: ["always", "always_charging", "schedule_mode"],
                ATTR_STATE: "always",
                ATTR_UNIQUE_ID: "vf1aaaaa555777123_charge_mode",
            },
        ],
        SENSOR_DOMAIN: [
            {
                ATTR_ENTITY_ID: "sensor.reg_number_battery_autonomy",
                ATTR_ICON: "mdi:ev-station",
                ATTR_STATE: "141",
                ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
                ATTR_UNIQUE_ID: "vf1aaaaa555777123_battery_autonomy",
                ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
            },
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_ENERGY,
                ATTR_ENTITY_ID: "sensor.reg_number_battery_available_energy",
                ATTR_STATE: "31",
                ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
                ATTR_UNIQUE_ID: "vf1aaaaa555777123_battery_available_energy",
                ATTR_UNIT_OF_MEASUREMENT: ENERGY_KILO_WATT_HOUR,
            },
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_BATTERY,
                ATTR_ENTITY_ID: "sensor.reg_number_battery_level",
                ATTR_STATE: "60",
                ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
                ATTR_UNIQUE_ID: "vf1aaaaa555777123_battery_level",
                ATTR_UNIT_OF_MEASUREMENT: PERCENTAGE,
            },
            {
                ATTR_DEFAULT_DISABLED: True,
                ATTR_DEVICE_CLASS: DEVICE_CLASS_TIMESTAMP,
                ATTR_ENTITY_ID: "sensor.reg_number_battery_last_activity",
                ATTR_STATE: "2020-01-12T21:40:16+00:00",
                ATTR_UNIQUE_ID: "vf1aaaaa555777123_battery_last_activity",
            },
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_TEMPERATURE,
                ATTR_ENTITY_ID: "sensor.reg_number_battery_temperature",
                ATTR_STATE: "20",
                ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
                ATTR_UNIQUE_ID: "vf1aaaaa555777123_battery_temperature",
                ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS,
            },
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_CHARGE_STATE,
                ATTR_ENTITY_ID: "sensor.reg_number_charge_state",
                ATTR_ICON: "mdi:flash",
                ATTR_STATE: "charge_in_progress",
                ATTR_UNIQUE_ID: "vf1aaaaa555777123_charge_state",
            },
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_CURRENT,
                ATTR_ENTITY_ID: "sensor.reg_number_charging_power",
                ATTR_STATE: "27.0",
                ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
                ATTR_UNIQUE_ID: "vf1aaaaa555777123_charging_power",
                ATTR_UNIT_OF_MEASUREMENT: ELECTRIC_CURRENT_AMPERE,
            },
            {
                ATTR_ENTITY_ID: "sensor.reg_number_charging_remaining_time",
                ATTR_ICON: "mdi:timer",
                ATTR_STATE: "145",
                ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
                ATTR_UNIQUE_ID: "vf1aaaaa555777123_charging_remaining_time",
                ATTR_UNIT_OF_MEASUREMENT: TIME_MINUTES,
            },
            {
                ATTR_ENTITY_ID: "sensor.reg_number_fuel_autonomy",
                ATTR_ICON: "mdi:gas-station",
                ATTR_STATE: "35",
                ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
                ATTR_UNIQUE_ID: "vf1aaaaa555777123_fuel_autonomy",
                ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
            },
            {
                ATTR_ENTITY_ID: "sensor.reg_number_fuel_quantity",
                ATTR_ICON: "mdi:fuel",
                ATTR_STATE: "3",
                ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
                ATTR_UNIQUE_ID: "vf1aaaaa555777123_fuel_quantity",
                ATTR_UNIT_OF_MEASUREMENT: VOLUME_LITERS,
            },
            {
                ATTR_ENTITY_ID: "sensor.reg_number_mileage",
                ATTR_ICON: "mdi:sign-direction",
                ATTR_STATE: "5567",
                ATTR_STATE_CLASS: STATE_CLASS_TOTAL_INCREASING,
                ATTR_UNIQUE_ID: "vf1aaaaa555777123_mileage",
                ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
            },
            {
                ATTR_DEVICE_CLASS: DEVICE_CLASS_PLUG_STATE,
                ATTR_ENTITY_ID: "sensor.reg_number_plug_state",
                ATTR_ICON: "mdi:power-plug",
                ATTR_STATE: "plugged",
                ATTR_UNIQUE_ID: "vf1aaaaa555777123_plug_state",
            },
            {
                ATTR_DEFAULT_DISABLED: True,
                ATTR_DEVICE_CLASS: DEVICE_CLASS_TIMESTAMP,
                ATTR_ENTITY_ID: "sensor.reg_number_location_last_activity",
                ATTR_STATE: "2020-02-18T16:58:38+00:00",
                ATTR_UNIQUE_ID: "vf1aaaaa555777123_location_last_activity",
            },
        ],
    },
    # Captur (fuel only): no battery-status or charge-mode endpoints at all.
    "captur_fuel": {
        "expected_device": {
            ATTR_IDENTIFIERS: {(DOMAIN, "VF1AAAAA555777123")},
            ATTR_MANUFACTURER: "Renault",
            ATTR_MODEL: "Captur ii",
            ATTR_NAME: "REG-NUMBER",
            ATTR_SW_VERSION: "XJB1SU",
        },
        "endpoints_available": [
            True,  # cockpit
            False,  # hvac-status
            True,  # location
            # Ignore, # battery-status
            # Ignore, # charge-mode
        ],
        "endpoints": {
            "cockpit": "cockpit_fuel.json",
            "location": "location.json",
        },
        BINARY_SENSOR_DOMAIN: [],
        DEVICE_TRACKER_DOMAIN: [
            {
                ATTR_ENTITY_ID: "device_tracker.reg_number_location",
                ATTR_ICON: "mdi:car",
                ATTR_STATE: STATE_NOT_HOME,
                ATTR_UNIQUE_ID: "vf1aaaaa555777123_location",
            }
        ],
        SELECT_DOMAIN: [],
        SENSOR_DOMAIN: [
            {
                ATTR_ENTITY_ID: "sensor.reg_number_fuel_autonomy",
                ATTR_ICON: "mdi:gas-station",
                ATTR_STATE: "35",
                ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
                ATTR_UNIQUE_ID: "vf1aaaaa555777123_fuel_autonomy",
                ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
            },
            {
                ATTR_ENTITY_ID: "sensor.reg_number_fuel_quantity",
                ATTR_ICON: "mdi:fuel",
                ATTR_STATE: "3",
                ATTR_STATE_CLASS: STATE_CLASS_MEASUREMENT,
                ATTR_UNIQUE_ID: "vf1aaaaa555777123_fuel_quantity",
                ATTR_UNIT_OF_MEASUREMENT: VOLUME_LITERS,
            },
            {
                ATTR_ENTITY_ID: "sensor.reg_number_mileage",
                ATTR_ICON: "mdi:sign-direction",
                ATTR_STATE: "5567",
                ATTR_STATE_CLASS: STATE_CLASS_TOTAL_INCREASING,
                ATTR_UNIQUE_ID: "vf1aaaaa555777123_mileage",
                ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
            },
            {
                ATTR_DEFAULT_DISABLED: True,
                ATTR_DEVICE_CLASS: DEVICE_CLASS_TIMESTAMP,
                ATTR_ENTITY_ID: "sensor.reg_number_location_last_activity",
                ATTR_STATE: "2020-02-18T16:58:38+00:00",
                ATTR_UNIQUE_ID: "vf1aaaaa555777123_location_last_activity",
            },
        ],
    },
}
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a console, the CLI friendly front-end to plaso."""
import argparse
import logging
import os
import random
import sys
import tempfile
from dfvfs.lib import definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import resolver as path_spec_resolver
try:
# Support version 1.X of IPython.
# pylint: disable=no-name-in-module
from IPython.terminal.embed import InteractiveShellEmbed
except ImportError:
# Support version older than 1.X of IPython.
# pylint: disable=no-name-in-module
from IPython.frontend.terminal.embed import InteractiveShellEmbed
from IPython.config.loader import Config
# pylint: disable=unused-import
from plaso import analysis
from plaso import filters
from plaso import formatters
from plaso import output
from plaso import parsers
from plaso import preprocessors
from plaso.engine import collector
from plaso.engine import scanner
from plaso.engine import utils as engine_utils
from plaso.engine import engine
from plaso.frontend import frontend
from plaso.frontend import rpc_proxy
from plaso.frontend import utils as frontend_utils
from plaso.lib import binary
from plaso.lib import bufferlib
from plaso.lib import errors
from plaso.lib import event
from plaso.lib import eventdata
from plaso.lib import filter_interface
from plaso.lib import foreman
from plaso.lib import lexer
from plaso.lib import objectfilter
from plaso.lib import output as output_lib
from plaso.lib import pfilter
from plaso.lib import process_info
from plaso.lib import proxy
from plaso.lib import putils
from plaso.lib import queue
from plaso.lib import registry as class_registry
from plaso.lib import storage
from plaso.lib import timelib
from plaso.lib import utils
from plaso.output import helper as output_helper
from plaso.parsers import manager as parsers_manager
from plaso.parsers import plugins
from plaso.parsers import text_parser
from plaso.proto import plaso_storage_pb2
from plaso.serializer import interface as serializer_interface
from plaso.serializer import json_serializer
from plaso.serializer import protobuf_serializer
from plaso.unix import bsmtoken
from plaso.winnt import environ_expand
from plaso.winnt import known_folder_ids
from plaso.winreg import cache as win_registry_cache
from plaso.winreg import interface as win_registry_interface
from plaso.winreg import path_expander
from plaso.winreg import utils as win_registry_utils
from plaso.winreg import winpyregf
from plaso.winreg import winregistry
class PshellFrontend(frontend.ExtractionFrontend):
  """Extraction front-end used by the pshell console."""

  _BYTES_IN_A_MIB = 1024 * 1024

  def __init__(self):
    """Initializes the front-end with stdin/stdout reader and writer."""
    super(PshellFrontend, self).__init__(
        frontend.StdinFrontendInputReader(),
        frontend.StdoutFrontendOutputWriter())
def FindAllOutputs():
  """Returns all available output modules (delegates to putils)."""
  outputs = putils.FindAllOutputs()
  return outputs
def GetEventData(event_proto, before=0):
  """Returns a hexdump string of the event data.

  Args:
    event_proto: the event to dump.
    before: optional number of bytes to include before the data.
  """
  output_writer = frontend_utils.OutputWriter
  return output_writer.GetEventDataHexDump(event_proto, before)
def GetFileEntryFromEventObject(event_object):
  """Returns a file entry object for an event object's path spec.

  Args:
    event_object: An event object (an instance of EventObject).

  Returns:
    A file entry object (instance of vfs.file_entry.FileEntry) or
    None if the event object doesn't have a defined path spec.
  """
  path_spec = getattr(event_object, 'pathspec', None)
  if path_spec:
    return path_spec_resolver.Resolver.OpenFileEntry(path_spec)
  return None
def GetParserNames(parser_filter_string=None):
  """Retrieves the parser names.

  Args:
    parser_filter_string: Optional parser filter string. The default is None.

  Returns:
    A list of parser names.
  """
  manager_class = parsers_manager.ParsersManager
  return manager_class.GetParserNames(
      parser_filter_string=parser_filter_string)
def GetParserObjects(parser_filter_string=None):
  """Retrieves the parser objects.

  Args:
    parser_filter_string: Optional parser filter string. The default is None.

  Returns:
    A list of parser objects (instances of BaseParser).
  """
  manager_class = parsers_manager.ParsersManager
  return manager_class.GetParserObjects(
      parser_filter_string=parser_filter_string)
def OpenOSFile(path):
  """Opens a file entry from the operating system file system.

  Args:
    path: the path of the file to open.

  Returns:
    A file entry object or None when the path does not refer to a file.
  """
  if not os.path.isfile(path):
    logging.error(u'File: {0:s} does not exist.'.format(path))
    return None
  os_path_spec = path_spec_factory.Factory.NewPathSpec(
      definitions.TYPE_INDICATOR_OS, location=path)
  return path_spec_resolver.Resolver.OpenFileEntry(os_path_spec)
def OpenStorageFile(storage_path):
  """Opens a storage file and returns the storage file object.

  Args:
    storage_path: the path of the plaso storage file.

  Returns:
    A storage file object (instance of storage.StorageFile) or None if the
    path does not exist or the file could not be opened as a storage file.
  """
  if not os.path.isfile(storage_path):
    return None
  try:
    return storage.StorageFile(storage_path, read_only=True)
  except IOError:
    # Original code fell through to `return store` here, raising
    # UnboundLocalError since `store` was never assigned on this path.
    # Parenthesized print works identically under Python 2 and 3.
    print('Unable to load storage file, not a storage file?')
    return None
def OpenTskFile(image_path, image_offset, path=None, inode=None):
  """Opens a file entry of a file inside an image file.

  Args:
    image_path: the path of the storage media image.
    image_offset: the byte offset of the volume within the image; a value
                  greater than 0 inserts a TSK partition path spec layer.
    path: optional location of the file within the file system.
    inode: optional inode number of the file.

  Returns:
    A file entry object (instance of dfvfs.FileEntry).
  """
  factory = path_spec_factory.Factory
  base_spec = factory.NewPathSpec(
      definitions.TYPE_INDICATOR_OS, location=image_path)
  if image_offset > 0:
    base_spec = factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK_PARTITION, start_offset=image_offset,
        parent=base_spec)
  if inode is None:
    tsk_spec = factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, location=path, parent=base_spec)
  else:
    # When an inode is given, location falls back to an empty string.
    location = path if path is not None else u''
    tsk_spec = factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK, inode=inode, location=location,
        parent=base_spec)
  return path_spec_resolver.Resolver.OpenFileEntry(tsk_spec)
def OpenVssFile(path, image_path, store_number, image_offset):
  """Opens a file entry inside a VSS inside an image file.

  Args:
    path: the location of the file within the shadow copy.
    image_path: the path of the storage media image.
    store_number: the VSS store number; decremented by one to form the
                  store index passed to dfvfs.
    image_offset: the byte offset of the volume within the image.

  Returns:
    A file entry object (instance of dfvfs.FileEntry).
  """
  factory = path_spec_factory.Factory
  current_spec = factory.NewPathSpec(
      definitions.TYPE_INDICATOR_OS, location=image_path)
  if image_offset > 0:
    current_spec = factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK_PARTITION, start_offset=image_offset,
        parent=current_spec)
  current_spec = factory.NewPathSpec(
      definitions.TYPE_INDICATOR_VSHADOW, store_index=store_number - 1,
      parent=current_spec)
  current_spec = factory.NewPathSpec(
      definitions.TYPE_INDICATOR_TSK, location=path, parent=current_spec)
  return path_spec_resolver.Resolver.OpenFileEntry(current_spec)
def ParseFile(file_entry):
  """Parse a file given a file entry or path and return a list of results.

  Args:
    file_entry: Either a file entry object (instance of dfvfs.FileEntry)
                or a string containing a path (absolute or relative) to a
                local file.

  Returns:
    A list of event object (instance of EventObject) that were extracted from
    the file (or an empty list if no events were extracted).
  """
  if not file_entry:
    return
  # Python 2 idiom: accept a plain path string and resolve it to a file entry.
  if isinstance(file_entry, basestring):
    file_entry = OpenOSFile(file_entry)
  # Set up the engine.
  collection_queue = queue.SingleThreadedQueue()
  storage_queue = queue.SingleThreadedQueue()
  parse_error_queue = queue.SingleThreadedQueue()
  engine_object = engine.Engine(
      collection_queue, storage_queue, parse_error_queue)
  # Create a worker.
  worker_object = engine_object.CreateExtractionWorker('0')
  # TODO: add support for parser_filter_string.
  # NOTE: "InitalizeParserObjects" (sic) is the actual plaso API method name.
  worker_object.InitalizeParserObjects()
  worker_object.ParseFileEntry(file_entry)
  # Signal end-of-input on both queues before draining, otherwise the loop
  # below would block waiting for more items.
  collection_queue.SignalEndOfInput()
  engine_object.SignalEndOfInputStorageQueue()
  results = []
  # Drain the storage queue until it is empty or the end-of-input marker
  # is reached; everything before that is an extracted event object.
  while True:
    try:
      item = storage_queue.PopItem()
    except errors.QueueEmpty:
      break
    if isinstance(item, queue.QueueEndOfInput):
      break
    results.append(item)
  return results
def Pfile2File(file_object, path):
  """Saves a file-like object to the given path."""
  writer = frontend_utils.OutputWriter
  return writer.WriteFile(file_object, path)
def PrintTimestamp(timestamp):
  """Returns a human readable string for a timestamp value."""
  writer = frontend_utils.OutputWriter
  return writer.GetDateTimeString(timestamp)
def PrintTimestampFromEvent(event_object):
  """Returns a human readable timestamp for an event object (0 if unset)."""
  timestamp = getattr(event_object, 'timestamp', 0)
  return PrintTimestamp(timestamp)
def Main():
  """Start the tool.

  Builds a default option set, configures the pshell front-end and drops
  the user into an interactive IPython shell whose namespace is preloaded
  with helper functions and the front-end objects.

  Returns:
    A boolean, True if the interactive session ran, False if the front-end
    could not be configured.
  """
  temp_location = tempfile.gettempdir()

  options = putils.Options()

  # Set the default options.
  options.buffer_size = 0
  options.debug = False
  options.filename = '.'
  options.file_filter = ''
  options.filter = ''
  options.image = False
  options.image_offset = None
  options.image_offset_bytes = None
  options.old_preprocess = False
  options.open_files = False
  options.output = os.path.join(temp_location, 'wheredidmytimelinego.dump')
  options.output_module = ''
  options.parsers = ''
  options.parse_vss = False
  options.preprocess = False
  options.recursive = False
  options.single_process = False
  options.timezone = 'UTC'
  options.workers = 5

  format_str = '[%(levelname)s] (%(processName)-10s) %(message)s'
  logging.basicConfig(format=format_str)

  front_end = PshellFrontend()

  try:
    front_end.ParseOptions(options, source_option='filename')
    front_end.SetStorageFile(options.output)
  except errors.BadConfigOption as exception:
    logging.error(u'{0:s}'.format(exception))
    # The shell is unusable with a half-configured front-end, so bail out
    # instead of silently continuing (previously this fell through).
    return False

  # TODO: move to frontend object.
  if options.image and options.image_offset_bytes is None:
    if options.image_offset is not None:
      # Default to 512 bytes per sector when the option is not supplied.
      bytes_per_sector = getattr(options, 'bytes_per_sector', 512)
      options.image_offset_bytes = options.image_offset * bytes_per_sector
    else:
      options.image_offset_bytes = 0

  namespace = {}

  pre_obj = event.PreprocessObject()

  # Expose module globals plus a curated set of helpers in the shell.
  namespace.update(globals())
  namespace.update({
      'frontend': front_end,
      'pre_obj': pre_obj,
      'options': options,
      'find_all_output': FindAllOutputs,
      'parse_file': ParseFile,
      'timestamp_from_event': PrintTimestampFromEvent,
      'message': formatters.manager.EventFormatterManager.GetMessageStrings})

  # Include few random phrases that get thrown in once the user exits the
  # shell.
  _my_random_phrases = [
      u'I haven\'t seen timelines like this since yesterday.',
      u'Timelining is super relaxing.',
      u'Why did I not use the shell before?',
      u'I like a do da cha cha',
      u'I AM the Shogun of Harlem!',
      (u'It doesn\'t matter if you win or lose, it\'s what you do with your '
       u'dancin\' shoes'),
      u'I have not had a night like that since the seventies.',
      u'Baker Team. They\'re all dead, sir.',
      (u'I could have killed \'em all, I could\'ve killed you. In town '
       u'you\'re the law, out here it\'s me.'),
      (u'Are you telling me that 200 of our men against your boy is a no-win '
       u'situation for us?'),
      u'Hunting? We ain\'t huntin\' him, he\'s huntin\' us!',
      u'You picked the wrong man to push',
      u'Live for nothing or die for something',
      u'I am the Fred Astaire of karate.',
      (u'God gave me a great body and it\'s my duty to take care of my '
       u'physical temple.'),
      u'This maniac should be wearing a number, not a badge',
      u'Imagination is more important than knowledge.',
      u'Do you hate being dead?',
      u'You\'ve got 5 seconds... and 3 are up.',
      u'He is in a gunfight right now. I\'m gonna have to take a message',
      u'That would be better than losing your teeth',
      u'The less you know, the more you make',
      (u'A SQL query goes into a bar, walks up to two tables and asks, '
       u'"Can I join you?"'),
      u'This is your captor speaking.',
      (u'If I find out you\'re lying, I\'ll come back and kill you in your '
       u'own kitchen.'),
      u'That would be better than losing your teeth',
      (u'He\'s the kind of guy who would drink a gallon of gasoline so '
       u'that he can p*ss into your campfire.'),
      u'I\'m gonna take you to the bank, Senator Trent. To the blood bank!',
      u'I missed! I never miss! They must have been smaller than I thought',
      u'Nah. I\'m just a cook.',
      u'Next thing I know, you\'ll be dating musicians.',
      u'Another cold day in hell',
      u'Yeah, but I bet you she doesn\'t see these boys in the choir.',
      u'You guys think you\'re above the law... well you ain\'t above mine!',
      (u'One thought he was invincible... the other thought he could fly... '
       u'They were both wrong'),
      u'To understand what recursion is, you must first understand recursion']

  arg_description = (
      u'pshell is the interactive session tool that can be used to'
      u'MISSING')

  arg_parser = argparse.ArgumentParser(description=arg_description)

  arg_parser.add_argument(
      '-s', '--storage_file', '--storage-file', dest='storage_file',
      type=unicode, default=u'', help=u'Path to a plaso storage file.',
      action='store', metavar='PATH')

  configuration = arg_parser.parse_args()

  if configuration.storage_file:
    store = OpenStorageFile(configuration.storage_file)
    if store:
      namespace.update({'store': store})

  # Build the function overview printed in the shell banner.
  functions = [
      FindAllOutputs, GetEventData, GetParserNames, GetParserObjects,
      OpenOSFile, OpenStorageFile, OpenTskFile, OpenVssFile,
      ParseFile, Pfile2File,
      PrintTimestamp, PrintTimestampFromEvent]

  functions_strings = []
  for function in functions:
    docstring, _, _ = function.__doc__.partition(u'\n')
    docstring = u'\t{0:s} - {1:s}'.format(function.__name__, docstring)
    functions_strings.append(docstring)
  functions_strings = u'\n'.join(functions_strings)

  banner = (
      u'--------------------------------------------------------------\n'
      u' Welcome to Plaso console - home of the Plaso adventure land.\n'
      u'--------------------------------------------------------------\n'
      u'This is the place where everything is allowed, as long as it is '
      u'written in Python.\n\n'
      u'Objects available:\n\toptions - set of options to the frontend.\n'
      u'\tfrontend - A copy of the pshell frontend.\n'
      u'\n'
      u'All libraries have been imported and can be used, see help(frontend) '
      u'or help(parser).\n'
      u'\n'
      u'Base methods:\n'
      u'{0:s}'
      u'\n\tmessage - Print message strings from an event object.'
      u'\n'
      u'\n'
      u'p.s. typing in "pdb" and pressing enter puts the shell in debug'
      u'mode which causes all exceptions being sent to pdb.\n'
      u'Happy command line console fu-ing.\n\n').format(functions_strings)

  exit_message = u'You are now leaving the winter wonderland.\n\n{}'.format(
      random.choice(_my_random_phrases))

  shell_config = Config()
  # Make slight adjustments to the iPython prompt.
  shell_config.PromptManager.out_template = (
      r'{color.Normal}[{color.Red}\#{color.Normal}]<<< ')
  shell_config.PromptManager.in_template = (
      r'[{color.LightBlue}\T{color.Normal}] {color.LightPurple}\Y2\n'
      r'{color.Normal}[{color.Red}\#{color.Normal}] \$ ')
  shell_config.PromptManager.in2_template = r'.\D.>>>'

  ipshell = InteractiveShellEmbed(
      user_ns=namespace, config=shell_config, banner1=banner,
      exit_msg=exit_message)
  ipshell.confirm_exit = False
  # Set autocall to two, making parenthesis not necessary when calling
  # function names (although they can be used and are necessary sometimes,
  # like in variable assignments, etc).
  ipshell.autocall = 2
  ipshell()

  return True
if __name__ == '__main__':
  # Exit with a non-zero status when the shell fails to start.
  sys.exit(0 if Main() else 1)
| |
from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from scipy.linalg import eigh
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold.spectral_embedding_ import _graph_connected_component
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
from sklearn.utils.graph import graph_laplacian
from sklearn.utils.extmath import _deterministic_vector_sign_flip
# Non centered, sparse centers shared by the tests below.
centers = np.array([
    [0.0, 5.0, 0.0, 0.0, 0.0],
    [0.0, 0.0, 4.0, 0.0, 0.0],
    [1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
# Fixed-seed blob dataset used as the common input of most tests below.
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
                            cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
""" Check array A and B are equal with possible sign flipping on
each columns"""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
def test_sparse_graph_connected_component():
    # _graph_connected_component must recover each block of a sparse
    # block-diagonal affinity matrix built from four disjoint node groups.
    rng = np.random.RandomState(42)
    n_samples = 300
    boundaries = [0, 42, 121, 200, n_samples]
    p = rng.permutation(n_samples)
    connections = []
    for start, stop in zip(boundaries[:-1], boundaries[1:]):
        group = p[start:stop]
        # Connect all elements within the group at least once via an
        # arbitrary path that spans the group.
        for i in range(len(group) - 1):
            connections.append((group[i], group[i + 1]))
        # Add some more random connections within the group
        min_idx, max_idx = 0, len(group) - 1
        n_random_connections = 1000
        source = rng.randint(min_idx, max_idx, size=n_random_connections)
        target = rng.randint(min_idx, max_idx, size=n_random_connections)
        connections.extend(zip(group[source], group[target]))
    # Build a symmetric affinity matrix
    row_idx, column_idx = tuple(np.array(connections).T)
    data = rng.uniform(.1, 42, size=len(connections))
    affinity = coo_matrix((data, (row_idx, column_idx)))
    affinity = 0.5 * (affinity + affinity.T)
    for start, stop in zip(boundaries[:-1], boundaries[1:]):
        component_1 = _graph_connected_component(affinity, p[start])
        component_size = stop - start
        assert_equal(component_1.sum(), component_size)
        # We should retrieve the same component mask by starting by both ends
        # of the group
        component_2 = _graph_connected_component(affinity, p[stop - 1])
        assert_equal(component_2.sum(), component_size)
        assert_array_equal(component_1, component_2)
def test_spectral_embedding_two_components(seed=36):
    # Test spectral embedding with two components: the one-dimensional
    # embedding must perfectly separate two weakly connected blocks.
    random_state = np.random.RandomState(seed)
    n_sample = 100
    affinity = np.zeros(shape=[n_sample * 2, n_sample * 2])
    # first component
    affinity[0:n_sample,
             0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
    # second component
    affinity[n_sample::,
             n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
    # Test of internal _graph_connected_component before connection
    component = _graph_connected_component(affinity, 0)
    assert_true(component[:n_sample].all())
    assert_true(not component[n_sample:].any())
    component = _graph_connected_component(affinity, -1)
    assert_true(not component[:n_sample].any())
    assert_true(component[n_sample:].all())
    # connection
    affinity[0, n_sample + 1] = 1
    affinity[n_sample + 1, 0] = 1
    # Zero the diagonal and re-symmetrize before embedding.
    affinity.flat[::2 * n_sample + 1] = 0
    affinity = 0.5 * (affinity + affinity.T)
    true_label = np.zeros(shape=2 * n_sample)
    true_label[0:n_sample] = 1
    se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
                                   random_state=np.random.RandomState(seed))
    embedded_coordinate = se_precomp.fit_transform(affinity)
    # Some numpy versions are touchy with types
    embedded_coordinate = \
        se_precomp.fit_transform(affinity.astype(np.float32))
    # thresholding on the first components using 0.
    label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
    assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
    # Test spectral embedding with precomputed kernel: feeding the RBF
    # kernel matrix directly must match the "rbf" affinity mode.
    gamma = 1.0
    se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
                                   random_state=np.random.RandomState(seed))
    se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
                               gamma=gamma,
                               random_state=np.random.RandomState(seed))
    embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
    embed_rbf = se_rbf.fit_transform(S)
    assert_array_almost_equal(
        se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
    # Embeddings are only defined up to a per-column sign flip.
    assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
    # Test spectral embedding with callable affinity: a callable computing
    # the RBF kernel must behave like the built-in "rbf" affinity mode.
    gamma = 0.9
    kern = rbf_kernel(S, gamma=gamma)
    se_callable = SpectralEmbedding(n_components=2,
                                    affinity=(
                                        lambda x: rbf_kernel(x, gamma=gamma)),
                                    gamma=gamma,
                                    random_state=np.random.RandomState(seed))
    se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
                               gamma=gamma,
                               random_state=np.random.RandomState(seed))
    embed_rbf = se_rbf.fit_transform(S)
    embed_callable = se_callable.fit_transform(S)
    assert_array_almost_equal(
        se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
    assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
    # Embeddings are only defined up to a per-column sign flip.
    assert_true(
        _check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
    # Test spectral embedding with amg solver: results must agree with the
    # arpack eigensolver (up to column sign flips).
    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        # pyamg is an optional dependency; skip rather than fail.
        raise SkipTest("pyamg not available.")
    se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
                               eigen_solver="amg", n_neighbors=5,
                               random_state=np.random.RandomState(seed))
    se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
                                  eigen_solver="arpack", n_neighbors=5,
                                  random_state=np.random.RandomState(seed))
    embed_amg = se_amg.fit_transform(S)
    embed_arpack = se_arpack.fit_transform(S)
    assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
    # Test using pipeline to do spectral clustering: KMeans on the spectral
    # embedding must recover the true blob labels (NMI of 1 to 2 decimals).
    random_state = np.random.RandomState(seed)
    se_rbf = SpectralEmbedding(n_components=n_clusters,
                               affinity="rbf",
                               random_state=random_state)
    se_knn = SpectralEmbedding(n_components=n_clusters,
                               affinity="nearest_neighbors",
                               n_neighbors=5,
                               random_state=random_state)
    for se in [se_rbf, se_knn]:
        km = KMeans(n_clusters=n_clusters, random_state=random_state)
        km.fit(se.fit_transform(S))
        assert_array_almost_equal(
            normalized_mutual_info_score(
                km.labels_,
                true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
    # An unrecognized eigen_solver value must make fit raise ValueError.
    rng = np.random.RandomState(seed)
    embedder = SpectralEmbedding(n_components=1, affinity="precomputed",
                                 random_state=rng,
                                 eigen_solver="<unknown>")
    assert_raises(ValueError, embedder.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
    # An unrecognized affinity value must make fit raise ValueError.
    rng = np.random.RandomState(seed)
    embedder = SpectralEmbedding(n_components=1, affinity="<unknown>",
                                 random_state=rng)
    assert_raises(ValueError, embedder.fit, S)
def test_connectivity(seed=36):
    # _graph_is_connected must agree for dense, CSR and CSC inputs on both
    # a disconnected and a connected adjacency matrix.
    disconnected = np.array([[1, 0, 0, 0, 0],
                             [0, 1, 1, 0, 0],
                             [0, 1, 1, 1, 0],
                             [0, 0, 1, 1, 1],
                             [0, 0, 0, 1, 1]])
    connected = np.array([[1, 1, 0, 0, 0],
                          [1, 1, 1, 0, 0],
                          [0, 1, 1, 1, 0],
                          [0, 0, 1, 1, 1],
                          [0, 0, 0, 1, 1]])
    for graph, expected in ((disconnected, False), (connected, True)):
        assert_equal(_graph_is_connected(graph), expected)
        assert_equal(_graph_is_connected(csr_matrix(graph)), expected)
        assert_equal(_graph_is_connected(csc_matrix(graph)), expected)
def test_spectral_embedding_deterministic():
    # Two runs on the same affinity matrix must give identical embeddings.
    rng = np.random.RandomState(36)
    sims = rbf_kernel(rng.randn(10, 30))
    first_embedding = spectral_embedding(sims)
    second_embedding = spectral_embedding(sims)
    assert_array_almost_equal(first_embedding, second_embedding)
def test_spectral_embedding_unnormalized():
    # Test that spectral_embedding is also processing unnormalized laplacian
    # correctly
    random_state = np.random.RandomState(36)
    data = random_state.randn(10, 30)
    sims = rbf_kernel(data)
    n_components = 8
    embedding_1 = spectral_embedding(sims,
                                     norm_laplacian=False,
                                     n_components=n_components,
                                     drop_first=False)
    # Verify using manual computation with dense eigh
    laplacian, dd = graph_laplacian(sims, normed=False, return_diag=True)
    _, diffusion_map = eigh(laplacian)
    embedding_2 = diffusion_map.T[:n_components] * dd
    # Fix the sign convention so both embeddings are comparable.
    embedding_2 = _deterministic_vector_sign_flip(embedding_2).T
    assert_array_almost_equal(embedding_1, embedding_2)
| |
# -*- coding: utf-8 -*-
import abc
import collections
import re
from watson.http import REQUEST_METHODS, MIME_TYPES
from watson.common.imports import get_qualified_name
__all__ = ('Base', 'Literal', 'Segment', 'RouteMatch')
# Result of a successful Base.match()/Segment.match()/Literal.match() call.
# route: The matched route
# params: The parameters that have been matched
RouteMatch = collections.namedtuple('RouteMatch', 'route params')
class Base(metaclass=abc.ABCMeta):
    """Matches a request to a specific pattern.

    The only required attribute of a route is the 'path' key. This defines
    the URL path that it must be matched against.

    Additional options can be added to 'requires' to force additional
    matching:
    - subdomain: The subdomain to match
    - format: The accept format (/path.xml or Accept: text/xml in headers)

    Child routes can also be added, to lessen the amount of typing required
    to define further routes.

    Attributes:
        name (string): The name of the route, referenced by the Router.
        path (string): The url path that should be matched.
        accepts (tuple): The REQUEST_METHODS that are accepted.
        requires (dict): A dict of values that must be matched, can be a
            regular expression.
        priority (int): If multiple matching routes are found, determine
            relevance.

    Example:
        .. code-block: python

            routes = {
                'home': {
                    'path': '/',
                    'accepts': ('GET',),
                    'options': {},
                    'defaults': {},
                    'requires': {
                        'format': 'xml'
                    },
                    'children': {
                        'about': {
                            'path': 'about'
                        }
                    }
                }
            }
            router = Router(routes=routes)
            matches = [match for match in router.matches(Request(environ))]
    """

    __slots__ = ('_name', '_path', '_accepts', '_requires', '_defaults',
                 '_options', '_priority', '_regex_requires')

    @property
    def name(self):
        return self._name

    @property
    def path(self):
        return self._path

    @property
    def accepts(self):
        return self._accepts

    @property
    def requires(self):
        return self._requires

    @property
    def defaults(self):
        return self._defaults

    @property
    def options(self):
        return self._options

    @property
    def priority(self):
        # A falsy priority (0 or None) falls back to the default of 1.
        return int(self._priority) or 1

    @property
    def path_or_regex(self):
        # Subclasses such as Segment expose a 'regex' attribute that is
        # used when no plain path was supplied.
        return self.path if self.path else self.regex

    def __init__(self, name, path,
                 accepts=None, requires=None, defaults=None, options=None,
                 priority=1, **kwargs):
        """Initialize the route.

        Args:
            name (string): The name of the route.
            path (string): The url path to match.
            accepts (tuple): Accepted request methods; all REQUEST_METHODS
                when not specified.
            requires (dict): Values that must be matched (may be regexes).
            defaults (dict): Default values for matched parameters.
            options (dict): Additional route options.
            priority (int): Relevance when multiple routes match.
        """
        self._name = name
        self._path = path
        self._accepts = accepts or REQUEST_METHODS
        self._requires = requires or {}
        self._defaults = defaults or {}
        self._options = options or {}
        self._priority = priority
        self._process_requires()

    @classmethod
    def builder(cls, **definition):
        # Factory used by the router; subclasses override this as a
        # classmethod, so it is declared as one here for a consistent
        # interface (the original base declared it as an instance method).
        raise NotImplementedError()

    def _process_requires(self):
        # Pre-compile string based requirements so matching requests does
        # not recompile them on every call.
        self._regex_requires = {k: re.compile(v) for k, v in self.requires.items() if isinstance(v, str)}

    def assemble(self, prefix=None, **kwargs):
        # Implemented by subclasses: build a url path from the route.
        raise NotImplementedError()

    def match(self, request):
        """Match the route to a request and return the matched parameters.

        Processes the route against the following requirements:
        - REQUEST_METHOD
        - Subdomain
        - Format
        - GET vars

        If any of the above requirements fail, no parameters are returned,
        and the route is considered invalid.

        Methods that override this should return a RouteMatch(self, params)
        object.

        Args:
            request (watson.http.messages.Request): The request to match.
        """
        params = self.defaults.copy()
        requires = self.requires.copy()
        if request.method not in self._accepts:
            return None
        if 'subdomain' in self.requires:
            del requires['subdomain']
            subdomain = self.requires['subdomain']
            if isinstance(subdomain, (list, tuple)):
                if request.url.subdomain not in subdomain:
                    return None
            elif request.url.subdomain != subdomain:
                return None
        if 'format' in self.requires:
            del requires['format']
            accept_headers = request.environ.get('HTTP_ACCEPT')
            # Collect the mime formats the request accepts.
            formats = [format for format
                       in MIME_TYPES if accept_headers in MIME_TYPES[format]]
            if formats:
                # NOTE(review): when formats exist but none satisfy the
                # required regex, the route still matches without a
                # 'format' param -- confirm this is intended.
                for format in formats:
                    if self._regex_requires['format'].match(format):
                        params['format'] = format
            else:
                return None
        if request.method == 'GET' and requires and request.get:
            # Validate remaining requirements against the query string.
            for key, value in request.get.items():
                regex = self._regex_requires.get(key, None)
                if regex:
                    if regex.match(value):
                        params[key] = value
                    else:
                        return None
        return params

    def __repr__(self):
        return (
            '<{0} name:{1} path:{2}>'.format(
                get_qualified_name(self),
                self.name,
                self.path)
        )
# Tokenizer for segmented paths such as '/path[/:id]': captures a run of
# static text and the delimiter (':', '[' or ']') or end-of-string after it.
segments_pattern = re.compile(r'(?P<static>[^:\[\]]*)(?P<token>[:\[\]]|$)')
# Extracts the name that follows a ':' named-segment marker.
token_pattern = re.compile(r'(?P<name>[^:/\[\]]+)')
# Regex templates used when converting parsed segments into a pattern.
optional_segment_string = '(?:{value})?'
value_pattern_string = '(?P<{value}>{end})'
end_pattern_string = '[^/]+'
def segments_from_path(path):
    """Converts a segmented path into a regular expression.

    A segmented route can be any of the following:
    - /route/:segment, segment will be a required parameter
    - /route[/:segment], segment will be an optional parameter
    - /route[/:segment[/:nested]] - segment will be a optional parameter

    Inspired by both Rails and ZF2.

    Args:
        path: the segmented path to convert to regex

    Returns:
        list: A list of segments based on the path.
    """
    # depth counts the number of currently open '[' optional groups;
    # depth_segments[depth] is the list new segments are appended to.
    depth, segments = 0, []
    depth_segments = [segments]
    while path:
        # Grab the next run of static text and the delimiter ending it.
        matches = segments_pattern.search(path)
        segment_matches = matches.groups()
        # Consume the matched static text plus the delimiter token.
        offset = '{0}{1}'.format(segment_matches[0], segment_matches[1])
        path = path[len(offset):]
        token = matches.group('token')
        static = matches.group('static')
        if static:
            depth_segments[depth].append(('static', static))
        if token == ':':
            # A ':name' dynamic segment; consume the name from the path.
            named_segment = token_pattern.search(path)
            segment = named_segment.groupdict()['name']
            depth_segments[depth].append(('segment', segment))
            path = path[len(segment):]
        elif token == '[':
            # Open a new optional group one level deeper and point the
            # current depth at its (initially empty) child list.
            depth += 1
            current_depth = depth - 1
            total_depths = len(depth_segments)
            if total_depths <= depth:
                depth_segments.append([])
            depth_segments[current_depth].append(('optional', []))
            depth_segments[depth] = depth_segments[current_depth][len(depth_segments[current_depth]) - 1][1]
        elif token == ']':
            # Close the innermost optional group.
            del depth_segments[depth]
            depth -= 1
            if depth < 0:
                raise ValueError('Bracket mismatch detected.')
        else:
            # The '$' alternative matched: end of the path.
            break
    del depth_segments
    return segments
def regex_from_segments(segments, requires=None, escape_segment=True):
    """Converts a list of segment tuple pairs into a regular expression string.

    Args:
        segments (list): The segment tuple pairs to convert.
        requires (dict): Key/value pairs to be used in each segment.
        escape_segment (bool): Whether static parts are regex-escaped.

    Returns:
        string: The regex for the segments
    """
    # Guard against requires being omitted: named segments below call
    # requires.get(), which previously raised AttributeError on None.
    if requires is None:
        requires = {}
    regex = []
    for type_, value in segments:
        if type_ == 'static':
            if escape_segment:
                value = re.escape(value)
            regex.append(value)
        elif type_ == 'optional':
            # NOTE(review): the recursive call also appends a trailing '$'
            # inside the optional group, mirroring the original behavior.
            regex.append(
                optional_segment_string.format(
                    value=regex_from_segments(value, requires)))
        else:
            # A named segment: use its required pattern when given,
            # otherwise match anything up to the next '/'.
            regex.append(
                value_pattern_string.format(
                    value=value,
                    end=requires.get(value, end_pattern_string)))
    regex.append('$')
    return ''.join(regex)
def path_from_segments(segments, params, optional=False):
    """Converts a list of segment tuple pairs into a url path.

    Args:
        segments (list): The segment tuple pairs to convert.
        params (dict): Key/value pairs for each segment and its value.
        optional (bool): Whether these segments belong to an optional
            group (set by recursive calls).
    """
    path = []
    for segment in segments:
        type_, name = segment
        # Once inside an optional group, stay optional for nested calls.
        optional = optional if optional else type_ == 'optional'
        if isinstance(name, list):
            # A nested (optional) group: render it recursively.
            path.append(path_from_segments(name, params, optional))
        else:
            if type_ == 'segment':
                if name in params and params[name]:
                    path.append(str(params[name]))
                elif optional:
                    # Drop the static text emitted for this unfilled
                    # optional segment.
                    # NOTE(review): if this group holds a single segment,
                    # len(segments) - 1 == 0 and path[0:-0] empties the
                    # whole list -- confirm this is intended.
                    remove_segments = len(segments) - 1
                    path = path[0:-remove_segments]
                else:
                    raise KeyError("Missing '{0}' in params.".format(name))
            else:
                # Static text is copied through unchanged.
                path.append(name)
    return ''.join(path)
class Segment(Base):
    """Matches a request against a regular expression.

    Attributes:
        regex (SRE_Pattern): The regex pattern used to match the path.
        segments (list): A tuple pair list of segments for the route.
    """
    __slots__ = ('_regex', '_segments')

    @property
    def regex(self):
        return self._regex

    @regex.setter
    def regex(self, regex):
        # A plain string is treated as a segmented path and converted to a
        # compiled pattern; an already-compiled pattern is stored as-is.
        if isinstance(regex, str):
            # Only regex-escape static parts for path-style strings.
            escape = regex.startswith('/')
            self._segments = segments_from_path(regex)
            regex_string = regex_from_segments(
                self.segments, self.requires, escape_segment=escape)
            regex = re.compile(regex_string)
        self._regex = regex

    @property
    def segments(self):
        return self._segments

    def __init__(self, name, path=None,
                 accepts=None, requires=None, defaults=None, options=None,
                 priority=1, regex=None, **kwargs):
        # Either a segmented path or an explicit regex must be supplied.
        if not path and not regex:
            raise TypeError(
                'You must specify either path or regex for the route named {0}'.format(name))
        super(Segment, self).__init__(
            name, path,
            accepts, requires, defaults, options, priority, **kwargs)
        self.regex = regex if regex else path

    def assemble(self, prefix=None, **kwargs):
        """Converts the route into a path.

        Applies any keyword arguments as params on the route.

        Example:
        .. code-block:: python
            route = Route('search', path='/search/:keyword')
            route.assemble(keyword='test')  # /search/test
        """
        # Explicit kwargs take precedence over the configured defaults.
        params = collections.ChainMap(kwargs or {}, self.defaults)
        path = path_from_segments(self.segments, params)
        return prefix + path if prefix else path

    def match(self, request):
        # Validate the non-path requirements (method, subdomain, format)
        # via the base class first.
        params = super(Segment, self).match(request)
        if params is None:
            return None
        matches = self.regex.match(request.environ.get('PATH_INFO'))
        if matches:
            params = dict(params, **matches.groupdict())
            # Unmatched optional groups come back as None; fall back to
            # the configured default for those parameters.
            for k, v in self.defaults.items():
                if params[k] is None:
                    params[k] = v
            return RouteMatch(self, params)
        return None

    @classmethod
    def builder(cls, **definition):
        # A Segment route is identified by an explicit regex, or by a
        # path containing dynamic tokens ('[' or ':').
        if ('regex' in definition
                or ('path' in definition # noqa
                    and any((c in {'[', ':'}) for c in definition['path']))): # noqa
            return cls(**definition)
        raise TypeError('Not a valid Segment')

    def __repr__(self):
        class_ = get_qualified_name(self)
        if self.path:
            return (
                '<{0} name:{1} path:{2} match:{3}>'.format(
                    class_,
                    self.name,
                    self.path,
                    self.regex.pattern)
            )
        return (
            '<{0} name:{1} match:{2}>'.format(
                class_,
                self.name,
                self.regex.pattern)
        )
class Literal(Base):
    """Matches a request against a literal path.

    A literal path is classified as /example/path where there are no dynamic
    elements.
    """

    def assemble(self, prefix=None, **kwargs):
        """Converts the route into a path.

        Applies any keyword arguments as params on the route.

        Example:
        .. code-block:: python
            route = Literal('search', path='/search/:keyword')
            route.assemble(keyword='test')  # /search/test
        """
        if prefix:
            return prefix + self.path
        return self.path

    def match(self, request):
        """Return a RouteMatch when PATH_INFO equals the literal path."""
        params = super(Literal, self).match(request)
        if params is None:
            return None
        if request.environ['PATH_INFO'] != self.path:
            return None
        return RouteMatch(self, params=params)

    @classmethod
    def builder(cls, **definition):
        # Any definition can be built as a literal route.
        return cls(**definition)
# Backwards-compatible aliases for the pre-rename class names.
# Deprecated, will be removed in the next major version
BaseRoute = Base
LiteralRoute = Literal
SegmentRoute = Segment
| |
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A wrapper for the GStreamer Python bindings that exposes a simple
music player.
"""
from __future__ import division, absolute_import, print_function
import six
import sys
import time
from six.moves import _thread
import os
import copy
from six.moves import urllib
from beets import ui
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst # noqa: E402
Gst.init(None)
class QueryError(Exception):
    """Raised when a GStreamer position/duration query fails."""
    pass
class GstPlayer(object):
    """A music player abstracting GStreamer's Playbin element.

    Create a player object, then call run() to start a thread with a
    runloop. Then call play_file to play music. Use player.playing
    to check whether music is currently playing.

    A basic play queue is also implemented (just a Python list,
    player.queue, whose last element is next to play). To use it,
    just call enqueue() and then play(). When a track finishes and
    another is available on the queue, it is played automatically.

    NOTE(review): no 'queue'/'enqueue' members are defined in this class;
    the paragraph above may be stale -- confirm against callers.
    """

    def __init__(self, finished_callback=None):
        """Initialize a player.

        If a finished_callback is provided, it is called every time a
        track started with play_file finishes.

        Once the player has been created, call run() to begin the main
        runloop in a separate thread.
        """
        # Set up the Gstreamer player. From the pygst tutorial:
        # http://pygstdocs.berlios.de/pygst-tutorial/playbin.html
        ####
        # Updated to GStreamer 1.0 with:
        # https://wiki.ubuntu.com/Novacut/GStreamer1.0
        self.player = Gst.ElementFactory.make("playbin", "player")
        if self.player is None:
            raise ui.UserError("Could not create playbin")
        fakesink = Gst.ElementFactory.make("fakesink", "fakesink")
        if fakesink is None:
            raise ui.UserError("Could not create fakesink")
        # Discard any video stream; only audio playback is wanted.
        self.player.set_property("video-sink", fakesink)
        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self._handle_message)

        # Set up our own stuff.
        self.playing = False
        self.finished_callback = finished_callback
        # Last known (position, length); served while queries fail.
        self.cached_time = None
        self._volume = 1.0

    def _get_state(self):
        """Returns the current state flag of the playbin."""
        # gst's get_state function returns a 3-tuple; we just want the
        # status flag in position 1.
        return self.player.get_state(Gst.CLOCK_TIME_NONE)[1]

    def _handle_message(self, bus, message):
        """Callback for status updates from GStreamer."""
        if message.type == Gst.MessageType.EOS:
            # file finished playing
            self.player.set_state(Gst.State.NULL)
            self.playing = False
            self.cached_time = None
            if self.finished_callback:
                self.finished_callback()
        elif message.type == Gst.MessageType.ERROR:
            # error
            self.player.set_state(Gst.State.NULL)
            err, debug = message.parse_error()
            print(u"Error: {0}".format(err))
            self.playing = False

    def _set_volume(self, volume):
        """Set the volume level to a value in the range [0, 1.5]."""
        # And the volume for the playbin.
        self._volume = volume
        self.player.set_property("volume", volume)

    def _get_volume(self):
        """Get the volume as a float in the range [0, 1.5]."""
        return self._volume

    volume = property(_get_volume, _set_volume)

    def play_file(self, path):
        """Immediately begin playing the audio file at the given
        path.
        """
        self.player.set_state(Gst.State.NULL)
        if isinstance(path, six.text_type):
            path = path.encode('utf-8')
        # Build a percent-encoded file:// URI for the playbin.
        uri = 'file://' + urllib.parse.quote(path)
        self.player.set_property("uri", uri)
        self.player.set_state(Gst.State.PLAYING)
        self.playing = True

    def play(self):
        """If paused, resume playback."""
        if self._get_state() == Gst.State.PAUSED:
            self.player.set_state(Gst.State.PLAYING)
            self.playing = True

    def pause(self):
        """Pause playback."""
        self.player.set_state(Gst.State.PAUSED)

    def stop(self):
        """Halt playback."""
        self.player.set_state(Gst.State.NULL)
        self.playing = False
        self.cached_time = None

    def run(self):
        """Start a new thread for the player.

        Call this function before trying to play any music with
        play_file() or play().
        """
        # If we don't use the MainLoop, messages are never sent.

        def start():
            loop = GLib.MainLoop()
            loop.run()

        _thread.start_new_thread(start, ())

    def time(self):
        """Returns a tuple containing (position, length) where both
        values are integers in seconds. If no stream is available,
        returns (0, 0).
        """
        fmt = Gst.Format(Gst.Format.TIME)
        try:
            posq = self.player.query_position(fmt)
            if not posq[0]:
                raise QueryError("query_position failed")
            # GStreamer reports nanoseconds; convert to seconds.
            pos = posq[1] / (10 ** 9)
            lengthq = self.player.query_duration(fmt)
            if not lengthq[0]:
                raise QueryError("query_duration failed")
            length = lengthq[1] / (10 ** 9)
            self.cached_time = (pos, length)
            return (pos, length)
        except QueryError:
            # Stream not ready. For small gaps of time, for instance
            # after seeking, the time values are unavailable. For this
            # reason, we cache recent.
            if self.playing and self.cached_time:
                return self.cached_time
            else:
                return (0, 0)

    def seek(self, position):
        """Seeks to position (in seconds)."""
        cur_pos, cur_len = self.time()
        if position > cur_len:
            # Seeking past the end simply stops playback.
            self.stop()
            return
        fmt = Gst.Format(Gst.Format.TIME)
        ns = position * 10 ** 9  # convert to nanoseconds
        self.player.seek_simple(fmt, Gst.SeekFlags.FLUSH, ns)
        # save new cached time
        self.cached_time = (position, cur_len)

    def block(self):
        """Block until playing finishes."""
        while self.playing:
            time.sleep(1)
def play_simple(paths):
    """Play each file in paths sequentially, blocking on each track
    instead of using the player's finished callback.
    """
    player = GstPlayer()
    player.run()
    for audio_path in paths:
        player.play_file(audio_path)
        player.block()
def play_complicated(paths):
    """Play the files in the path one after the other by using the
    callback function to advance to the next song.
    """
    my_paths = copy.copy(paths)

    def next_song():
        # Drop the track that just finished and, if anything remains,
        # start the next one. Without the guard this raised IndexError
        # when the final track finished (pop on the last element left an
        # empty list before indexing my_paths[0]).
        my_paths.pop(0)
        if my_paths:
            p.play_file(my_paths[0])

    p = GstPlayer(next_song)
    p.run()
    p.play_file(my_paths[0])
    # Poll until the callback has consumed every queued path.
    while my_paths:
        time.sleep(1)
if __name__ == '__main__':
    # A very simple command-line player. Just give it names of audio
    # files on the command line; these are all played in sequence.
    paths = []
    for arg in sys.argv[1:]:
        paths.append(os.path.abspath(os.path.expanduser(arg)))
    # play_simple(paths)
    play_complicated(paths)
| |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.websecurityscanner.v1alpha WebSecurityScanner API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.page_iterator
import google.api_core.path_template
import grpc
from google.cloud.websecurityscanner_v1alpha.gapic import enums
from google.cloud.websecurityscanner_v1alpha.gapic import web_security_scanner_client_config
from google.cloud.websecurityscanner_v1alpha.gapic.transports import web_security_scanner_grpc_transport
from google.cloud.websecurityscanner_v1alpha.proto import finding_pb2
from google.cloud.websecurityscanner_v1alpha.proto import scan_config_pb2
from google.cloud.websecurityscanner_v1alpha.proto import scan_run_pb2
from google.cloud.websecurityscanner_v1alpha.proto import web_security_scanner_pb2
from google.cloud.websecurityscanner_v1alpha.proto import web_security_scanner_pb2_grpc
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
# Installed version of the google-cloud-websecurityscanner distribution;
# attached to the client info below and reported in the user-agent string.
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
    'google-cloud-websecurityscanner', ).version
class WebSecurityScannerClient(object):
    """
    Cloud Web Security Scanner Service identifies security vulnerabilities in web
    applications hosted on Google Cloud Platform. It crawls your application, and
    attempts to exercise as many user inputs and event handlers as possible.
    """
    # Default gRPC endpoint used when no transport/channel is supplied to
    # the constructor.
    SERVICE_ADDRESS = 'websecurityscanner.googleapis.com:443'
    """The default address of the service."""
    # The name of the interface for this client. This is the key used to
    # find the method configuration in the client_config dictionary.
    _INTERFACE_NAME = 'google.cloud.websecurityscanner.v1alpha.WebSecurityScanner'
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
WebSecurityScannerClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def project_path(cls, project):
"""Return a fully-qualified project string."""
return google.api_core.path_template.expand(
'projects/{project}',
project=project,
)
@classmethod
def scan_config_path(cls, project, scan_config):
"""Return a fully-qualified scan_config string."""
return google.api_core.path_template.expand(
'projects/{project}/scanConfigs/{scan_config}',
project=project,
scan_config=scan_config,
)
@classmethod
def scan_run_path(cls, project, scan_config, scan_run):
"""Return a fully-qualified scan_run string."""
return google.api_core.path_template.expand(
'projects/{project}/scanConfigs/{scan_config}/scanRuns/{scan_run}',
project=project,
scan_config=scan_config,
scan_run=scan_run,
)
@classmethod
def finding_path(cls, project, scan_config, scan_run, finding):
"""Return a fully-qualified finding string."""
return google.api_core.path_template.expand(
'projects/{project}/scanConfigs/{scan_config}/scanRuns/{scan_run}/findings/{finding}',
project=project,
scan_config=scan_config,
scan_run=scan_run,
finding=finding,
)
    def __init__(self,
                 transport=None,
                 channel=None,
                 credentials=None,
                 client_config=web_security_scanner_client_config.config,
                 client_info=None):
        """Constructor.

        Args:
            transport (Union[~.WebSecurityScannerGrpcTransport,
                Callable[[~.Credentials, type], ~.WebSecurityScannerGrpcTransport]): A transport
                instance, responsible for actually making the API calls.
                The default transport uses the gRPC protocol.
                This argument may also be a callable which returns a
                transport instance. Callables will be sent the credentials
                as the first argument and the default transport class as
                the second argument.
            channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
                through which to make calls. This argument is mutually exclusive
                with ``credentials``; providing both will raise an exception.
            credentials (google.auth.credentials.Credentials): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is mutually exclusive with providing a
                transport instance to ``transport``; doing so will raise
                an exception.
            client_config (dict): DEPRECATED. A dictionary of call options for
                each method. If not specified, the default configuration is used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            ValueError: If both a transport instance and credentials are
                given.
        """
        # Raise deprecation warnings for things we want to go away.
        if client_config:
            warnings.warn('The `client_config` argument is deprecated.',
                          PendingDeprecationWarning)
        if channel:
            warnings.warn(
                'The `channel` argument is deprecated; use '
                '`transport` instead.', PendingDeprecationWarning)
        # Instantiate the transport.
        # The transport is responsible for handling serialization and
        # deserialization and actually sending data to the service.
        if transport:
            # A callable transport is treated as a factory: it receives the
            # credentials and the default transport class.
            if callable(transport):
                self.transport = transport(
                    credentials=credentials,
                    default_class=web_security_scanner_grpc_transport.
                    WebSecurityScannerGrpcTransport,
                )
            else:
                # A concrete transport already carries its own credentials.
                if credentials:
                    raise ValueError(
                        'Received both a transport instance and '
                        'credentials; these are mutually exclusive.')
                self.transport = transport
        else:
            # No transport supplied: build the default gRPC transport.
            self.transport = web_security_scanner_grpc_transport.WebSecurityScannerGrpcTransport(
                address=self.SERVICE_ADDRESS,
                channel=channel,
                credentials=credentials,
            )
        # Attach client info used to build the user-agent string.
        if client_info is None:
            client_info = (
                google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO)
        client_info.gapic_version = _GAPIC_LIBRARY_VERSION
        self._client_info = client_info
        # Parse out the default settings for retry and timeout for each RPC
        # from the client configuration.
        # (Ordinarily, these are the defaults specified in the `*_config.py`
        # file next to this one.)
        self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
            client_config['interfaces'][self._INTERFACE_NAME], )
        # Save a dictionary of cached API call functions.
        # These are the actual callables which invoke the proper
        # transport methods, wrapped with `wrap_method` to add retry,
        # timeout, and the like.
        self._inner_api_calls = {}
# Service calls
def create_scan_config(self,
parent,
scan_config,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Creates a new ScanConfig.
Example:
>>> from google.cloud import websecurityscanner_v1alpha
>>>
>>> client = websecurityscanner_v1alpha.WebSecurityScannerClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize ``scan_config``:
>>> scan_config = {}
>>>
>>> response = client.create_scan_config(parent, scan_config)
Args:
parent (str): Required.
The parent resource name where the scan is created, which should be a
project resource name in the format 'projects/{projectId}'.
scan_config (Union[dict, ~google.cloud.websecurityscanner_v1alpha.types.ScanConfig]): Required.
The ScanConfig to be created.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.websecurityscanner_v1alpha.types.ScanConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.websecurityscanner_v1alpha.types.ScanConfig` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'create_scan_config' not in self._inner_api_calls:
self._inner_api_calls[
'create_scan_config'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_scan_config,
default_retry=self._method_configs['CreateScanConfig'].
retry,
default_timeout=self._method_configs['CreateScanConfig'].
timeout,
client_info=self._client_info,
)
request = web_security_scanner_pb2.CreateScanConfigRequest(
parent=parent,
scan_config=scan_config,
)
return self._inner_api_calls['create_scan_config'](
request, retry=retry, timeout=timeout, metadata=metadata)
def delete_scan_config(self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Deletes an existing ScanConfig and its child resources.
Example:
>>> from google.cloud import websecurityscanner_v1alpha
>>>
>>> client = websecurityscanner_v1alpha.WebSecurityScannerClient()
>>>
>>> name = client.scan_config_path('[PROJECT]', '[SCAN_CONFIG]')
>>>
>>> client.delete_scan_config(name)
Args:
name (str): Required.
The resource name of the ScanConfig to be deleted. The name follows the
format of 'projects/{projectId}/scanConfigs/{scanConfigId}'.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'delete_scan_config' not in self._inner_api_calls:
self._inner_api_calls[
'delete_scan_config'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_scan_config,
default_retry=self._method_configs['DeleteScanConfig'].
retry,
default_timeout=self._method_configs['DeleteScanConfig'].
timeout,
client_info=self._client_info,
)
request = web_security_scanner_pb2.DeleteScanConfigRequest(name=name, )
self._inner_api_calls['delete_scan_config'](
request, retry=retry, timeout=timeout, metadata=metadata)
def get_scan_config(self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Gets a ScanConfig.
Example:
>>> from google.cloud import websecurityscanner_v1alpha
>>>
>>> client = websecurityscanner_v1alpha.WebSecurityScannerClient()
>>>
>>> name = client.scan_config_path('[PROJECT]', '[SCAN_CONFIG]')
>>>
>>> response = client.get_scan_config(name)
Args:
name (str): Required.
The resource name of the ScanConfig to be returned. The name follows the
format of 'projects/{projectId}/scanConfigs/{scanConfigId}'.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.websecurityscanner_v1alpha.types.ScanConfig` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_scan_config' not in self._inner_api_calls:
self._inner_api_calls[
'get_scan_config'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_scan_config,
default_retry=self._method_configs['GetScanConfig'].retry,
default_timeout=self._method_configs['GetScanConfig'].
timeout,
client_info=self._client_info,
)
request = web_security_scanner_pb2.GetScanConfigRequest(name=name, )
return self._inner_api_calls['get_scan_config'](
request, retry=retry, timeout=timeout, metadata=metadata)
def list_scan_configs(self,
parent,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Lists ScanConfigs under a given project.
Example:
>>> from google.cloud import websecurityscanner_v1alpha
>>>
>>> client = websecurityscanner_v1alpha.WebSecurityScannerClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # Iterate over all results
>>> for element in client.list_scan_configs(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_scan_configs(parent, options=CallOptions(page_token=INITIAL_PAGE)):
... for element in page:
... # process element
... pass
Args:
parent (str): Required.
The parent resource name, which should be a project resource name in the
format 'projects/{projectId}'.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.websecurityscanner_v1alpha.types.ScanConfig` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_scan_configs' not in self._inner_api_calls:
self._inner_api_calls[
'list_scan_configs'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_scan_configs,
default_retry=self._method_configs['ListScanConfigs'].
retry,
default_timeout=self._method_configs['ListScanConfigs'].
timeout,
client_info=self._client_info,
)
request = web_security_scanner_pb2.ListScanConfigsRequest(
parent=parent,
page_size=page_size,
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls['list_scan_configs'],
retry=retry,
timeout=timeout,
metadata=metadata),
request=request,
items_field='scan_configs',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
def update_scan_config(self,
scan_config,
update_mask,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Updates a ScanConfig. This method support partial update of a ScanConfig.
Example:
>>> from google.cloud import websecurityscanner_v1alpha
>>>
>>> client = websecurityscanner_v1alpha.WebSecurityScannerClient()
>>>
>>> # TODO: Initialize ``scan_config``:
>>> scan_config = {}
>>>
>>> # TODO: Initialize ``update_mask``:
>>> update_mask = {}
>>>
>>> response = client.update_scan_config(scan_config, update_mask)
Args:
scan_config (Union[dict, ~google.cloud.websecurityscanner_v1alpha.types.ScanConfig]): Required.
The ScanConfig to be updated. The name field must be set to identify the
resource to be updated. The values of fields not covered by the mask
will be ignored.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.websecurityscanner_v1alpha.types.ScanConfig`
update_mask (Union[dict, ~google.cloud.websecurityscanner_v1alpha.types.FieldMask]): Required.
The update mask applies to the resource. For the ``FieldMask`` definition,
see
https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.websecurityscanner_v1alpha.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.websecurityscanner_v1alpha.types.ScanConfig` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'update_scan_config' not in self._inner_api_calls:
self._inner_api_calls[
'update_scan_config'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_scan_config,
default_retry=self._method_configs['UpdateScanConfig'].
retry,
default_timeout=self._method_configs['UpdateScanConfig'].
timeout,
client_info=self._client_info,
)
request = web_security_scanner_pb2.UpdateScanConfigRequest(
scan_config=scan_config,
update_mask=update_mask,
)
return self._inner_api_calls['update_scan_config'](
request, retry=retry, timeout=timeout, metadata=metadata)
def start_scan_run(self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Start a ScanRun according to the given ScanConfig.
Example:
>>> from google.cloud import websecurityscanner_v1alpha
>>>
>>> client = websecurityscanner_v1alpha.WebSecurityScannerClient()
>>>
>>> name = client.scan_config_path('[PROJECT]', '[SCAN_CONFIG]')
>>>
>>> response = client.start_scan_run(name)
Args:
name (str): Required.
The resource name of the ScanConfig to be used. The name follows the
format of 'projects/{projectId}/scanConfigs/{scanConfigId}'.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.websecurityscanner_v1alpha.types.ScanRun` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'start_scan_run' not in self._inner_api_calls:
self._inner_api_calls[
'start_scan_run'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.start_scan_run,
default_retry=self._method_configs['StartScanRun'].retry,
default_timeout=self._method_configs['StartScanRun'].
timeout,
client_info=self._client_info,
)
request = web_security_scanner_pb2.StartScanRunRequest(name=name, )
return self._inner_api_calls['start_scan_run'](
request, retry=retry, timeout=timeout, metadata=metadata)
def get_scan_run(self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Gets a ScanRun.
Example:
>>> from google.cloud import websecurityscanner_v1alpha
>>>
>>> client = websecurityscanner_v1alpha.WebSecurityScannerClient()
>>>
>>> name = client.scan_run_path('[PROJECT]', '[SCAN_CONFIG]', '[SCAN_RUN]')
>>>
>>> response = client.get_scan_run(name)
Args:
name (str): Required.
The resource name of the ScanRun to be returned. The name follows the
format of
'projects/{projectId}/scanConfigs/{scanConfigId}/scanRuns/{scanRunId}'.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.websecurityscanner_v1alpha.types.ScanRun` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_scan_run' not in self._inner_api_calls:
self._inner_api_calls[
'get_scan_run'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_scan_run,
default_retry=self._method_configs['GetScanRun'].retry,
default_timeout=self._method_configs['GetScanRun'].timeout,
client_info=self._client_info,
)
request = web_security_scanner_pb2.GetScanRunRequest(name=name, )
return self._inner_api_calls['get_scan_run'](
request, retry=retry, timeout=timeout, metadata=metadata)
def list_scan_runs(self,
parent,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Lists ScanRuns under a given ScanConfig, in descending order of ScanRun
stop time.
Example:
>>> from google.cloud import websecurityscanner_v1alpha
>>>
>>> client = websecurityscanner_v1alpha.WebSecurityScannerClient()
>>>
>>> parent = client.scan_config_path('[PROJECT]', '[SCAN_CONFIG]')
>>>
>>> # Iterate over all results
>>> for element in client.list_scan_runs(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_scan_runs(parent, options=CallOptions(page_token=INITIAL_PAGE)):
... for element in page:
... # process element
... pass
Args:
parent (str): Required.
The parent resource name, which should be a scan resource name in the
format 'projects/{projectId}/scanConfigs/{scanConfigId}'.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.websecurityscanner_v1alpha.types.ScanRun` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_scan_runs' not in self._inner_api_calls:
self._inner_api_calls[
'list_scan_runs'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_scan_runs,
default_retry=self._method_configs['ListScanRuns'].retry,
default_timeout=self._method_configs['ListScanRuns'].
timeout,
client_info=self._client_info,
)
request = web_security_scanner_pb2.ListScanRunsRequest(
parent=parent,
page_size=page_size,
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls['list_scan_runs'],
retry=retry,
timeout=timeout,
metadata=metadata),
request=request,
items_field='scan_runs',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
def stop_scan_run(self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Stops a ScanRun. The stopped ScanRun is returned.
Example:
>>> from google.cloud import websecurityscanner_v1alpha
>>>
>>> client = websecurityscanner_v1alpha.WebSecurityScannerClient()
>>>
>>> name = client.scan_run_path('[PROJECT]', '[SCAN_CONFIG]', '[SCAN_RUN]')
>>>
>>> response = client.stop_scan_run(name)
Args:
name (str): Required.
The resource name of the ScanRun to be stopped. The name follows the
format of
'projects/{projectId}/scanConfigs/{scanConfigId}/scanRuns/{scanRunId}'.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.websecurityscanner_v1alpha.types.ScanRun` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'stop_scan_run' not in self._inner_api_calls:
self._inner_api_calls[
'stop_scan_run'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.stop_scan_run,
default_retry=self._method_configs['StopScanRun'].retry,
default_timeout=self._method_configs['StopScanRun'].
timeout,
client_info=self._client_info,
)
request = web_security_scanner_pb2.StopScanRunRequest(name=name, )
return self._inner_api_calls['stop_scan_run'](
request, retry=retry, timeout=timeout, metadata=metadata)
def list_crawled_urls(self,
parent,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
List CrawledUrls under a given ScanRun.
Example:
>>> from google.cloud import websecurityscanner_v1alpha
>>>
>>> client = websecurityscanner_v1alpha.WebSecurityScannerClient()
>>>
>>> parent = client.scan_run_path('[PROJECT]', '[SCAN_CONFIG]', '[SCAN_RUN]')
>>>
>>> # Iterate over all results
>>> for element in client.list_crawled_urls(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_crawled_urls(parent, options=CallOptions(page_token=INITIAL_PAGE)):
... for element in page:
... # process element
... pass
Args:
parent (str): Required.
The parent resource name, which should be a scan run resource name in the
format
'projects/{projectId}/scanConfigs/{scanConfigId}/scanRuns/{scanRunId}'.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.websecurityscanner_v1alpha.types.CrawledUrl` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_crawled_urls' not in self._inner_api_calls:
self._inner_api_calls[
'list_crawled_urls'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_crawled_urls,
default_retry=self._method_configs['ListCrawledUrls'].
retry,
default_timeout=self._method_configs['ListCrawledUrls'].
timeout,
client_info=self._client_info,
)
request = web_security_scanner_pb2.ListCrawledUrlsRequest(
parent=parent,
page_size=page_size,
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls['list_crawled_urls'],
retry=retry,
timeout=timeout,
metadata=metadata),
request=request,
items_field='crawled_urls',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
def get_finding(self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Gets a Finding.
Example:
>>> from google.cloud import websecurityscanner_v1alpha
>>>
>>> client = websecurityscanner_v1alpha.WebSecurityScannerClient()
>>>
>>> name = client.finding_path('[PROJECT]', '[SCAN_CONFIG]', '[SCAN_RUN]', '[FINDING]')
>>>
>>> response = client.get_finding(name)
Args:
name (str): Required.
The resource name of the Finding to be returned. The name follows the
format of
'projects/{projectId}/scanConfigs/{scanConfigId}/scanRuns/{scanRunId}/findings/{findingId}'.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.websecurityscanner_v1alpha.types.Finding` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_finding' not in self._inner_api_calls:
self._inner_api_calls[
'get_finding'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_finding,
default_retry=self._method_configs['GetFinding'].retry,
default_timeout=self._method_configs['GetFinding'].timeout,
client_info=self._client_info,
)
request = web_security_scanner_pb2.GetFindingRequest(name=name, )
return self._inner_api_calls['get_finding'](
request, retry=retry, timeout=timeout, metadata=metadata)
    def list_findings(self,
                      parent,
                      filter_,
                      page_size=None,
                      retry=google.api_core.gapic_v1.method.DEFAULT,
                      timeout=google.api_core.gapic_v1.method.DEFAULT,
                      metadata=None):
        """
        List Findings under a given ScanRun, as a lazily-paging iterator.

        Example:
            >>> from google.cloud import websecurityscanner_v1alpha
            >>>
            >>> client = websecurityscanner_v1alpha.WebSecurityScannerClient()
            >>>
            >>> parent = client.scan_run_path('[PROJECT]', '[SCAN_CONFIG]', '[SCAN_RUN]')
            >>>
            >>> # TODO: Initialize ``filter_``:
            >>> filter_ = ''
            >>>
            >>> # Iterate over all results
            >>> for element in client.list_findings(parent, filter_):
            ...     # process element
            ...     pass
            >>>
            >>> # Alternatively, iterate over results one page at a time
            >>> for page in client.list_findings(parent, filter_, options=CallOptions(page_token=INITIAL_PAGE)):
            ...     for element in page:
            ...         # process element
            ...         pass

        Args:
            parent (str): Required. The parent resource name, which should be
                a scan run resource name in the format
                'projects/{projectId}/scanConfigs/{scanConfigId}/scanRuns/{scanRunId}'.
            filter_ (str): The filter expression, in the format
                <field> <operator> <value>.
                Supported field: 'finding_type'. Supported operator: '='.
            page_size (int): The maximum number of resources contained in the
                underlying API response. If page streaming is performed
                per-resource, this parameter does not affect the return
                value. If page streaming is performed per-page, this
                determines the maximum number of resources in a page.
            retry (Optional[google.api_core.retry.Retry]): A retry object used
                to retry requests. If ``None`` is specified, requests will not
                be retried.
            timeout (Optional[float]): The amount of time, in seconds, to wait
                for the request to complete. Note that if ``retry`` is
                specified, the timeout applies to each individual attempt.
            metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
                that is provided to the method.

        Returns:
            A :class:`~google.gax.PageIterator` instance. By default, this
            is an iterable of :class:`~google.cloud.websecurityscanner_v1alpha.types.Finding` instances.
            This object can also be configured to iterate over the pages
            of the response through the `options` parameter.

        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the request
                failed for any reason.
            google.api_core.exceptions.RetryError: If the request failed due
                to a retryable error and retry attempts failed.
            ValueError: If the parameters are invalid.
        """
        # Wrap the transport method to add retry and timeout logic; the
        # wrapped callable is cached so this only happens on the first call.
        if 'list_findings' not in self._inner_api_calls:
            self._inner_api_calls[
                'list_findings'] = google.api_core.gapic_v1.method.wrap_method(
                    self.transport.list_findings,
                    default_retry=self._method_configs['ListFindings'].retry,
                    default_timeout=self._method_configs['ListFindings'].
                    timeout,
                    client_info=self._client_info,
                )
        # Note the trailing underscore on ``filter_`` is dropped: the proto
        # field is named ``filter``.
        request = web_security_scanner_pb2.ListFindingsRequest(
            parent=parent,
            filter=filter_,
            page_size=page_size,
        )
        # The iterator fetches pages on demand, threading the page token
        # between requests; retry/timeout/metadata are bound via partial.
        iterator = google.api_core.page_iterator.GRPCIterator(
            client=None,
            method=functools.partial(
                self._inner_api_calls['list_findings'],
                retry=retry,
                timeout=timeout,
                metadata=metadata),
            request=request,
            items_field='findings',
            request_token_field='page_token',
            response_token_field='next_page_token',
        )
        return iterator
def list_finding_type_stats(
self,
parent,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
List all FindingTypeStats under a given ScanRun.
Example:
>>> from google.cloud import websecurityscanner_v1alpha
>>>
>>> client = websecurityscanner_v1alpha.WebSecurityScannerClient()
>>>
>>> parent = client.scan_run_path('[PROJECT]', '[SCAN_CONFIG]', '[SCAN_RUN]')
>>>
>>> response = client.list_finding_type_stats(parent)
Args:
parent (str): Required.
The parent resource name, which should be a scan run resource name in the
format
'projects/{projectId}/scanConfigs/{scanConfigId}/scanRuns/{scanRunId}'.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.websecurityscanner_v1alpha.types.ListFindingTypeStatsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_finding_type_stats' not in self._inner_api_calls:
self._inner_api_calls[
'list_finding_type_stats'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_finding_type_stats,
default_retry=self._method_configs['ListFindingTypeStats'].
retry,
default_timeout=self.
_method_configs['ListFindingTypeStats'].timeout,
client_info=self._client_info,
)
request = web_security_scanner_pb2.ListFindingTypeStatsRequest(
parent=parent, )
return self._inner_api_calls['list_finding_type_stats'](
request, retry=retry, timeout=timeout, metadata=metadata)
| |
import locale
import calendar
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import (Index, DatetimeIndex, datetime, offsets,
date_range, Timestamp)
class TestTimeSeries(object):

    def test_pass_datetimeindex_to_index(self):
        # Bugs in #1396: wrapping a DatetimeIndex in an object-dtype Index
        # must give the same values as converting to python datetimes first.
        rng = date_range('1/1/2000', '3/1/2000')
        result = Index(rng, dtype=object)
        expected = Index(rng.to_pydatetime(), dtype=object)
        tm.assert_numpy_array_equal(result.values, expected.values)

    def test_range_edges(self):
        # GH 13672: build ranges right at the resolution limits of each
        # supported frequency, from nanoseconds up to days.  Each case is
        # (start, end, freq, expected element strings).
        cases = [
            ('1970-01-01 00:00:00.000000001',
             '1970-01-01 00:00:00.000000004', 'N',
             ['1970-01-01 00:00:00.000000001',
              '1970-01-01 00:00:00.000000002',
              '1970-01-01 00:00:00.000000003',
              '1970-01-01 00:00:00.000000004']),
            # start after end -> empty index
            ('1970-01-01 00:00:00.000000004',
             '1970-01-01 00:00:00.000000001', 'N', []),
            # start == end -> single element
            ('1970-01-01 00:00:00.000000001',
             '1970-01-01 00:00:00.000000001', 'N',
             ['1970-01-01 00:00:00.000000001']),
            ('1970-01-01 00:00:00.000001',
             '1970-01-01 00:00:00.000004', 'U',
             ['1970-01-01 00:00:00.000001', '1970-01-01 00:00:00.000002',
              '1970-01-01 00:00:00.000003', '1970-01-01 00:00:00.000004']),
            ('1970-01-01 00:00:00.001', '1970-01-01 00:00:00.004', 'L',
             ['1970-01-01 00:00:00.001', '1970-01-01 00:00:00.002',
              '1970-01-01 00:00:00.003', '1970-01-01 00:00:00.004']),
            ('1970-01-01 00:00:01', '1970-01-01 00:00:04', 'S',
             ['1970-01-01 00:00:01', '1970-01-01 00:00:02',
              '1970-01-01 00:00:03', '1970-01-01 00:00:04']),
            ('1970-01-01 00:01', '1970-01-01 00:04', 'T',
             ['1970-01-01 00:01', '1970-01-01 00:02',
              '1970-01-01 00:03', '1970-01-01 00:04']),
            ('1970-01-01 01:00', '1970-01-01 04:00', 'H',
             ['1970-01-01 01:00', '1970-01-01 02:00',
              '1970-01-01 03:00', '1970-01-01 04:00']),
            ('1970-01-01', '1970-01-04', 'D',
             ['1970-01-01', '1970-01-02', '1970-01-03', '1970-01-04']),
        ]
        for start, end, freq, expected_strs in cases:
            result = DatetimeIndex(start=Timestamp(start),
                                   end=Timestamp(end), freq=freq)
            tm.assert_index_equal(result, DatetimeIndex(expected_strs))
class TestDatetime64(object):

    def test_datetimeindex_accessors(self):
        # A naive and a tz-aware daily index covering all of 1998; every
        # accessor below must behave identically for both.
        dti_naive = DatetimeIndex(freq='D', start=datetime(1998, 1, 1),
                                  periods=365)
        # GH 13303
        dti_tz = DatetimeIndex(freq='D', start=datetime(1998, 1, 1),
                               periods=365, tz='US/Eastern')
        for dti in [dti_naive, dti_tz]:
            # Field accessors at known positions (1998-01-01 is a Thursday,
            # hence dayofweek == 3).
            assert dti.year[0] == 1998
            assert dti.month[0] == 1
            assert dti.day[0] == 1
            assert dti.hour[0] == 0
            assert dti.minute[0] == 0
            assert dti.second[0] == 0
            assert dti.microsecond[0] == 0
            assert dti.dayofweek[0] == 3
            assert dti.dayofyear[0] == 1
            assert dti.dayofyear[120] == 121
            assert dti.weekofyear[0] == 1
            assert dti.weekofyear[120] == 18
            assert dti.quarter[0] == 1
            assert dti.quarter[120] == 2
            assert dti.days_in_month[0] == 31
            assert dti.days_in_month[90] == 30
            # Boolean start/end flags at month, quarter and year boundaries.
            assert dti.is_month_start[0]
            assert not dti.is_month_start[1]
            assert dti.is_month_start[31]
            assert dti.is_quarter_start[0]
            assert dti.is_quarter_start[90]
            assert dti.is_year_start[0]
            assert not dti.is_year_start[364]
            assert not dti.is_month_end[0]
            assert dti.is_month_end[30]
            assert not dti.is_month_end[31]
            assert dti.is_month_end[364]
            assert not dti.is_quarter_end[0]
            assert not dti.is_quarter_end[30]
            assert dti.is_quarter_end[89]
            assert dti.is_quarter_end[364]
            assert not dti.is_year_end[0]
            assert dti.is_year_end[364]
            # Every accessor should yield one value per day of the year.
            assert len(dti.year) == 365
            assert len(dti.month) == 365
            assert len(dti.day) == 365
            assert len(dti.hour) == 365
            assert len(dti.minute) == 365
            assert len(dti.second) == 365
            assert len(dti.microsecond) == 365
            assert len(dti.dayofweek) == 365
            assert len(dti.dayofyear) == 365
            assert len(dti.weekofyear) == 365
            assert len(dti.quarter) == 365
            assert len(dti.is_month_start) == 365
            assert len(dti.is_month_end) == 365
            assert len(dti.is_quarter_start) == 365
            assert len(dti.is_quarter_end) == 365
            assert len(dti.is_year_start) == 365
            assert len(dti.is_year_end) == 365
            assert len(dti.weekday_name) == 365
            dti.name = 'name'
            # non boolean accessors -> return Index (propagating the name)
            for accessor in DatetimeIndex._field_ops:
                res = getattr(dti, accessor)
                assert len(res) == 365
                assert isinstance(res, Index)
                assert res.name == 'name'
            # boolean accessors -> return array
            for accessor in DatetimeIndex._bool_ops:
                res = getattr(dti, accessor)
                assert len(res) == 365
                assert isinstance(res, np.ndarray)
            # test boolean indexing
            res = dti[dti.is_quarter_start]
            exp = dti[[0, 90, 181, 273]]
            tm.assert_index_equal(res, exp)
            # 1998 is not a leap year, so is_leap_year selects nothing.
            res = dti[dti.is_leap_year]
            exp = DatetimeIndex([], freq='D', tz=dti.tz, name='name')
            tm.assert_index_equal(res, exp)
        # Business-quarter anchored frequency: every element is a quarter
        # end by construction, exactly one lands on a (fiscal) year end.
        dti = DatetimeIndex(freq='BQ-FEB', start=datetime(1998, 1, 1),
                            periods=4)
        assert sum(dti.is_quarter_start) == 0
        assert sum(dti.is_quarter_end) == 4
        assert sum(dti.is_year_start) == 0
        assert sum(dti.is_year_end) == 1
        # Ensure is_start/end accessors throw ValueError for CustomBusinessDay,
        # CBD requires np >= 1.7
        bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
        dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
        pytest.raises(ValueError, lambda: dti.is_month_start)
        dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
        assert dti.is_month_start[0] == 1
        # (flag expression, expected truthiness) pairs: start/end flags on a
        # Timestamp depend on its attached freq.
        tests = [
            (Timestamp('2013-06-01', freq='M').is_month_start, 1),
            (Timestamp('2013-06-01', freq='BM').is_month_start, 0),
            (Timestamp('2013-06-03', freq='M').is_month_start, 0),
            (Timestamp('2013-06-03', freq='BM').is_month_start, 1),
            (Timestamp('2013-02-28', freq='Q-FEB').is_month_end, 1),
            (Timestamp('2013-02-28', freq='Q-FEB').is_quarter_end, 1),
            (Timestamp('2013-02-28', freq='Q-FEB').is_year_end, 1),
            (Timestamp('2013-03-01', freq='Q-FEB').is_month_start, 1),
            (Timestamp('2013-03-01', freq='Q-FEB').is_quarter_start, 1),
            (Timestamp('2013-03-01', freq='Q-FEB').is_year_start, 1),
            (Timestamp('2013-03-31', freq='QS-FEB').is_month_end, 1),
            (Timestamp('2013-03-31', freq='QS-FEB').is_quarter_end, 0),
            (Timestamp('2013-03-31', freq='QS-FEB').is_year_end, 0),
            (Timestamp('2013-02-01', freq='QS-FEB').is_month_start, 1),
            (Timestamp('2013-02-01', freq='QS-FEB').is_quarter_start, 1),
            (Timestamp('2013-02-01', freq='QS-FEB').is_year_start, 1),
            (Timestamp('2013-06-30', freq='BQ').is_month_end, 0),
            (Timestamp('2013-06-30', freq='BQ').is_quarter_end, 0),
            (Timestamp('2013-06-30', freq='BQ').is_year_end, 0),
            (Timestamp('2013-06-28', freq='BQ').is_month_end, 1),
            (Timestamp('2013-06-28', freq='BQ').is_quarter_end, 1),
            (Timestamp('2013-06-28', freq='BQ').is_year_end, 0),
            (Timestamp('2013-06-30', freq='BQS-APR').is_month_end, 0),
            (Timestamp('2013-06-30', freq='BQS-APR').is_quarter_end, 0),
            (Timestamp('2013-06-30', freq='BQS-APR').is_year_end, 0),
            (Timestamp('2013-06-28', freq='BQS-APR').is_month_end, 1),
            (Timestamp('2013-06-28', freq='BQS-APR').is_quarter_end, 1),
            (Timestamp('2013-03-29', freq='BQS-APR').is_year_end, 1),
            (Timestamp('2013-11-01', freq='AS-NOV').is_year_start, 1),
            (Timestamp('2013-10-31', freq='AS-NOV').is_year_end, 1),
            (Timestamp('2012-02-01').days_in_month, 29),
            (Timestamp('2013-02-01').days_in_month, 28)]
        for ts, value in tests:
            assert ts == value
        # GH 6538: Check that DatetimeIndex and its TimeStamp elements
        # return the same weekofyear accessor close to new year w/ tz
        dates = ["2013/12/29", "2013/12/30", "2013/12/31"]
        dates = DatetimeIndex(dates, tz="Europe/Brussels")
        expected = [52, 1, 1]
        assert dates.weekofyear.tolist() == expected
        assert [d.weekofyear for d in dates] == expected

    # GH 12806
    @pytest.mark.parametrize('time_locale', [
        None] if tm.get_locales() is None else [None] + tm.get_locales())
    def test_datetime_name_accessors(self, time_locale):
        # Test Monday -> Sunday and January -> December, in that sequence
        if time_locale is None:
            # If the time_locale is None, day-name and month_name should
            # return the english attributes
            expected_days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
                             'Friday', 'Saturday', 'Sunday']
            expected_months = ['January', 'February', 'March', 'April', 'May',
                               'June', 'July', 'August', 'September',
                               'October', 'November', 'December']
        else:
            # Pull localized names from the C library via the given locale.
            with tm.set_locale(time_locale, locale.LC_TIME):
                expected_days = calendar.day_name[:]
                expected_months = calendar.month_name[1:]
        # GH 11128
        dti = DatetimeIndex(freq='D', start=datetime(1998, 1, 1),
                            periods=365)
        english_days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
                        'Friday', 'Saturday', 'Sunday']
        # 1998-01-05 (index 4) is a Monday, so indices 4..10 cover one full
        # week aligned with expected_days/english_days.
        for day, name, eng_name in zip(range(4, 11),
                                       expected_days,
                                       english_days):
            name = name.capitalize()
            # weekday_name is always English; day_name honors the locale.
            assert dti.weekday_name[day] == eng_name
            assert dti.day_name(locale=time_locale)[day] == name
            ts = Timestamp(datetime(2016, 4, day))
            # weekday_name on a Timestamp is deprecated (emits a warning).
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                assert ts.weekday_name == eng_name
            assert ts.day_name(locale=time_locale) == name
        # NaT propagates as NaN through day_name.
        dti = dti.append(DatetimeIndex([pd.NaT]))
        assert np.isnan(dti.day_name(locale=time_locale)[-1])
        ts = Timestamp(pd.NaT)
        assert np.isnan(ts.day_name(locale=time_locale))
        # GH 12805
        dti = DatetimeIndex(freq='M', start='2012', end='2013')
        result = dti.month_name(locale=time_locale)
        expected = Index([month.capitalize() for month in expected_months])
        tm.assert_index_equal(result, expected)
        for date, expected in zip(dti, expected_months):
            result = date.month_name(locale=time_locale)
            assert result == expected.capitalize()
        # NaT propagates as NaN through month_name too.
        dti = dti.append(DatetimeIndex([pd.NaT]))
        assert np.isnan(dti.month_name(locale=time_locale)[-1])

    def test_nanosecond_field(self):
        # Integers passed to DatetimeIndex are interpreted as nanosecond
        # offsets from the epoch, so .nanosecond recovers them exactly.
        dti = DatetimeIndex(np.arange(10))
        tm.assert_index_equal(dti.nanosecond,
                              pd.Index(np.arange(10, dtype=np.int64)))
| |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module providing visualizations display as HTML/JS."""
__author__ = 'Mike Gainer (mgainer@google.com)'
from common import safe_dom
from controllers import utils
from models import data_sources
from models import jobs
from models import transforms
from models.analytics import utils as analytics_utils
from modules.mapreduce import mapreduce_module
def _generate_display_html(template_renderer, xsrf, app_context,
                           visualizations):
    """Render the full analytics page HTML for a set of visualizations.

    Loads the job backing every generator the visualizations depend on,
    renders one HTML section per visualization, appends the JS that pages
    REST data sources up to the browser, and wraps everything in the
    display.html page template.
    """
    # Package-protected: pylint: disable=protected-access
    # First, load jobs for all generators required for an visualization.
    # Jobs may directly contain small results, just hold references to
    # larger results, or both.
    any_generator_not_running = False
    data_source_jobs = {}
    for generator_class in analytics_utils._generators_for_visualizations(
            visualizations):
        job = generator_class(app_context).load()
        data_source_jobs[generator_class] = job
        # A missing or finished job means at least one generator could be
        # (re)started; the page uses this to offer an "update" action.
        if not job or job.has_finished:
            any_generator_not_running = True
    # Generate HTML section for each visualization.
    html_sections = []
    for v in visualizations:
        html_sections.extend(_generate_visualization_section(
            template_renderer, xsrf, app_context, v, data_source_jobs))
    # Generate JS to pull contents of data-sources up to page and feed it
    # to visualization functions.
    html_sections.extend(_generate_data_source_script(template_renderer,
                                                      visualizations, xsrf))
    # Generate page content
    names_of_visualizations_with_generators = []
    for visualization in visualizations:
        if analytics_utils._generators_for_visualizations([visualization]):
            names_of_visualizations_with_generators.append(visualization.name)
    # Metadata describing each REST data source so the page JS can fetch it.
    rest_sources = [{
        'name': rdsc.get_name(),
        'title': rdsc.get_title(),
        'chunk_size': rdsc.get_default_chunk_size(),
    } for rdsc in analytics_utils._rest_data_source_classes(visualizations)]
    return template_renderer.render(
        None, 'models/analytics/display.html',
        {
            'sections': html_sections,
            'any_generator_not_running': any_generator_not_running,
            'xsrf_token_run': xsrf.create_xsrf_token('run_visualizations'),
            'visualizations': names_of_visualizations_with_generators,
            'rest_sources': rest_sources,
            'r': template_renderer.get_current_url(),
        })
def _generate_visualization_section(template_renderer, xsrf, app_context,
                                    visualization, data_source_jobs):
    """Render the HTML section (title, status footer, body) for one visualization."""
    html_sections = []
    # Collect statuses of generators and build a display messages for each.
    generator_status_messages = []
    any_generator_still_running = False
    all_generators_completed_ok = True
    for generator_class in visualization.generator_classes:
        job = data_source_jobs[generator_class]
        if job is None:
            all_generators_completed_ok = False
        elif job.status_code != jobs.STATUS_CODE_COMPLETED:
            all_generators_completed_ok = False
            if not job.has_finished:
                any_generator_still_running = True
        # NOTE(review): this relies on NodeList.append() returning the list
        # itself (fluent style), so the status message plus its pipeline
        # link is what gets collected — confirm in common.safe_dom.
        generator_status_messages.append(
            get_generator_status_message(generator_class, job).append(
                _get_pipeline_link(xsrf, app_context, generator_class, job)))
    # <h3> title block.
    html_sections.append(safe_dom.Element('h3').add_text(visualization.title))
    html_sections.append(safe_dom.Element('br'))
    # Boilerplate content for each visualization's required generators
    html_sections.append(template_renderer.render(
        None, 'models/analytics/common_footer.html',
        {
            'visualization': visualization.name,
            'any_generator_still_running': any_generator_still_running,
            'status_messages': generator_status_messages,
            'xsrf_token_run': xsrf.create_xsrf_token('run_visualizations'),
            'xsrf_token_cancel': xsrf.create_xsrf_token(
                'cancel_visualizations'),
            'r': template_renderer.get_current_url(),
        }))
    # If this source wants to generate inline values for its template,
    # and all generators that this source depends are complete (or zero
    # generators are depended on) then-and-only-then allow the source
    # to generate template values
    if all_generators_completed_ok:
        template_values = {'visualization': visualization.name}
        for source_class in visualization.data_source_classes:
            if issubclass(source_class, data_sources.SynchronousQuery):
                required_generator_classes = (
                    source_class.required_generators())
                synchronous_query_jobs = []
                for generator_class in required_generator_classes:
                    synchronous_query_jobs.append(
                        data_source_jobs[generator_class])
                source_class.fill_values(app_context, template_values,
                                         *synchronous_query_jobs)
        html_sections.append(template_renderer.render(
            visualization, visualization.template_name, template_values))
    return html_sections
def get_generator_status_message(generator_class, job):
    """Build a human-readable status NodeList for one generator's job.

    Covers four states: never run, completed, failed (with the error text
    quoted), and still running.
    """
    description = generator_class.get_description()
    message = safe_dom.NodeList()
    if job is None:
        # Never run at all.
        message.append(safe_dom.Text(
            'Statistics for %s have not been calculated yet' %
            description))
    elif job.status_code == jobs.STATUS_CODE_COMPLETED:
        message.append(safe_dom.Text(
            'Statistics for %s were last updated at %s in about %s sec.' % (
                description,
                job.updated_on.strftime(utils.HUMAN_READABLE_DATETIME_FORMAT),
                job.execution_time_sec)))
    elif job.status_code == jobs.STATUS_CODE_FAILED:
        message.append(safe_dom.Text(
            'There was an error updating %s ' % description +
            'statistics. Error msg:'))
        message.append(safe_dom.Element('br'))
        # Map/reduce jobs store their error separately from plain jobs.
        if issubclass(generator_class, jobs.MapReduceJob):
            error_message = jobs.MapReduceJob.get_error_message(job)
        else:
            error_message = job.output
        message.append(safe_dom.Element('blockquote').add_child(
            safe_dom.Element('pre').add_text(error_message)))
    else:
        # Any other status code means the job is in flight.
        message.append(safe_dom.Text(
            'Job for %s statistics started at %s and is running now.' % (
                description,
                job.updated_on.strftime(utils.HUMAN_READABLE_DATETIME_FORMAT))))
    return message
def _get_pipeline_link(xsrf, app_context, generator_class, job):
    """Return a NodeList linking to the map/reduce pipeline status UI.

    Empty unless the generator is a map/reduce job, detail access is
    enabled, and the job already has a status URL.
    """
    nodes = safe_dom.NodeList()
    # Short-circuit order matters: only consult has_status_url for actual
    # map/reduce jobs with a live job record.
    show_link = (
        issubclass(generator_class, jobs.MapReduceJob) and
        # Don't give access to the pipeline details UI unless someone
        # has actively intended to provide access. The UI allows you to
        # kill jobs, and we don't want naive users stumbling around in
        # there without adult supervision.
        mapreduce_module.GCB_ENABLE_MAPREDUCE_DETAIL_ACCESS.value and
        # Status URL may not be available immediately after job is launched;
        # pipeline setup is done w/ 'yield', and happens a bit later.
        bool(job) and jobs.MapReduceJob.has_status_url(job))
    if not show_link:
        return nodes
    link_text = ('View completed job run details' if job.has_finished
                 else 'Check status of job')
    status_url = jobs.MapReduceJob.get_status_url(
        job, app_context.get_namespace_name(),
        xsrf.create_xsrf_token(mapreduce_module.XSRF_ACTION_NAME))
    nodes.append(safe_dom.Text(' '))
    nodes.append(safe_dom.A(status_url, target='_blank').add_text(link_text))
    return nodes
def _generate_data_source_script(template_renderer, visualizations, xsrf):
    """Render the JS environment that pages REST data up to visualizations."""
    # Map each visualization to the REST data sources it depends on.
    display_visualizations = {}
    for viz in visualizations:
        source_names = [rsc.get_name()
                        for rsc in viz.rest_data_source_classes]
        if source_names:
            display_visualizations[viz.name] = {
                'callback_name': viz.name,
                'restSources': source_names,
                'restSourcesNotYetSeen': dict.fromkeys(source_names, True),
            }
    # No visualization uses REST sources -> no script needed at all.
    if not display_visualizations:
        return []
    # Map each REST data source to the visualizations that consume it.
    display_rest_sources = {}
    # pylint: disable=protected-access
    for rdsc in analytics_utils._rest_data_source_classes(visualizations):
        consumers = [viz.name for viz in visualizations
                     if rdsc in viz.rest_data_source_classes]
        display_rest_sources[rdsc.get_name()] = {
            'currentPage': -1,
            'pages': [],
            'crossfilterDimensions': [],
            'sourceContext': None,
            'visualizations': consumers,
        }
    env = {
        'href': template_renderer.get_base_href(),
        'visualizations': display_visualizations,
        'restSources': display_rest_sources,
        'dataSourceToken': data_sources.utils.generate_data_source_token(xsrf),
    }
    return [template_renderer.render(
        None, 'models/analytics/rest_visualizations.html',
        {'env': transforms.dumps(env)})]
| |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import unittest, with_config
try:
from unittest import mock
except ImportError:
import mock
import luigi.rpc
from luigi.scheduler import Scheduler
import scheduler_api_test
import luigi.server
from server_test import ServerTestBase
import socket
from multiprocessing import Process, Queue
import requests
class RemoteSchedulerTest(unittest.TestCase):
    """Tests for RemoteScheduler URL normalization and RPC retry behavior."""

    def testUrlArgumentVariations(self):
        # Base URLs with/without a trailing slash combined with suffixes
        # with/without a leading slash must all normalize to one fetch URL.
        for url in ['http://zorg.com', 'http://zorg.com/']:
            for suffix in ['api/123', '/api/123']:
                s = luigi.rpc.RemoteScheduler(url, 42)
                with mock.patch.object(s, '_fetcher') as fetcher:
                    s._fetch(suffix, '{}')
                    fetcher.fetch.assert_called_once_with('http://zorg.com/api/123', '{}', 42)

    def get_work(self, fetcher_side_effect):
        # Helper (not itself a test): run RemoteScheduler.get_work against a
        # mocked fetcher whose successive return values / raised exceptions
        # are given by fetcher_side_effect.
        scheduler = luigi.rpc.RemoteScheduler('http://zorg.com', 42)
        scheduler._rpc_retry_wait = 1  # shorten wait time to speed up tests
        with mock.patch.object(scheduler, '_fetcher') as fetcher:
            # Tell the RPC layer which exception types count as retryable.
            fetcher.raises = socket.timeout, socket.gaierror
            fetcher.fetch.side_effect = fetcher_side_effect
            return scheduler.get_work("fake_worker")

    def test_retry_rpc_method(self):
        """
        Tests that a call to a RPC method is re-tried 3 times.
        """
        fetch_results = [socket.timeout, socket.timeout, '{"response":{}}']
        self.assertEqual({}, self.get_work(fetch_results))

    def test_retry_rpc_limited(self):
        """
        Tests that a call to an RPC method fails after the third attempt
        """
        fetch_results = [socket.timeout, socket.timeout, socket.timeout]
        self.assertRaises(luigi.rpc.RPCError, self.get_work, fetch_results)

    @mock.patch('luigi.rpc.logger')
    def test_log_rpc_retries_enabled(self, mock_logger):
        """
        Tests that each retry of an RPC method is logged
        """
        fetch_results = [socket.timeout, socket.timeout, '{"response":{}}']
        self.get_work(fetch_results)
        # Two failures -> two (warning, info, info) triples; the third
        # attempt succeeds and logs nothing.
        self.assertEqual([
            mock.call.warning('Failed connecting to remote scheduler %r', 'http://zorg.com', exc_info=True),
            mock.call.info('Retrying attempt 2 of 3 (max)'),
            mock.call.info('Wait for 1 seconds'),
            mock.call.warning('Failed connecting to remote scheduler %r', 'http://zorg.com', exc_info=True),
            mock.call.info('Retrying attempt 3 of 3 (max)'),
            mock.call.info('Wait for 1 seconds'),
        ], mock_logger.mock_calls)

    @with_config({'core': {'rpc-log-retries': 'false'}})
    @mock.patch('luigi.rpc.logger')
    def test_log_rpc_retries_disabled(self, mock_logger):
        """
        Tests that retries of an RPC method are not logged
        """
        fetch_results = [socket.timeout, socket.timeout, socket.gaierror]
        try:
            self.get_work(fetch_results)
            self.fail("get_work should have thrown RPCError")
        except luigi.rpc.RPCError as e:
            # The last underlying failure is preserved on the RPCError.
            self.assertTrue(isinstance(e.sub_exception, socket.gaierror))
        self.assertEqual([], mock_logger.mock_calls)

    def test_get_work_retries_on_null(self):
        """
        Tests that get_work will retry if the response is null
        """
        fetch_results = ['{"response": null}', '{"response": {"pass": true}}']
        self.assertEqual({'pass': True}, self.get_work(fetch_results))

    def test_get_work_retries_on_null_limited(self):
        """
        Tests that get_work will give up after the third null response
        """
        fetch_results = ['{"response": null}'] * 3 + ['{"response": {}}']
        self.assertRaises(luigi.rpc.RPCError, self.get_work, fetch_results)
class RPCTest(scheduler_api_test.SchedulerApiTest, ServerTestBase):
    """Runs the scheduler API test suite through the remote (RPC) scheduler."""

    def get_app(self):
        scheduler_config = self.get_scheduler_config()
        scheduler = Scheduler(**scheduler_config)
        return luigi.server.app(scheduler)

    def setUp(self):
        super(RPCTest, self).setUp()
        self.sch = luigi.rpc.RemoteScheduler(self.get_url(''))
        # Disable artificial waiting so the suite runs at full speed.
        self.sch._wait = lambda: None

    # The base-class tests below don't work with a remote scheduler, so
    # they are disabled here by overriding them with no-ops.
    def test_task_first_failure_time(self):
        pass

    def test_task_first_failure_time_remains_constant(self):
        pass

    def test_task_has_excessive_failures(self):
        pass

    def test_quadratic_behavior(self):
        """Too slow to run through the network."""
        pass

    def test_get_work_speed(self):
        """Too slow to run through the network."""
        pass
class RequestsFetcherTest(ServerTestBase):

    def test_fork_changes_session(self):
        # A forked child process must not keep using the parent's
        # requests.Session; check_pid() is responsible for swapping it out.
        parent_session = requests.Session()
        fetcher = luigi.rpc.RequestsFetcher(parent_session)
        result_queue = Queue()

        def child_check(queue):
            fetcher.check_pid()
            # make sure that check_pid has changed out the session
            queue.put(fetcher.session != parent_session)

        child = Process(target=child_check, args=(result_queue,))
        child.start()
        child.join()
        self.assertTrue(
            result_queue.get(),
            'the requests.Session should have changed in the new process')
class URLLibFetcherTest(ServerTestBase):

    def test_url_with_basic_auth(self):
        fetcher = luigi.rpc.URLLibFetcher()
        # 'user@' and 'user:@' (explicit empty password) must encode to the
        # same Authorization header, with credentials stripped from the URL.
        for url in ('http://user@localhost', 'http://user:@localhost'):
            req = fetcher._create_request(url)
            self.assertTrue(req.has_header('Authorization'))
            self.assertEqual(req.get_header('Authorization'), 'Basic dXNlcjo=')
            self.assertEqual(req.get_full_url(), 'http://localhost')
        # Non-empty password.
        req = fetcher._create_request('http://user:pass@localhost')
        self.assertTrue(req.has_header('Authorization'))
        self.assertEqual(req.get_header('Authorization'), 'Basic dXNlcjpwYXNz')
        self.assertEqual(req.get_full_url(), 'http://localhost')

    def test_url_without_basic_auth(self):
        # No credentials -> no Authorization header at all.
        fetcher = luigi.rpc.URLLibFetcher()
        req = fetcher._create_request('http://localhost')
        self.assertFalse(req.has_header('Authorization'))
        self.assertEqual(req.get_full_url(), 'http://localhost')

    def test_body_encoding(self):
        fetcher = luigi.rpc.URLLibFetcher()
        # A dict body is form-encoded to bytes (spaces as '+', '/' escaped).
        req = fetcher._create_request('http://localhost',
                                      body={'foo': 'bar baz/test'})
        self.assertEqual(req.data, b'foo=bar+baz%2Ftest')
        # No body leaves the request data unset.
        req = fetcher._create_request('http://localhost')
        self.assertIsNone(req.data)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The cells extension."""
from oslo.config import cfg
import six
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.cells import rpc_driver
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Pull in the cell name and capabilities options defined by nova.cells.
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')
# Extension alias used in the v3 API URL and in policy authorization checks.
ALIAS = "os-cells"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
def make_cell(elem):
    """Declare the attributes and sub-elements serialized for one cell."""
    # Scalar cell attributes copied straight from the cell dict.
    for attr_name in ('name', 'username', 'type', 'rpc_host', 'rpc_port'):
        elem.set(attr_name)
    # Capabilities serialize as <capabilities> with one child per item,
    # where the tag is the capability name (item index 0) and the text is
    # its value (item index 1).
    caps = xmlutil.SubTemplateElement(elem, 'capabilities',
                                      selector='capabilities')
    cap = xmlutil.SubTemplateElement(caps, xmlutil.Selector(0),
                                     selector=xmlutil.get_items)
    cap.text = 1
    make_capacity(elem)
def make_capacity(cell):
    """Declare the <capacities> sub-tree (ram/disk free) for a cell."""

    def units_by_mb(capacity_info):
        return capacity_info['units_by_mb'].items()

    capacity = xmlutil.SubTemplateElement(cell, 'capacities',
                                          selector='capacities')
    # ram_free and disk_free share the same shape: a total_mb attribute
    # plus one <unit_by_mb> child per (mb, unit) pair.
    for free_kind in ('ram_free', 'disk_free'):
        free_elem = xmlutil.SubTemplateElement(capacity, free_kind,
                                               selector=free_kind)
        free_elem.set('total_mb', 'total_mb')
        unit_elem = xmlutil.SubTemplateElement(free_elem, 'unit_by_mb',
                                               selector=units_by_mb)
        unit_elem.set('mb', 0)
        unit_elem.set('unit', 1)
cell_nsmap = {None: wsgi.XMLNS_V10}
class CellTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single cell resource."""
    def construct(self):
        """Build the master template rooted at a <cell> element."""
        root = xmlutil.TemplateElement('cell', selector='cell')
        make_cell(root)
        return xmlutil.MasterTemplate(root, 1, nsmap=cell_nsmap)
class CellsTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a list of cell resources."""
    def construct(self):
        """Build the master template: <cells> wrapping one <cell> per item."""
        root = xmlutil.TemplateElement('cells')
        elem = xmlutil.SubTemplateElement(root, 'cell', selector='cells')
        make_cell(elem)
        return xmlutil.MasterTemplate(root, 1, nsmap=cell_nsmap)
class CellDeserializer(wsgi.XMLDeserializer):
    """Deserializer to handle xml-formatted cell create requests."""

    def _extract_capabilities(self, cap_node):
        # One capability per child element: tag name -> text content.
        return dict((child.tagName, self.extract_text(child))
                    for child in cap_node.childNodes)

    def _extract_cell(self, node):
        cell_node = self.find_first_child_named(node, 'cell')
        # Fields needing special conversion; everything else is plain text.
        special = {
            'capabilities': self._extract_capabilities,
            'rpc_port': lambda child: int(self.extract_text(child)),
        }
        cell = {}
        for child in cell_node.childNodes:
            handler = special.get(child.tagName, self.extract_text)
            cell[child.tagName] = handler(child)
        return cell

    def default(self, string):
        """Deserialize an xml-formatted cell create request."""
        dom = xmlutil.safe_minidom_parse_string(string)
        return {'body': {'cell': self._extract_cell(dom)}}
def _filter_keys(item, keys):
"""
Filters all model attributes except for keys
item is a dict
"""
return dict((k, v) for k, v in item.iteritems() if k in keys)
def _fixup_cell_info(cell_info, keys):
    """Derive username/rpc_host/rpc_port from a cell's transport_url.

    Mutates *cell_info* in place and returns None: the 'transport_url'
    entry is removed, and each key in *keys* that is not already present
    is filled in from the parsed URL, or with None when the URL cannot
    be parsed.
    """
    if 'transport_url' not in cell_info:
        return
    # Disassemble the transport URL
    transport_url = cell_info.pop('transport_url')
    try:
        transport = rpc_driver.parse_transport_url(transport_url)
    except ValueError:
        # Unparseable URL: just fill the missing keys with None.
        for key in keys:
            cell_info.setdefault(key, None)
        # NOTE: previously this path alone returned cell_info while every
        # other path returned None; normalized to a pure mutator.
        return
    # Keys whose transport-URL field name differs from the cell key name.
    transport_field_map = {'rpc_host': 'hostname', 'rpc_port': 'port'}
    for key in keys:
        if key in cell_info:
            continue
        transport_field = transport_field_map.get(key, key)
        cell_info[key] = transport[transport_field]
def _scrub_cell(cell, detail=False):
    """Return a sanitized view of *cell* suitable for an API response.

    :param detail: when True, also expose the cell's capabilities.
    """
    wanted = ['name', 'username', 'rpc_host', 'rpc_port']
    if detail:
        wanted.append('capabilities')
    # Keep transport_url temporarily so the rpc fields can be derived.
    scrubbed = _filter_keys(cell, wanted + ['transport_url'])
    _fixup_cell_info(scrubbed, wanted)
    scrubbed['type'] = 'parent' if cell['is_parent'] else 'child'
    return scrubbed
class CellsController(object):
    """Controller for Cell resources.

    Exposes list/show/create/update/delete of neighbor cells plus the
    'info', 'capacities' and 'sync_instances' actions, all backed by the
    cells RPC API.
    """
    def __init__(self):
        # Handles to the compute API and the cells RPC API used below.
        self.compute_api = compute.API()
        self.cells_rpcapi = cells_rpcapi.CellsAPI()
    def _get_cells(self, ctxt, req, detail=False):
        """Return all cells, scrubbed for API consumption."""
        # Ask the CellsManager for the most recent data
        items = self.cells_rpcapi.get_cell_info_for_neighbors(ctxt)
        # Apply any pagination parameters carried by the request.
        items = common.limited(items, req)
        items = [_scrub_cell(item, detail=detail) for item in items]
        return dict(cells=items)
    @extensions.expected_errors(())
    @wsgi.serializers(xml=CellsTemplate)
    def index(self, req):
        """Return all cells in brief."""
        ctxt = req.environ['nova.context']
        authorize(ctxt)
        return self._get_cells(ctxt, req)
    @extensions.expected_errors(())
    @wsgi.serializers(xml=CellsTemplate)
    def detail(self, req):
        """Return all cells in detail."""
        ctxt = req.environ['nova.context']
        authorize(ctxt)
        return self._get_cells(ctxt, req, detail=True)
    @extensions.expected_errors(())
    @wsgi.serializers(xml=CellTemplate)
    def info(self, req):
        """Return name and capabilities for this cell."""
        context = req.environ['nova.context']
        authorize(context)
        cell_capabs = {}
        my_caps = CONF.cells.capabilities
        for cap in my_caps:
            # Each configured capability is a 'key=value' string.
            key, value = cap.split('=')
            cell_capabs[key] = value
        cell = {'name': CONF.cells.name,
                'type': 'self',
                'rpc_host': None,
                'rpc_port': 0,
                'username': None,
                'capabilities': cell_capabs}
        return dict(cell=cell)
    @extensions.expected_errors(404)
    @wsgi.serializers(xml=CellTemplate)
    def capacities(self, req, id=None):
        """Return capacities for a given cell or all cells."""
        # TODO(kaushikc): return capacities as a part of cell info and
        # cells detail calls in v3, along with capabilities
        context = req.environ['nova.context']
        authorize(context)
        try:
            # cell_name=None means aggregate capacities across all cells.
            capacities = self.cells_rpcapi.get_capacities(context,
                                                          cell_name=id)
        except exception.CellNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        return dict(cell={"capacities": capacities})
    @extensions.expected_errors(404)
    @wsgi.serializers(xml=CellTemplate)
    def show(self, req, id):
        """Return data about the given cell name. 'id' is a cell name."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            cell = self.cells_rpcapi.cell_get(context, id)
        except exception.CellNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        return dict(cell=_scrub_cell(cell))
    @extensions.expected_errors((403, 404))
    @wsgi.response(204)
    def delete(self, req, id):
        """Delete a child or parent cell entry. 'id' is a cell name."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            num_deleted = self.cells_rpcapi.cell_delete(context, id)
        except exception.CellsUpdateUnsupported as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        # cell_delete reports how many rows were removed; zero means the
        # named cell was never there.
        if num_deleted == 0:
            raise exc.HTTPNotFound(
                explanation=_("Cell %s doesn't exist.") % id)
    def _validate_cell_name(self, cell_name):
        """Validate cell name is not empty and doesn't contain '!' or '.'."""
        if not cell_name:
            msg = _("Cell name cannot be empty")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        if '!' in cell_name or '.' in cell_name:
            msg = _("Cell name cannot contain '!' or '.'")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
    def _validate_cell_type(self, cell_type):
        """Validate cell_type is 'parent' or 'child'."""
        if cell_type not in ['parent', 'child']:
            msg = _("Cell type must be 'parent' or 'child'")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
    def _normalize_cell(self, cell, existing=None):
        """
        Normalize input cell data. Normalizations include:

        * Converting cell['type'] to is_parent boolean.
        * Merging existing transport URL with transport information.

        Mutates *cell* in place; *existing* is the current DB record when
        updating (None when creating).
        """
        # Start with the cell type conversion
        if 'type' in cell:
            self._validate_cell_type(cell['type'])
            cell['is_parent'] = cell['type'] == 'parent'
            del cell['type']
        # Avoid cell type being overwritten to 'child'
        elif existing:
            cell['is_parent'] = existing['is_parent']
        else:
            cell['is_parent'] = False
        # Now we disassemble the existing transport URL...
        transport = {}
        if existing and 'transport_url' in existing:
            transport = rpc_driver.parse_transport_url(
                existing['transport_url'])
        # Copy over the input fields
        transport_field_map = {
            'username': 'username',
            'password': 'password',
            'hostname': 'rpc_host',
            'port': 'rpc_port',
            'virtual_host': 'rpc_virtual_host',
        }
        for key, input_field in transport_field_map.items():
            # Set the default value of the field; using setdefault()
            # lets us avoid overriding the existing transport URL
            transport.setdefault(key, None)
            # Only override the value if we're given an override
            if input_field in cell:
                transport[key] = cell.pop(input_field)
        # Now set the transport URL
        cell['transport_url'] = rpc_driver.unparse_transport_url(transport)
    @extensions.expected_errors((400, 403))
    @wsgi.serializers(xml=CellTemplate)
    @wsgi.deserializers(xml=CellDeserializer)
    @wsgi.response(201)
    def create(self, req, body):
        """Create a child cell entry."""
        context = req.environ['nova.context']
        authorize(context)
        if 'cell' not in body:
            msg = _("No cell information in request")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        cell = body['cell']
        if 'name' not in cell:
            msg = _("No cell name in request")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        self._validate_cell_name(cell['name'])
        self._normalize_cell(cell)
        try:
            cell = self.cells_rpcapi.cell_create(context, cell)
        except exception.CellsUpdateUnsupported as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        return dict(cell=_scrub_cell(cell))
    @extensions.expected_errors((400, 403, 404))
    @wsgi.serializers(xml=CellTemplate)
    @wsgi.deserializers(xml=CellDeserializer)
    def update(self, req, id, body):
        """Update a child cell entry. 'id' is the cell name to update."""
        context = req.environ['nova.context']
        authorize(context)
        if 'cell' not in body:
            msg = _("No cell information in request")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        cell = body['cell']
        # The id is taken from the URL, never from the body.
        cell.pop('id', None)
        if 'name' in cell:
            self._validate_cell_name(cell['name'])
        try:
            # NOTE(Vek): There is a race condition here if multiple
            #            callers are trying to update the cell
            #            information simultaneously.  Since this
            #            operation is administrative in nature, and
            #            will be going away in the future, I don't see
            #            it as much of a problem...
            existing = self.cells_rpcapi.cell_get(context, id)
        except exception.CellNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        self._normalize_cell(cell, existing)
        try:
            cell = self.cells_rpcapi.cell_update(context, id, cell)
        except exception.CellNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except exception.CellsUpdateUnsupported as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        return dict(cell=_scrub_cell(cell))
    @extensions.expected_errors(400)
    @wsgi.response(204)
    def sync_instances(self, req, body):
        """Tell all cells to sync instance info."""
        context = req.environ['nova.context']
        authorize(context)
        project_id = body.pop('project_id', None)
        deleted = body.pop('deleted', False)
        updated_since = body.pop('updated_since', None)
        # Anything left over in the body is an unknown parameter.
        if body:
            msg = _("Only 'updated_since', 'project_id' and 'deleted' are "
                    "understood.")
            raise exc.HTTPBadRequest(explanation=msg)
        if isinstance(deleted, six.string_types):
            try:
                deleted = strutils.bool_from_string(deleted, strict=True)
            except ValueError as err:
                raise exc.HTTPBadRequest(explanation=str(err))
        if updated_since:
            try:
                # Validate only; the raw string is what gets passed along.
                timeutils.parse_isotime(updated_since)
            except ValueError:
                msg = _('Invalid changes-since value')
                raise exc.HTTPBadRequest(explanation=msg)
        self.cells_rpcapi.sync_instances(context, project_id=project_id,
                updated_since=updated_since, deleted=deleted)
class Cells(extensions.V3APIExtensionBase):
    """Enables cells-related functionality such as adding neighbor cells,
    listing neighbor cells, and getting the capabilities of the local cell.
    """
    name = "Cells"
    alias = ALIAS
    namespace = "http://docs.openstack.org/compute/ext/cells/api/v3"
    version = 1
    def get_resources(self):
        """Register the os-cells resource and its extra actions."""
        # Extra actions available on the cells collection.
        coll_actions = {
            'detail': 'GET',
            'info': 'GET',
            'sync_instances': 'POST',
            'capacities': 'GET',
        }
        # Extra actions available on an individual cell member.
        memb_actions = {
            'capacities': 'GET',
        }
        res = extensions.ResourceExtension(ALIAS, CellsController(),
                collection_actions=coll_actions,
                member_actions=memb_actions)
        return [res]
    def get_controller_extensions(self):
        # This extension contributes resources only, no controller
        # extensions.
        return []
| |
# -*- coding: utf-8 -*-
'''
Manage transport commands via ssh
'''
# Import python libs
import os
import time
import subprocess
# Import salt libs
import salt.utils
import salt.utils.nb_popen
def gen_key(path):
    '''
    Generate an RSA keypair for use with salt-ssh.

    The private key is written to *path* (ssh-keygen also writes
    ``<path>.pub``); any missing parent directories are created first.
    '''
    keydir = os.path.dirname(path)
    if not os.path.isdir(keydir):
        os.makedirs(keydir)
    # Pass arguments as a list (shell=False) so a path containing spaces
    # or shell metacharacters cannot break or inject into the command.
    subprocess.call(['ssh-keygen', '-P', '', '-f', path, '-t', 'rsa', '-q'])
class Shell(object):
    '''
    Create a shell connection object to encapsulate ssh executions
    '''
    def __init__(
            self,
            opts,
            host,
            user=None,
            port=None,
            passwd=None,
            priv=None,
            timeout=None,
            sudo=False,
            tty=False):
        # Connection parameters; values left as None fall back to the
        # underlying ssh client's defaults.
        self.opts = opts
        self.host = host
        self.user = user
        self.port = port
        self.passwd = passwd
        self.priv = priv
        self.timeout = timeout
        self.sudo = sudo
        self.tty = tty
    def get_error(self, errstr):
        '''
        Parse out an error and return a targeted error string

        Skips known-noise lines (pseudo-terminal warnings, known_hosts
        notices); the first remaining line, or an 'ssh:' line, is taken
        as the error. Falls back to the full stderr text.
        '''
        for line in errstr.split('\n'):
            if line.startswith('ssh:'):
                return line
            if line.startswith('Pseudo-terminal'):
                continue
            if 'to the list of known hosts.' in line:
                continue
            return line
        return errstr
    def _key_opts(self):
        '''
        Return options for the ssh command base for Salt to call

        Key-based auth: password/interactive mechanisms are disabled.
        Returned as a single string of '-o Key=Value ' fragments.
        '''
        options = [
                'KbdInteractiveAuthentication=no',
                'GSSAPIAuthentication=no',
                'PasswordAuthentication=no',
               ]
        options.append('ConnectTimeout={0}'.format(self.timeout))
        if self.opts.get('ignore_host_keys'):
            options.append('StrictHostKeyChecking=no')
        if self.port:
            options.append('Port={0}'.format(self.port))
        if self.priv:
            options.append('IdentityFile={0}'.format(self.priv))
        if self.user:
            options.append('User={0}'.format(self.user))
        ret = []
        for option in options:
            ret.append('-o {0} '.format(option))
        return ''.join(ret)
    def _passwd_opts(self):
        '''
        Return options to pass to sshpass

        With a password set, password auth is forced on and pubkey auth
        off; otherwise the reverse, plus BatchMode to fail fast instead
        of prompting.
        '''
        # TODO ControlMaster does not work without ControlPath
        # user could take advantage of it if they set ControlPath in their
        # ssh config. Also, ControlPersist not widely available.
        options = ['ControlMaster=auto',
                   'StrictHostKeyChecking=no',
                   'GSSAPIAuthentication=no',
                   ]
        options.append('ConnectTimeout={0}'.format(self.timeout))
        if self.opts.get('ignore_host_keys'):
            options.append('StrictHostKeyChecking=no')
        if self.passwd:
            options.extend(['PasswordAuthentication=yes',
                            'PubkeyAuthentication=no'])
        else:
            options.extend(['PasswordAuthentication=no',
                            'PubkeyAuthentication=yes',
                            'KbdInteractiveAuthentication=no',
                            'ChallengeResponseAuthentication=no',
                            'BatchMode=yes'])
        if self.port:
            options.append('Port={0}'.format(self.port))
        if self.user:
            options.append('User={0}'.format(self.user))
        ret = []
        for option in options:
            ret.append('-o {0} '.format(option))
        return ''.join(ret)
    def _copy_id_str_old(self):
        '''
        Return the string to execute ssh-copy-id

        Older ssh-copy-id variant: the ssh options are quoted as a single
        argument. Returns None when no password/sshpass is available.
        '''
        if self.passwd and salt.utils.which('sshpass'):
            # Using single quotes prevents shell expansion and
            # passwords containig '$'
            return "sshpass -p '{0}' {1} {2} '{3} -p {4} {5}@{6}'".format(
                    self.passwd,
                    'ssh-copy-id',
                    '-i {0}.pub'.format(self.priv),
                    self._passwd_opts(),
                    self.port,
                    self.user,
                    self.host)
        return None
    def _copy_id_str_new(self):
        '''
        Since newer ssh-copy-id commands ingest option differently we need to
        have two commands
        '''
        if self.passwd and salt.utils.which('sshpass'):
            # Using single quotes prevents shell expansion and
            # passwords containig '$'
            return "sshpass -p '{0}' {1} {2} {3} -p {4} {5}@{6}".format(
                    self.passwd,
                    'ssh-copy-id',
                    '-i {0}.pub'.format(self.priv),
                    self._passwd_opts(),
                    self.port,
                    self.user,
                    self.host)
        return None
    def copy_id(self):
        '''
        Execute ssh-copy-id to plant the id file on the target

        Tries the old-style invocation first; a 'Usage' error on stderr
        indicates a newer ssh-copy-id, so retry with the new syntax.
        '''
        stdout, stderr = self._run_cmd(self._copy_id_str_old())
        if stderr.startswith('Usage'):
            self._run_cmd(self._copy_id_str_new())
    def _cmd_str(self, cmd, ssh='ssh'):
        '''
        Return the cmd string to execute

        Prefers sshpass+password when available, then key-based auth.
        For scp the host is omitted here because *cmd* already carries
        the host:path target. Returns None when neither auth mode applies.
        '''
        # TODO: if tty, then our SSH_SHIM cannot be supplied from STDIN Will
        # need to deliver the SHIM to the remote host and execute it there
        if self.passwd and salt.utils.which('sshpass'):
            opts = self._passwd_opts()
            # Using single quotes prevents shell expansion and
            # passwords containig '$'
            return "sshpass -p '{0}' {1} {2} {3} {4} {5}".format(
                    self.passwd,
                    ssh,
                    '' if ssh == 'scp' else self.host,
                    '-t -t' if self.tty else '',
                    opts,
                    cmd)
        if self.priv:
            opts = self._key_opts()
            return "{0} {1} {2} {3} {4}".format(
                    ssh,
                    '' if ssh == 'scp' else self.host,
                    '-t -t' if self.tty else '',
                    opts,
                    cmd)
        return None
    def _run_cmd(self, cmd):
        '''
        Cleanly execute the command string

        :returns: (stdout, stderr) tuple from the finished process, or
                  ('local', 'Unknown Error') on any failure to execute.
        '''
        try:
            proc = subprocess.Popen(
                cmd,
                shell=True,
                stderr=subprocess.PIPE,
                stdout=subprocess.PIPE,
            )
            data = proc.communicate()
            return data
        except Exception:
            return ('local', 'Unknown Error')
    def _run_nb_cmd(self, cmd):
        '''
        cmd iterator

        Runs *cmd* via a non-blocking Popen and yields (out, err) chunks
        as they arrive; terminates when both streams are exhausted.
        '''
        try:
            proc = salt.utils.nb_popen.NonBlockingPopen(
                cmd,
                shell=True,
                stderr=subprocess.PIPE,
                stdout=subprocess.PIPE,
            )
            while True:
                time.sleep(0.1)
                out = proc.recv()
                err = proc.recv_err()
                if out is None and err is None:
                    break
                if err:
                    err = self.get_error(err)
                yield out, err
        except Exception:
            yield ('', 'Unknown Error')
    def exec_nb_cmd(self, cmd):
        '''
        Yield None until cmd finished

        The final yield delivers the accumulated (stdout, stderr).
        '''
        r_out = []
        r_err = []
        cmd = self._cmd_str(cmd)
        for out, err in self._run_nb_cmd(cmd):
            if out is not None:
                r_out.append(out)
            if err is not None:
                r_err.append(err)
            yield None, None
        yield ''.join(r_out), ''.join(r_err)
    def exec_cmd(self, cmd):
        '''
        Execute a remote command
        '''
        cmd = self._cmd_str(cmd)
        ret = self._run_cmd(cmd)
        return ret
    def send(self, local, remote):
        '''
        scp a file or files to a remote system
        '''
        # Build 'local host:remote' and wrap it in the scp command line.
        cmd = '{0} {1}:{2}'.format(local, self.host, remote)
        cmd = self._cmd_str(cmd, ssh='scp')
        return self._run_cmd(cmd)
| |
# Copyright 2021 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test remote_runner module."""
from inspect import signature
import json
from typing import Optional, Dict, Tuple, Union, List, ForwardRef
import unittest
from google.cloud import aiplatform
from google_cloud_pipeline_components.aiplatform import utils
INIT_KEY = 'init'
METHOD_KEY = 'method'
class UtilsTests(unittest.TestCase):
    """Unit tests for google_cloud_pipeline_components.aiplatform.utils."""

    def setUp(self):
        super(UtilsTests, self).setUp()

    # NOTE: the docstring below (including the 'descirption' typo) is
    # fixture data consumed by the docstring-filtering tests; do not
    # edit it without updating their expected results.
    def _test_method(
        self, credentials: Optional, sync: bool, input_param: str,
        input_name: str
    ):
        """Test short description.
        Long descirption
        Args:
            credentials: credentials
            sync: sync
            input_param: input_param
            input_name:input_name
        """

    def test_get_forward_reference_with_annotation_str(self):
        annotation = aiplatform.Model.__name__
        results = utils.get_forward_reference(annotation)
        self.assertEqual(results, aiplatform.Model)

    def test_get_forward_reference_with_annotation_forward_reference(self):
        annotation = ForwardRef(aiplatform.Model.__name__)
        results = utils.get_forward_reference(annotation)
        self.assertEqual(results, aiplatform.Model)

    def test_resolve_annotation_with_annotation_class(self):
        annotation = aiplatform.Model
        results = utils.resolve_annotation(annotation)
        self.assertEqual(results, annotation)

    def test_resolve_annotation_with_annotation_foward_str_reference(self):
        annotation = aiplatform.Model.__name__
        results = utils.resolve_annotation(annotation)
        self.assertEqual(results, aiplatform.Model)

    def test_resolve_annotation_with_annotation_foward_typed_reference(self):
        annotation = ForwardRef(aiplatform.Model.__name__)
        results = utils.resolve_annotation(annotation)
        self.assertEqual(results, aiplatform.Model)

    def test_resolve_annotation_with_annotation_type_union(self):
        annotation = Union[Dict, None]
        results = utils.resolve_annotation(annotation)
        self.assertEqual(results, Dict)

    def test_resolve_annotation_with_annotation_type_empty(self):
        annotation = None
        results = utils.resolve_annotation(annotation)
        self.assertEqual(results, None)

    def test_is_serializable_to_json_with_serializable_type(self):
        annotation = Dict
        results = utils.is_serializable_to_json(annotation)
        self.assertTrue(results)

    def test_is_serializable_to_json_with_not_serializable_type(self):
        annotation = Tuple
        results = utils.is_serializable_to_json(annotation)
        self.assertFalse(results)

    def test_is_mb_sdk_resource_noun_type_with_not_noun_type(self):
        annotation = Tuple
        # Fixed copy/paste bug: this case previously called
        # is_serializable_to_json, never exercising the function under test.
        results = utils.is_mb_sdk_resource_noun_type(annotation)
        self.assertFalse(results)

    def test_is_mb_sdk_resource_noun_type_with_resource_noun_type(self):
        mb_sdk_type = aiplatform.Model
        results = utils.is_mb_sdk_resource_noun_type(mb_sdk_type)
        self.assertTrue(results)

    def test_get_serializer_with_serializable_type(self):
        annotation = Dict
        results = utils.get_serializer(annotation)
        self.assertEqual(results, json.dumps)

    def test_get_serializer_with_not_serializable_type(self):
        annotation = Tuple
        results = utils.get_serializer(annotation)
        self.assertEqual(results, None)

    def test_get_deserializer_with_serializable_type(self):
        annotation = Dict
        results = utils.get_deserializer(annotation)
        self.assertEqual(results, json.loads)

    def test_get_deserializer_with_not_serializable_type(self):
        annotation = Tuple
        results = utils.get_deserializer(annotation)
        self.assertEqual(results, None)

    def test_map_resource_to_metadata_type_with_mb_sdk_type(self):
        mb_sdk_type = aiplatform.Model
        parameter_name, parameter_type = utils.map_resource_to_metadata_type(
            mb_sdk_type
        )
        self.assertEqual(parameter_name, 'model')
        self.assertEqual(parameter_type, 'google.VertexModel')

    def test_map_resource_to_metadata_type_with_serializable_type(self):
        mb_sdk_type = List
        parameter_name, parameter_type = utils.map_resource_to_metadata_type(
            mb_sdk_type
        )
        self.assertEqual(parameter_name, 'exported_dataset')
        self.assertEqual(parameter_type, 'google.VertexDataset')

    def test_map_resource_to_metadata_type_with__Dataset_type(self):
        mb_sdk_type = '_Dataset'
        parameter_name, parameter_type = utils.map_resource_to_metadata_type(
            mb_sdk_type
        )
        self.assertEqual(parameter_name, 'dataset')
        self.assertEqual(parameter_type, 'google.VertexDataset')

    def test_is_resource_name_parameter_name_with_display_name(self):
        param_name = 'display_name'
        self.assertFalse(utils.is_resource_name_parameter_name(param_name))

    def test_is_resource_name_parameter_name_with_encryption_spec_key_name(
        self
    ):
        # Fixed copy/paste bug: this case previously tested 'display_name',
        # duplicating the test above instead of the name in its title.
        param_name = 'encryption_spec_key_name'
        self.assertFalse(utils.is_resource_name_parameter_name(param_name))

    def test_is_resource_name_parameter_name_with_resource_name(self):
        param_name = 'testresource_name'
        self.assertTrue(utils.is_resource_name_parameter_name(param_name))

    def test_filter_signature_with_PARAMS_TO_REMOVE(self):
        def test_method(self, credentials: Optional, sync, init_param: str):
            pass

        filtered_signature = utils.filter_signature(signature(test_method))
        expected_output_str = "<Signature (init_param: str)>"
        self.assertEqual(repr(filtered_signature), expected_output_str)

    def test_filter_signature_with_resouce_name(self):
        def test_method(model_name: str):
            pass

        param_map = {}
        filtered_signature = utils.filter_signature(
            signature=signature(test_method),
            is_init_signature=True,
            self_type=str,
            component_param_name_to_mb_sdk_param_name=param_map
        )
        expected_output_str = "<Signature (model: str)>"
        self.assertEqual(repr(filtered_signature), expected_output_str)

    def test_signatures_union_with_(self):
        def test_init(init_param: str):
            pass

        def test_method(method_param: str):
            pass

        init_signature = signature(test_init)
        method_signature = signature(test_method)
        results = utils.signatures_union(init_signature, method_signature)
        expected_results = "<Signature (init_param: str, method_param: str)>"
        self.assertEqual(repr(results), expected_results)

    # Renamed from test_signatures_union_with_PARAMS_TO_REMOVE: this case
    # exercises filter_docstring_args, not signatures_union.
    def test_filter_docstring_args_with_PARAMS_TO_REMOVE(self):
        test_docstring_method_signature = signature(self._test_method)
        docstring = self._test_method.__doc__
        results = utils.filter_docstring_args(
            signature=test_docstring_method_signature,
            docstring=docstring,
            is_init_signature=True
        )
        expected_results = {'input': 'input_name', 'input_param': 'input_param'}
        self.assertDictEqual(results, expected_results)

    def test_generate_docstring_with_PARAMS_TO_REMOVE(self):
        args_dict = {'input': 'input_name', 'input_param': 'input_param'}
        param_map = {}
        converted_method_signature = utils.filter_signature(
            signature(self._test_method),
            is_init_signature=True,
            component_param_name_to_mb_sdk_param_name=param_map
        )
        results = utils.generate_docstring(
            args_dict=args_dict,
            signature=converted_method_signature,
            method_docstring=self._test_method.__doc__
        )
        # Adjacent string literals concatenate on their own; the previous
        # "".join(...) wrapped a single already-joined literal.
        expected_results = (
            "Test short description.\n"
            "Long descirption\n\nArgs:\n"
            "    input:\n"
            "        input_name\n"
            "    input_param:\n"
            "        input_param\n"
        )
        self.assertEqual(results, expected_results)

    def test_custom_training_typed_dataset_annotation_defaults_to_using_base_dataset(
        self
    ):
        dataset_annotation = signature(aiplatform.CustomTrainingJob.run
                                       ).parameters['dataset'].annotation
        assert utils.resolve_annotation(
            dataset_annotation
        ) is aiplatform.datasets.dataset._Dataset
        dataset_annotation = signature(
            aiplatform.CustomContainerTrainingJob.run
        ).parameters['dataset'].annotation
        assert utils.resolve_annotation(
            dataset_annotation
        ) is aiplatform.datasets.dataset._Dataset
        dataset_annotation = signature(
            aiplatform.CustomPythonPackageTrainingJob.run
        ).parameters['dataset'].annotation
        assert utils.resolve_annotation(
            dataset_annotation
        ) is aiplatform.datasets.dataset._Dataset
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @authors: Shweta Padubidri, Cisco Systems, Inc.
# Peter Strunk , Cisco Systems, Inc.
# Shubhangi Satras , Cisco Systems, Inc.
import logging
import os.path
import unittest
import routes
import webob
from webtest import TestApp
from quantum import api as server
from quantum.api import extensions
from quantum.api.extensions import (
ExtensionMiddleware,
PluginAwareExtensionManager,
)
from quantum.common import config
from quantum.extensions import (
credential,
multiport,
novatenant,
portprofile,
qos,
)
from quantum.manager import QuantumManager
from quantum.openstack.common import jsonutils
from quantum.plugins.cisco.db import api as db
from quantum.plugins.cisco import l2network_plugin
from quantum.plugins.cisco.l2network_plugin import L2Network
from quantum.tests.unit.extension_stubs import StubBaseAppController
from quantum import wsgi
LOG = logging.getLogger('quantum.plugins.cisco.tests.test_cisco_extensions')
EXTENSIONS_PATH = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
os.pardir, os.pardir, "extensions")
ROOTDIR = os.path.dirname(os.path.dirname(__file__))
UNITDIR = os.path.join(ROOTDIR, 'unit')
def testsdir(*p):
    """Return a path under the unit-test directory joined from *p*."""
    parts = (UNITDIR,) + p
    return os.path.join(*parts)
config_file = 'quantum.conf.cisco.test'
args = ['--config-file', testsdir(config_file)]
config.parse(args=args)
class ExtensionsTestApp(wsgi.Router):
    """Minimal router exposing a stub 'dummy_resource' collection."""

    def __init__(self, options=None):
        options = options or {}
        mapper = routes.Mapper()
        mapper.resource("dummy_resource", "/dummy_resources",
                        controller=StubBaseAppController())
        super(ExtensionsTestApp, self).__init__(mapper)
class PortprofileExtensionTest(unittest.TestCase):
    def setUp(self):
        """Wire up a test app exposing the portprofiles resource extension
        and prepare common request paths/payloads.
        """
        # Port profiles hang off the per-tenant extension URL space.
        parent_resource = dict(member_name="tenant",
                               collection_name="extensions/csco/tenants")
        member_actions = {'associate_portprofile': "PUT",
                          'disassociate_portprofile': "PUT"}
        controller = portprofile.PortprofilesController(
            QuantumManager.get_plugin())
        res_ext = extensions.ResourceExtension('portprofiles', controller,
                                               parent=parent_resource,
                                               member_actions=member_actions)
        self.test_app = setup_extensions_test_app(
            SimpleExtensionManager(res_ext))
        self.contenttype = 'application/json'
        # Collection path, and member path prefix (note trailing slash).
        self.profile_path = '/extensions/csco/tenants/tt/portprofiles'
        self.portprofile_path = '/extensions/csco/tenants/tt/portprofiles/'
        # Default request body used by most of the tests below.
        self.test_port_profile = {
            'portprofile': {
                'portprofile_name': 'cisco_test_portprofile',
                'qos_name': 'test-qos1',
            },
        }
        self.tenant_id = "test_tenant"
        self.network_name = "test_network"
        self.api = server.APIRouterV10()
        self._l2network_plugin = l2network_plugin.L2Network()
    def test_list_portprofile(self):
        """ Test List Portprofile"""
        LOG.debug("test_list_portprofile - START")
        # Create two profiles so the index has something to list.
        req_body1 = jsonutils.dumps(self.test_port_profile)
        create_response1 = self.test_app.post(
            self.profile_path, req_body1,
            content_type=self.contenttype
        )
        req_body2 = jsonutils.dumps({
            'portprofile': {
                'portprofile_name': 'cisco_test_portprofile2',
                'qos_name': 'test-qos2',
            },
        })
        create_response2 = self.test_app.post(
            self.profile_path, req_body2,
            content_type=self.contenttype)
        index_response = self.test_app.get(self.profile_path)
        index_resp_body = wsgi.Serializer().deserialize(index_response.body,
                                                        self.contenttype)
        self.assertEqual(200, index_response.status_int)
        resp_body1 = wsgi.Serializer().deserialize(create_response1.body,
                                                   self.contenttype)
        portprofile_path1_temp = (
            self.portprofile_path +
            resp_body1['portprofiles']['portprofile']['id'])
        portprofile_path1 = str(portprofile_path1_temp)
        resp_body2 = wsgi.Serializer().deserialize(create_response2.body,
                                                   self.contenttype)
        # The listing order is not guaranteed, so only check membership.
        list_all_portprofiles = [resp_body1['portprofiles']['portprofile'],
                                 resp_body2['portprofiles']['portprofile']]
        self.assertTrue(index_resp_body['portprofiles'][0] in
                        list_all_portprofiles)
        self.assertTrue(index_resp_body['portprofiles'][1] in
                        list_all_portprofiles)
        portprofile_path2_temp = (
            self.portprofile_path +
            resp_body2['portprofiles']['portprofile']['id'])
        portprofile_path2 = str(portprofile_path2_temp)
        # Clean Up - Delete the Port Profiles
        self.tear_down_profile(portprofile_path1)
        self.tear_down_profile(portprofile_path2)
        LOG.debug("test_list_portprofile - END")
def test_create_portprofile(self):
""" Test create Portprofile"""
LOG.debug("test_create_portprofile - START")
req_body = jsonutils.dumps(self.test_port_profile)
index_response = self.test_app.post(self.profile_path, req_body,
content_type=self.contenttype)
self.assertEqual(200, index_response.status_int)
# Clean Up - Delete the Port Profile
resp_body = wsgi.Serializer().deserialize(index_response.body,
self.contenttype)
portprofile_path_temp = (
self.portprofile_path +
resp_body['portprofiles']['portprofile']['id'])
portprofile_path = str(portprofile_path_temp)
self.tear_down_profile(portprofile_path)
LOG.debug("test_create_portprofile - END")
def test_create_portprofileBADRequest(self):
""" Test create Portprofile Bad Request"""
LOG.debug("test_create_portprofileBADRequest - START")
index_response = self.test_app.post(self.profile_path, 'BAD_REQUEST',
content_type=self.contenttype,
status='*')
self.assertEqual(400, index_response.status_int)
LOG.debug("test_create_portprofileBADRequest - END")
    def test_show_portprofile(self):
        """ Test show Portprofile """
        LOG.debug("test_show_portprofile - START")
        # Create a profile, then GET it back by id.
        req_body = jsonutils.dumps(self.test_port_profile)
        index_response = self.test_app.post(self.profile_path, req_body,
                                            content_type=self.contenttype)
        resp_body = wsgi.Serializer().deserialize(index_response.body,
                                                  self.contenttype)
        show_path_temp = (self.portprofile_path +
                          resp_body['portprofiles']['portprofile']['id'])
        show_port_path = str(show_path_temp)
        show_response = self.test_app.get(show_port_path)
        show_resp_dict = wsgi.Serializer().deserialize(show_response.body,
                                                       self.contenttype)
        # The shown profile must echo back what was created.
        self.assertEqual(
            show_resp_dict['portprofiles']['portprofile']['qos_name'],
            self.test_port_profile['portprofile']['qos_name'])
        self.assertEqual(
            show_resp_dict['portprofiles']['portprofile']['name'],
            self.test_port_profile['portprofile']['portprofile_name'])
        self.assertEqual(200, show_response.status_int)
        # Clean Up - Delete the Port Profile
        self.tear_down_profile(show_port_path)
        LOG.debug("test_show_portprofile - END")
def test_show_portprofileDNE(self, portprofile_id='100'):
""" Test show Portprofile does not exist"""
LOG.debug("test_show_portprofileDNE - START")
show_path_temp = self.portprofile_path + portprofile_id
show_port_path = str(show_path_temp)
show_response = self.test_app.get(show_port_path, status='*')
self.assertEqual(450, show_response.status_int)
LOG.debug("test_show_portprofileDNE - END")
def test_update_portprofile(self):
""" Test update Portprofile"""
LOG.debug("test_update_portprofile - START")
req_body = jsonutils.dumps(self.test_port_profile)
index_response = self.test_app.post(
self.profile_path, req_body,
content_type=self.contenttype)
resp_body = wsgi.Serializer().deserialize(index_response.body,
self.contenttype)
rename_port_profile = {
'portprofile': {
'portprofile_name': 'cisco_rename_portprofile',
'qos_name': 'test-qos1',
},
}
rename_req_body = jsonutils.dumps(rename_port_profile)
rename_path_temp = (self.portprofile_path +
resp_body['portprofiles']['portprofile']['id'])
rename_path = str(rename_path_temp)
rename_response = self.test_app.put(rename_path, rename_req_body,
content_type=self.contenttype)
rename_resp_dict = wsgi.Serializer().deserialize(rename_response.body,
self.contenttype)
self.assertEqual(
rename_resp_dict['portprofiles']['portprofile']['qos_name'],
self.test_port_profile['portprofile']['qos_name'])
self.assertEqual(
rename_resp_dict['portprofiles']['portprofile']['name'],
rename_port_profile['portprofile']['portprofile_name'])
self.assertEqual(200, rename_response.status_int)
# Clean Up - Delete the Port Profile
self.tear_down_profile(rename_path)
LOG.debug("test_update_portprofile - END")
def test_update_portprofileBADRequest(self):
""" Test update Portprofile Bad Request"""
LOG.debug("test_update_portprofileBADRequest - START")
req_body = jsonutils.dumps(self.test_port_profile)
index_response = self.test_app.post(
self.profile_path, req_body,
content_type=self.contenttype)
resp_body = wsgi.Serializer().deserialize(index_response.body,
self.contenttype)
rename_path_temp = (self.portprofile_path +
resp_body['portprofiles']['portprofile']['id'])
rename_path = str(rename_path_temp)
rename_response = self.test_app.put(rename_path, 'BAD_REQUEST',
status='*')
self.assertEqual(400, rename_response.status_int)
# Clean Up - Delete the Port Profile
self.tear_down_profile(rename_path)
LOG.debug("test_update_portprofileBADRequest - END")
def test_update_portprofileDNE(self, portprofile_id='100'):
""" Test update Portprofile does not exist"""
LOG.debug("test_update_portprofileiDNE - START")
rename_port_profile = {
'portprofile': {
'portprofile_name': 'cisco_rename_portprofile',
'qos_name': 'test-qos1',
},
}
rename_req_body = jsonutils.dumps(rename_port_profile)
update_path_temp = self.portprofile_path + portprofile_id
update_path = str(update_path_temp)
update_response = self.test_app.put(update_path, rename_req_body,
content_type=self.contenttype,
status='*')
self.assertEqual(450, update_response.status_int)
LOG.debug("test_update_portprofileDNE - START")
def test_delete_portprofile(self):
""" Test delete Portprofile"""
LOG.debug("test_delete_portprofile - START")
req_body = jsonutils.dumps(self.test_port_profile)
index_response = self.test_app.post(
self.profile_path, req_body,
content_type=self.contenttype)
resp_body = wsgi.Serializer().deserialize(index_response.body,
self.contenttype)
delete_path_temp = (self.portprofile_path +
resp_body['portprofiles']['portprofile']['id'])
delete_path = str(delete_path_temp)
delete_response = self.test_app.delete(delete_path)
self.assertEqual(200, delete_response.status_int)
LOG.debug("test_delete_portprofile - END")
def test_delete_portprofileDNE(self, portprofile_id='100'):
""" Test delete Portprofile does not exist"""
LOG.debug("test_delete_portprofileDNE - START")
delete_path_temp = self.portprofile_path + portprofile_id
delete_path = str(delete_path_temp)
delete_response = self.test_app.delete(delete_path, status='*')
self.assertEqual(450, delete_response.status_int)
LOG.debug("test_delete_portprofileDNE - END")
def create_request(self, path, body, content_type, method='GET'):
""" Test create request"""
LOG.debug("test_create_request - START")
req = webob.Request.blank(path)
req.method = method
req.headers = {}
req.headers['Accept'] = content_type
req.body = body
LOG.debug("test_create_request - END")
return req
def _create_network(self, name=None):
""" Test create network"""
LOG.debug("Creating network - START")
if name:
net_name = name
else:
net_name = self.network_name
net_path = "/tenants/tt/networks"
net_data = {'network': {'name': '%s' % net_name}}
req_body = wsgi.Serializer().serialize(net_data, self.contenttype)
network_req = self.create_request(net_path, req_body,
self.contenttype, 'POST')
network_res = network_req.get_response(self.api)
network_data = wsgi.Serializer().deserialize(network_res.body,
self.contenttype)
LOG.debug("Creating network - END")
return network_data['network']['id']
def _create_port(self, network_id, port_state):
""" Test create port"""
LOG.debug("Creating port for network %s - START", network_id)
port_path = "/tenants/tt/networks/%s/ports" % network_id
port_req_data = {'port': {'state': '%s' % port_state}}
req_body = wsgi.Serializer().serialize(port_req_data,
self.contenttype)
port_req = self.create_request(port_path, req_body,
self.contenttype, 'POST')
port_res = port_req.get_response(self.api)
port_data = wsgi.Serializer().deserialize(port_res.body,
self.contenttype)
LOG.debug("Creating port for network - END")
return port_data['port']['id']
def _delete_port(self, network_id, port_id):
""" Delete port """
LOG.debug("Deleting port for network %s - START", network_id)
port_path = ("/tenants/tt/networks/%(network_id)s/ports/%(port_id)s" %
locals())
port_req = self.create_request(port_path, None,
self.contenttype, 'DELETE')
port_req.get_response(self.api)
LOG.debug("Deleting port for network - END")
def _delete_network(self, network_id):
""" Delete network """
LOG.debug("Deleting network %s - START", network_id)
network_path = "/tenants/tt/networks/%s" % network_id
network_req = self.create_request(network_path, None,
self.contenttype, 'DELETE')
network_req.get_response(self.api)
LOG.debug("Deleting network - END")
    def test_associate_portprofile(self):
        """Associate a port profile with a freshly created port.

        Creates a network and a port, creates a profile, PUTs the
        associate_portprofile member action, then disassociates and
        deletes everything again.
        """
        LOG.debug("test_associate_portprofile - START")
        net_id = self._create_network()
        port_id = self._create_port(net_id, "ACTIVE")
        req_body = jsonutils.dumps(self.test_port_profile)
        index_response = self.test_app.post(
            self.profile_path, req_body,
            content_type=self.contenttype)
        resp_body = wsgi.Serializer().deserialize(index_response.body,
                                                  self.contenttype)
        test_port_assign_data = {
            'portprofile': {
                'network-id': net_id,
                'port-id': port_id,
            },
        }
        req_assign_body = jsonutils.dumps(test_port_assign_data)
        associate_path_temp = (
            self.portprofile_path +
            resp_body['portprofiles']['portprofile']['id'] +
            "/associate_portprofile")
        associate_path = str(associate_path_temp)
        associate_response = self.test_app.put(
            associate_path, req_assign_body,
            content_type=self.contenttype)
        self.assertEqual(200, associate_response.status_int)
        # Clean Up - disassociate first, then delete the port profile,
        # then remove the port and network created for this test.
        disassociate_path_temp = (
            self.portprofile_path +
            resp_body['portprofiles']['portprofile']['id'] +
            "/disassociate_portprofile")
        disassociate_path = str(disassociate_path_temp)
        delete_path_temp = (self.portprofile_path +
                            resp_body['portprofiles']['portprofile']['id'])
        delete_path = str(delete_path_temp)
        self.tear_down_associate_profile(delete_path, disassociate_path,
                                         req_assign_body)
        self.tear_down_port_network(net_id, port_id)
        LOG.debug("test_associate_portprofile - END")
def test_associate_portprofileDNE(self, portprofile_id='100'):
""" Test associate portprofile does not exist"""
LOG.debug("test_associate_portprofileDNE - START")
test_port_assign_data = {
'portprofile': {
'network-id': '001',
'port-id': '1',
},
}
req_assign_body = jsonutils.dumps(test_port_assign_data)
associate_path = (self.portprofile_path +
portprofile_id +
"/associate_portprofile")
associate_response = self.test_app.put(
associate_path, req_assign_body,
content_type=self.contenttype, status='*')
self.assertEqual(450, associate_response.status_int)
LOG.debug("test_associate_portprofileDNE - END")
    def test_disassociate_portprofile(self):
        """Associate a port profile with a port, then disassociate it.

        Creates a network/port/profile, associates the profile, PUTs the
        disassociate_portprofile member action and expects HTTP 200,
        then deletes the profile, port and network.
        """
        LOG.debug("test_disassociate_portprofile - START")
        net_id = self._create_network()
        port_id = self._create_port(net_id, "ACTIVE")
        req_body = jsonutils.dumps(self.test_port_profile)
        index_response = self.test_app.post(
            self.profile_path, req_body,
            content_type=self.contenttype)
        resp_body = wsgi.Serializer().deserialize(index_response.body,
                                                  self.contenttype)
        test_port_assign_data = {
            'portprofile': {
                'network-id': net_id,
                'port-id': port_id,
            },
        }
        req_assign_body = jsonutils.dumps(test_port_assign_data)
        associate_path_temp = (self.portprofile_path +
                               resp_body['portprofiles']['portprofile']['id'] +
                               "/associate_portprofile")
        associate_path = str(associate_path_temp)
        self.test_app.put(associate_path, req_assign_body,
                          content_type=self.contenttype)
        disassociate_path_temp = (
            self.portprofile_path +
            resp_body['portprofiles']['portprofile']['id'] +
            "/disassociate_portprofile")
        disassociate_path = str(disassociate_path_temp)
        disassociate_response = self.test_app.put(
            disassociate_path, req_assign_body,
            content_type=self.contenttype)
        self.assertEqual(200, disassociate_response.status_int)
        resp_body = wsgi.Serializer().deserialize(index_response.body,
                                                  self.contenttype)
        delete_path_temp = (self.portprofile_path +
                            resp_body['portprofiles']['portprofile']['id'])
        delete_path = str(delete_path_temp)
        self.tear_down_profile(delete_path)
        self.tear_down_port_network(net_id, port_id)
        LOG.debug("test_disassociate_portprofile - END")
    def tear_down_port_network(self, net_id, port_id):
        """Delete the given port and then its network (port first)."""
        self._delete_port(net_id, port_id)
        self._delete_network(net_id)
    def tear_down_profile(self, delete_profile_path):
        """Delete the port profile at the given path."""
        self.test_app.delete(delete_profile_path)
    def tear_down_associate_profile(self, delete_profile_path,
                                    dissociate_profile_path, req_body):
        """Disassociate the profile from its port, then delete it."""
        self.test_app.put(dissociate_profile_path, req_body,
                          content_type=self.contenttype)
        self.tear_down_profile(delete_profile_path)
    def tearDown(self):
        """Reset the plugin database between tests."""
        db.clear_db()
class NovatenantExtensionTest(unittest.TestCase):
    """Tests for the Cisco 'novatenants' extension resource
    (schedule_host / associate_port member actions)."""

    def setUp(self):
        """Wire the novatenants controller into an extensions test app."""
        parent_resource = dict(member_name="tenant",
                               collection_name="extensions/csco/tenants")
        member_actions = {'schedule_host': "PUT",
                          'associate_port': "PUT"}
        controller = novatenant.NovatenantsController(
            QuantumManager.get_plugin())
        res_ext = extensions.ResourceExtension('novatenants', controller,
                                               parent=parent_resource,
                                               member_actions=member_actions)
        self.test_app = setup_extensions_test_app(
            SimpleExtensionManager(res_ext))
        self.contenttype = 'application/json'
        self.novatenants_path = '/extensions/csco/tenants/tt/novatenants/'
        self.test_associate_port_data = {
            'novatenant': {
                'instance_id': 1,
                'instance_desc': {
                    'project_id': 'demo',
                    'user_id': 'root',
                    'vif_id': '23432423',
                },
            },
        }
        self.test_associate_data = {
            'novatenant': {
                'instance_id': 1,
                'instance_desc': {
                    'project_id': 'demo',
                    'user_id': 'root',
                },
            },
        }
        self._l2network_plugin = l2network_plugin.L2Network()

    def test_schedule_host(self):
        """PUT schedule_host for an instance should return HTTP 200."""
        LOG.debug("test_schedule_host - START")
        req_body = jsonutils.dumps(self.test_associate_data)
        host_path = self.novatenants_path + "001/schedule_host"
        host_response = self.test_app.put(
            host_path, req_body,
            content_type=self.contenttype)
        self.assertEqual(200, host_response.status_int)
        LOG.debug("test_schedule_host - END")

    def test_schedule_hostBADRequest(self):
        """PUT schedule_host with a malformed body should return HTTP 400."""
        LOG.debug("test_schedule_hostBADRequest - START")
        host_path = self.novatenants_path + "001/schedule_host"
        host_response = self.test_app.put(
            host_path, 'BAD_REQUEST',
            content_type=self.contenttype, status='*')
        self.assertEqual(400, host_response.status_int)
        LOG.debug("test_schedule_hostBADRequest - END")

    def test_associate_port(self):
        """PUT associate_port for an instance should return HTTP 200."""
        LOG.debug("test_associate_port - START")
        req_body = jsonutils.dumps(self.test_associate_port_data)
        associate_port_path = self.novatenants_path + "001/associate_port"
        associate_port_response = self.test_app.put(
            associate_port_path, req_body,
            content_type=self.contenttype)
        self.assertEqual(200, associate_port_response.status_int)
        LOG.debug("test_associate_port - END")

    def tearDown(self):
        """Reset the plugin database between tests."""
        db.clear_db()
class QosExtensionTest(unittest.TestCase):
    """CRUD tests for the Cisco 'qos' extension resource."""

    def setUp(self):
        """Wire the qos controller into an extensions test app."""
        parent_resource = dict(member_name="tenant",
                               collection_name="extensions/csco/tenants")
        controller = qos.QosController(QuantumManager.get_plugin())
        res_ext = extensions.ResourceExtension('qos', controller,
                                               parent=parent_resource)
        self.test_app = setup_extensions_test_app(
            SimpleExtensionManager(res_ext))
        self.contenttype = 'application/json'
        self.qos_path = '/extensions/csco/tenants/tt/qos'
        self.qos_second_path = '/extensions/csco/tenants/tt/qos/'
        self.test_qos_data = {
            'qos': {
                'qos_name': 'cisco_test_qos',
                'qos_desc': {
                    'PPS': 50,
                    'TTL': 5,
                },
            },
        }
        self._l2network_plugin = l2network_plugin.L2Network()

    def test_create_qos(self):
        """POST a qos and expect HTTP 200."""
        LOG.debug("test_create_qos - START")
        req_body = jsonutils.dumps(self.test_qos_data)
        index_response = self.test_app.post(self.qos_path,
                                            req_body,
                                            content_type=self.contenttype)
        self.assertEqual(200, index_response.status_int)
        # Clean Up - Delete the qos
        resp_body = wsgi.Serializer().deserialize(index_response.body,
                                                  self.contenttype)
        qos_path_temp = self.qos_second_path + resp_body['qoss']['qos']['id']
        qos_path = str(qos_path_temp)
        self.tearDownQos(qos_path)
        LOG.debug("test_create_qos - END")

    def test_create_qosBADRequest(self):
        """POST a malformed qos body and expect HTTP 400."""
        LOG.debug("test_create_qosBADRequest - START")
        index_response = self.test_app.post(self.qos_path,
                                            'BAD_REQUEST',
                                            content_type=self.contenttype,
                                            status='*')
        self.assertEqual(400, index_response.status_int)
        LOG.debug("test_create_qosBADRequest - END")

    def test_list_qoss(self):
        """Create two qos entries and verify both appear in the index."""
        LOG.debug("test_list_qoss - START")
        req_body1 = jsonutils.dumps(self.test_qos_data)
        create_resp1 = self.test_app.post(self.qos_path, req_body1,
                                          content_type=self.contenttype)
        req_body2 = jsonutils.dumps({
            'qos': {
                'qos_name': 'cisco_test_qos2',
                'qos_desc': {
                    'PPS': 50,
                    'TTL': 5,
                },
            },
        })
        create_resp2 = self.test_app.post(self.qos_path, req_body2,
                                          content_type=self.contenttype)
        index_response = self.test_app.get(self.qos_path)
        index_resp_body = wsgi.Serializer().deserialize(index_response.body,
                                                        self.contenttype)
        self.assertEqual(200, index_response.status_int)
        # Clean Up - Delete the qos's
        resp_body1 = wsgi.Serializer().deserialize(create_resp1.body,
                                                   self.contenttype)
        qos_path1_temp = self.qos_second_path + resp_body1['qoss']['qos']['id']
        qos_path1 = str(qos_path1_temp)
        resp_body2 = wsgi.Serializer().deserialize(create_resp2.body,
                                                   self.contenttype)
        # The index order is not guaranteed, so check membership instead
        # of position-by-position equality.
        list_all_qos = [resp_body1['qoss']['qos'], resp_body2['qoss']['qos']]
        self.assertTrue(index_resp_body['qoss'][0] in list_all_qos)
        self.assertTrue(index_resp_body['qoss'][1] in list_all_qos)
        qos_path2_temp = self.qos_second_path + resp_body2['qoss']['qos']['id']
        qos_path2 = str(qos_path2_temp)
        self.tearDownQos(qos_path1)
        self.tearDownQos(qos_path2)
        LOG.debug("test_list_qoss - END")

    def test_show_qos(self):
        """Create a qos, GET it back, and verify its name."""
        LOG.debug("test_show_qos - START")
        req_body = jsonutils.dumps(self.test_qos_data)
        index_response = self.test_app.post(self.qos_path, req_body,
                                            content_type=self.contenttype)
        resp_body = wsgi.Serializer().deserialize(index_response.body,
                                                  self.contenttype)
        show_path_temp = self.qos_second_path + resp_body['qoss']['qos']['id']
        show_qos_path = str(show_path_temp)
        show_response = self.test_app.get(show_qos_path)
        show_resp_dict = wsgi.Serializer().deserialize(show_response.body,
                                                       self.contenttype)
        self.assertEqual(show_resp_dict['qoss']['qos']['name'],
                         self.test_qos_data['qos']['qos_name'])
        self.assertEqual(200, show_response.status_int)
        # Clean Up - Delete the qos
        self.tearDownQos(show_qos_path)
        LOG.debug("test_show_qos - END")

    def test_show_qosDNE(self, qos_id='100'):
        """GET a nonexistent qos and expect status 452."""
        LOG.debug("test_show_qosDNE - START")
        show_path_temp = self.qos_second_path + qos_id
        show_qos_path = str(show_path_temp)
        show_response = self.test_app.get(show_qos_path, status='*')
        self.assertEqual(452, show_response.status_int)
        LOG.debug("test_show_qosDNE - END")

    def test_update_qos(self):
        """Create a qos, rename it via PUT, and verify the new name."""
        LOG.debug("test_update_qos - START")
        req_body = jsonutils.dumps(self.test_qos_data)
        index_response = self.test_app.post(self.qos_path, req_body,
                                            content_type=self.contenttype)
        resp_body = wsgi.Serializer().deserialize(index_response.body,
                                                  self.contenttype)
        rename_req_body = jsonutils.dumps({
            'qos': {
                'qos_name': 'cisco_rename_qos',
                'qos_desc': {
                    'PPS': 50,
                    'TTL': 5,
                },
            },
        })
        rename_path_temp = (self.qos_second_path +
                            resp_body['qoss']['qos']['id'])
        rename_path = str(rename_path_temp)
        rename_response = self.test_app.put(rename_path, rename_req_body,
                                            content_type=self.contenttype)
        self.assertEqual(200, rename_response.status_int)
        rename_resp_dict = wsgi.Serializer().deserialize(rename_response.body,
                                                         self.contenttype)
        self.assertEqual(rename_resp_dict['qoss']['qos']['name'],
                         'cisco_rename_qos')
        self.tearDownQos(rename_path)
        LOG.debug("test_update_qos - END")

    def test_update_qosDNE(self, qos_id='100'):
        """PUT a rename for a nonexistent qos and expect status 452."""
        LOG.debug("test_update_qosDNE - START")
        rename_req_body = jsonutils.dumps({
            'qos': {
                'qos_name': 'cisco_rename_qos',
                'qos_desc': {
                    'PPS': 50,
                    'TTL': 5,
                },
            },
        })
        rename_path_temp = self.qos_second_path + qos_id
        rename_path = str(rename_path_temp)
        rename_response = self.test_app.put(rename_path, rename_req_body,
                                            content_type=self.contenttype,
                                            status='*')
        self.assertEqual(452, rename_response.status_int)
        LOG.debug("test_update_qosDNE - END")

    def test_update_qosBADRequest(self):
        """PUT a malformed qos body and expect HTTP 400."""
        LOG.debug("test_update_qosBADRequest - START")
        req_body = jsonutils.dumps(self.test_qos_data)
        index_response = self.test_app.post(self.qos_path, req_body,
                                            content_type=self.contenttype)
        resp_body = wsgi.Serializer().deserialize(index_response.body,
                                                  self.contenttype)
        rename_path_temp = (self.qos_second_path +
                            resp_body['qoss']['qos']['id'])
        rename_path = str(rename_path_temp)
        rename_response = self.test_app.put(rename_path, 'BAD_REQUEST',
                                            status="*")
        self.assertEqual(400, rename_response.status_int)
        # Clean Up - Delete the qos created above
        self.tearDownQos(rename_path)
        LOG.debug("test_update_qosBADRequest - END")

    def test_delete_qos(self):
        """Create a qos, DELETE it, and expect HTTP 200."""
        LOG.debug("test_delete_qos - START")
        req_body = jsonutils.dumps({
            'qos': {
                'qos_name': 'cisco_test_qos',
                'qos_desc': {
                    'PPS': 50,
                    'TTL': 5,
                },
            },
        })
        index_response = self.test_app.post(self.qos_path, req_body,
                                            content_type=self.contenttype)
        resp_body = wsgi.Serializer().deserialize(index_response.body,
                                                  self.contenttype)
        delete_path_temp = (self.qos_second_path +
                            resp_body['qoss']['qos']['id'])
        delete_path = str(delete_path_temp)
        delete_response = self.test_app.delete(delete_path)
        self.assertEqual(200, delete_response.status_int)
        LOG.debug("test_delete_qos - END")

    def test_delete_qosDNE(self, qos_id='100'):
        """DELETE a nonexistent qos and expect status 452."""
        LOG.debug("test_delete_qosDNE - START")
        delete_path_temp = self.qos_second_path + qos_id
        delete_path = str(delete_path_temp)
        delete_response = self.test_app.delete(delete_path, status='*')
        self.assertEqual(452, delete_response.status_int)
        LOG.debug("test_delete_qosDNE - END")

    def tearDownQos(self, delete_profile_path):
        """Delete the qos at the given path."""
        self.test_app.delete(delete_profile_path)

    def tearDown(self):
        """Reset the plugin database between tests."""
        db.clear_db()
class CredentialExtensionTest(unittest.TestCase):
    """CRUD tests for the Cisco 'credentials' extension resource."""

    def setUp(self):
        """Wire the credential controller into an extensions test app."""
        parent_resource = dict(member_name="tenant",
                               collection_name="extensions/csco/tenants")
        controller = credential.CredentialController(QuantumManager.
                                                     get_plugin())
        res_ext = extensions.ResourceExtension('credentials', controller,
                                               parent=parent_resource)
        self.test_app = setup_extensions_test_app(SimpleExtensionManager(
            res_ext))
        self.contenttype = 'application/json'
        self.credential_path = '/extensions/csco/tenants/tt/credentials'
        self.cred_second_path = '/extensions/csco/tenants/tt/credentials/'
        self.test_credential_data = {
            'credential': {
                'credential_name': 'cred8',
                'user_name': 'newUser2',
                'password': 'newPasswd1',
            },
        }
        self._l2network_plugin = l2network_plugin.L2Network()

    def test_list_credentials(self):
        """Create two credentials and verify both appear in the index."""
        #Create Credential before listing
        LOG.debug("test_list_credentials - START")
        req_body1 = jsonutils.dumps(self.test_credential_data)
        create_response1 = self.test_app.post(
            self.credential_path, req_body1,
            content_type=self.contenttype)
        req_body2 = jsonutils.dumps({
            'credential': {
                'credential_name': 'cred9',
                'user_name': 'newUser2',
                'password': 'newPasswd2',
            },
        })
        create_response2 = self.test_app.post(
            self.credential_path, req_body2,
            content_type=self.contenttype)
        index_response = self.test_app.get(self.credential_path)
        index_resp_body = wsgi.Serializer().deserialize(index_response.body,
                                                        self.contenttype)
        self.assertEqual(200, index_response.status_int)
        #CLean Up - Deletion of the Credentials
        resp_body1 = wsgi.Serializer().deserialize(create_response1.body,
                                                   self.contenttype)
        delete_path1_temp = (self.cred_second_path +
                             resp_body1['credentials']['credential']['id'])
        delete_path1 = str(delete_path1_temp)
        resp_body2 = wsgi.Serializer().deserialize(create_response2.body,
                                                   self.contenttype)
        # Index order is not guaranteed; check membership, not position.
        list_all_credential = [resp_body1['credentials']['credential'],
                               resp_body2['credentials']['credential']]
        self.assertTrue(
            index_resp_body['credentials'][0] in list_all_credential)
        self.assertTrue(
            index_resp_body['credentials'][1] in list_all_credential)
        delete_path2_temp = (self.cred_second_path +
                             resp_body2['credentials']['credential']['id'])
        delete_path2 = str(delete_path2_temp)
        self.tearDownCredential(delete_path1)
        self.tearDownCredential(delete_path2)
        LOG.debug("test_list_credentials - END")

    def test_create_credential(self):
        """POST a credential and expect HTTP 200."""
        LOG.debug("test_create_credential - START")
        req_body = jsonutils.dumps(self.test_credential_data)
        index_response = self.test_app.post(
            self.credential_path, req_body,
            content_type=self.contenttype)
        self.assertEqual(200, index_response.status_int)
        #CLean Up - Deletion of the Credentials
        resp_body = wsgi.Serializer().deserialize(
            index_response.body, self.contenttype)
        delete_path_temp = (self.cred_second_path +
                            resp_body['credentials']['credential']['id'])
        delete_path = str(delete_path_temp)
        self.tearDownCredential(delete_path)
        LOG.debug("test_create_credential - END")

    def test_create_credentialBADRequest(self):
        """POST a malformed credential body and expect HTTP 400."""
        LOG.debug("test_create_credentialBADRequest - START")
        index_response = self.test_app.post(
            self.credential_path, 'BAD_REQUEST',
            content_type=self.contenttype, status='*')
        self.assertEqual(400, index_response.status_int)
        LOG.debug("test_create_credentialBADRequest - END")

    def test_show_credential(self):
        """Create a credential, GET it back, and verify its fields."""
        LOG.debug("test_show_credential - START")
        req_body = jsonutils.dumps(self.test_credential_data)
        index_response = self.test_app.post(
            self.credential_path, req_body,
            content_type=self.contenttype)
        resp_body = wsgi.Serializer().deserialize(index_response.body,
                                                  self.contenttype)
        show_path_temp = (self.cred_second_path +
                          resp_body['credentials']['credential']['id'])
        show_cred_path = str(show_path_temp)
        show_response = self.test_app.get(show_cred_path)
        show_resp_dict = wsgi.Serializer().deserialize(show_response.body,
                                                       self.contenttype)
        # NOTE(review): this compares the shown 'name' against the request's
        # 'user_name' (not 'credential_name') — looks deliberate given the
        # controller's response shape, but worth confirming.
        self.assertEqual(show_resp_dict['credentials']['credential']['name'],
                         self.test_credential_data['credential']['user_name'])
        self.assertEqual(
            show_resp_dict['credentials']['credential']['password'],
            self.test_credential_data['credential']['password'])
        self.assertEqual(200, show_response.status_int)
        LOG.debug("test_show_credential - END")

    def test_show_credentialDNE(self, credential_id='100'):
        """GET a nonexistent credential and expect status 451."""
        LOG.debug("test_show_credentialDNE - START")
        show_path_temp = self.cred_second_path + credential_id
        show_cred_path = str(show_path_temp)
        show_response = self.test_app.get(show_cred_path, status='*')
        self.assertEqual(451, show_response.status_int)
        LOG.debug("test_show_credentialDNE - END")

    def test_update_credential(self):
        """Create a credential, rename it via PUT, and verify the result."""
        LOG.debug("test_update_credential - START")
        req_body = jsonutils.dumps(self.test_credential_data)
        index_response = self.test_app.post(
            self.credential_path, req_body,
            content_type=self.contenttype)
        resp_body = wsgi.Serializer().deserialize(
            index_response.body, self.contenttype)
        rename_req_body = jsonutils.dumps({
            'credential': {
                'credential_name': 'cred3',
                'user_name': 'RenamedUser',
                'password': 'Renamedpassword',
            },
        })
        rename_path_temp = (self.cred_second_path +
                            resp_body['credentials']['credential']['id'])
        rename_path = str(rename_path_temp)
        rename_response = self.test_app.put(rename_path, rename_req_body,
                                            content_type=self.contenttype)
        rename_resp_dict = wsgi.Serializer().deserialize(rename_response.body,
                                                         self.contenttype)
        self.assertEqual(rename_resp_dict['credentials']['credential']['name'],
                         'cred3')
        self.assertEqual(
            rename_resp_dict['credentials']['credential']['password'],
            self.test_credential_data['credential']['password'])
        self.assertEqual(200, rename_response.status_int)
        # Clean Up - Delete the Credentials
        self.tearDownCredential(rename_path)
        LOG.debug("test_update_credential - END")

    def test_update_credBADReq(self):
        """PUT a malformed credential body and expect HTTP 400."""
        LOG.debug("test_update_credBADReq - START")
        req_body = jsonutils.dumps(self.test_credential_data)
        index_response = self.test_app.post(
            self.credential_path, req_body,
            content_type=self.contenttype)
        resp_body = wsgi.Serializer().deserialize(
            index_response.body, self.contenttype)
        rename_path_temp = (self.cred_second_path +
                            resp_body['credentials']['credential']['id'])
        rename_path = str(rename_path_temp)
        rename_response = self.test_app.put(rename_path, 'BAD_REQUEST',
                                            status='*')
        self.assertEqual(400, rename_response.status_int)
        LOG.debug("test_update_credBADReq - END")

    def test_update_credentialDNE(self, credential_id='100'):
        """PUT a rename for a nonexistent credential and expect status 451."""
        LOG.debug("test_update_credentialDNE - START")
        rename_req_body = jsonutils.dumps({
            'credential': {
                'credential_name': 'cred3',
                'user_name': 'RenamedUser',
                'password': 'Renamedpassword',
            },
        })
        rename_path_temp = self.cred_second_path + credential_id
        rename_path = str(rename_path_temp)
        rename_response = self.test_app.put(rename_path, rename_req_body,
                                            content_type=self.contenttype,
                                            status='*')
        self.assertEqual(451, rename_response.status_int)
        LOG.debug("test_update_credentialDNE - END")

    def test_delete_credential(self):
        """Create a credential, DELETE it, and expect HTTP 200."""
        LOG.debug("test_delete_credential - START")
        req_body = jsonutils.dumps(self.test_credential_data)
        index_response = self.test_app.post(
            self.credential_path, req_body,
            content_type=self.contenttype)
        resp_body = wsgi.Serializer().deserialize(
            index_response.body, self.contenttype)
        delete_path_temp = (self.cred_second_path +
                            resp_body['credentials']['credential']['id'])
        delete_path = str(delete_path_temp)
        delete_response = self.test_app.delete(delete_path)
        self.assertEqual(200, delete_response.status_int)
        LOG.debug("test_delete_credential - END")

    def test_delete_credentialDNE(self, credential_id='100'):
        """DELETE a nonexistent credential and expect status 451."""
        LOG.debug("test_delete_credentialDNE - START")
        delete_path_temp = self.cred_second_path + credential_id
        delete_path = str(delete_path_temp)
        delete_response = self.test_app.delete(delete_path, status='*')
        self.assertEqual(451, delete_response.status_int)
        LOG.debug("test_delete_credentialDNE - END")

    def tearDownCredential(self, delete_path):
        """Delete the credential at the given path."""
        self.test_app.delete(delete_path)

    def tearDown(self):
        """Reset the plugin database between tests."""
        db.clear_db()
class MultiPortExtensionTest(unittest.TestCase):
    """Tests for the Cisco 'multiport' extension (creating ports on
    several networks in a single request)."""

    def setUp(self):
        """Wire the multiport controller into an extensions test app."""
        parent_resource = dict(member_name="tenant",
                               collection_name="extensions/csco/tenants")
        controller = multiport.MultiportController(
            QuantumManager.get_plugin())
        res_ext = extensions.ResourceExtension('multiport', controller,
                                               parent=parent_resource)
        self.test_app = setup_extensions_test_app(
            SimpleExtensionManager(res_ext))
        self.contenttype = 'application/json'
        self.multiport_path = '/extensions/csco/tenants/tt/multiport'
        self.multiport_path2 = '/extensions/csco/tenants/tt/multiport/'
        self.test_multi_port = {
            'multiport': {
                'net_id_list': '1',
                'status': 'test-qos1',
                'ports_desc': 'Port Descr',
            },
        }
        self.tenant_id = "test_tenant"
        self.network_name = "test_network"
        self.api = server.APIRouterV10()
        self._l2network_plugin = l2network_plugin.L2Network()

    def create_request(self, path, body, content_type, method='GET'):
        """Build a bare webob request with only an Accept header set."""
        LOG.debug("test_create_request - START")
        req = webob.Request.blank(path)
        req.method = method
        req.headers = {}
        req.headers['Accept'] = content_type
        req.body = body
        LOG.debug("test_create_request - END")
        return req

    def _create_network(self, name=None):
        """Create a network through the core API and return its id."""
        LOG.debug("Creating network - START")
        if name:
            net_name = name
        else:
            net_name = self.network_name
        net_path = "/tenants/tt/networks"
        net_data = {'network': {'name': '%s' % net_name}}
        req_body = wsgi.Serializer().serialize(net_data, self.contenttype)
        network_req = self.create_request(net_path, req_body,
                                          self.contenttype, 'POST')
        network_res = network_req.get_response(self.api)
        network_data = wsgi.Serializer().deserialize(network_res.body,
                                                     self.contenttype)
        LOG.debug("Creating network - END")
        return network_data['network']['id']

    def _delete_network(self, network_id):
        """Delete the given network through the core API."""
        LOG.debug("Deleting network %s - START", network_id)
        network_path = "/tenants/tt/networks/%s" % network_id
        network_req = self.create_request(network_path, None,
                                          self.contenttype, 'DELETE')
        network_req.get_response(self.api)
        LOG.debug("Deleting network - END")

    def test_create_multiport(self):
        """Create ports on two networks at once; one port per network."""
        LOG.debug("test_create_multiport - START")
        net_id = self._create_network('net1')
        net_id2 = self._create_network('net2')
        test_multi_port = {
            'multiport': {
                'net_id_list': [net_id, net_id2],
                'status': 'ACTIVE',
                'ports_desc': {
                    'key': 'value',
                },
            },
        }
        req_body = jsonutils.dumps(test_multi_port)
        index_response = self.test_app.post(self.multiport_path, req_body,
                                            content_type=self.contenttype)
        resp_body = wsgi.Serializer().deserialize(index_response.body,
                                                  self.contenttype)
        self.assertEqual(200, index_response.status_int)
        # One created port is expected per requested network id.
        self.assertEqual(len(test_multi_port['multiport']['net_id_list']),
                         len(resp_body['ports']))
        # Clean Up - delete the networks created for this test
        self._delete_network(net_id)
        self._delete_network(net_id2)
        LOG.debug("test_create_multiport - END")

    def test_create_multiportBADRequest(self):
        """POST a malformed multiport body and expect HTTP 400."""
        LOG.debug("test_create_multiportBADRequest - START")
        net_id = self._create_network('net1')
        net_id2 = self._create_network('net2')
        index_response = self.test_app.post(self.multiport_path, 'BAD_REQUEST',
                                            content_type=self.contenttype,
                                            status='*')
        self.assertEqual(400, index_response.status_int)
        # Clean Up - delete the networks created for this test
        self._delete_network(net_id)
        self._delete_network(net_id2)
        LOG.debug("test_create_multiportBADRequest - END")

    def tearDown(self):
        """Reset the plugin database between tests."""
        db.clear_db()
def app_factory(global_conf, **local_conf):
    """Paste app factory: merge global and local config into the test app."""
    merged_conf = dict(global_conf)
    merged_conf.update(local_conf)
    return ExtensionsTestApp(merged_conf)
def setup_extensions_middleware(extension_manager=None):
    """Wrap the paste-loaded test app in ExtensionMiddleware.

    :param extension_manager: optional manager; a plugin-aware one is
        built by default.
    """
    if not extension_manager:
        extension_manager = PluginAwareExtensionManager(EXTENSIONS_PATH,
                                                        L2Network())
    app = config.load_paste_app('extensions_test_app')
    return ExtensionMiddleware(app, ext_mgr=extension_manager)
def setup_extensions_test_app(extension_manager=None):
    """Return a webtest TestApp wrapping the extensions middleware."""
    middleware = setup_extensions_middleware(extension_manager)
    return TestApp(middleware)
class SimpleExtensionManager(object):
    """Minimal extension manager holding at most one extension per kind."""

    def __init__(self, resource_ext=None, action_ext=None, request_ext=None):
        # Each slot stores a single optional extension object (or None).
        self.resource_ext = resource_ext
        self.action_ext = action_ext
        self.request_ext = request_ext

    def get_resources(self):
        """Return the resource extensions as a (possibly empty) list."""
        return [self.resource_ext] if self.resource_ext else []

    def get_actions(self):
        """Return the action extensions as a (possibly empty) list."""
        return [self.action_ext] if self.action_ext else []

    def get_request_extensions(self):
        """Return the request extensions as a (possibly empty) list."""
        return [self.request_ext] if self.request_ext else []
| |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import *
import unittest
import datetime as pydt
import logging
import uuid
import json
import bson.json_util as bju
import numpy as np
import copy
# Our imports
import emission.storage.decorations.trip_queries as esdt
import emission.storage.decorations.analysis_timeseries_queries as esda
import emission.storage.timeseries.timequery as estt
import emission.storage.timeseries.abstract_timeseries as esta
import emission.net.api.usercache as enau
import emission.core.get_database as edb
import emission.core.wrapper.userlabel as ecul
import emission.core.wrapper.rawtrip as ecwrt
import emission.core.wrapper.section as ecwc
import emission.core.wrapper.stop as ecws
import emission.core.wrapper.entry as ecwe
import emission.tests.storageTests.analysis_ts_common as etsa
import emission.tests.common as etc
class TestTripQueries(unittest.TestCase):
    """Exercise trip-related storage decorations: trip time queries,
    section/stop retrieval, and matching of user inputs (mode/purpose
    confirmations) to trips, both from the usercache and the timeseries.
    """

    def setUp(self):
        # Stable, name-derived UUID so repeated runs address the same user.
        self.testUserId = uuid.uuid3(uuid.NAMESPACE_URL, "mailto:test@test.me")
        edb.get_analysis_timeseries_db().delete_many({'user_id': self.testUserId})

    def tearDown(self):
        # Drop everything this user may have written during the test.
        edb.get_analysis_timeseries_db().delete_many({'user_id': self.testUserId})
        edb.get_timeseries_db().delete_many({'user_id': self.testUserId})
        edb.get_usercache_db().delete_many({'user_id': self.testUserId})

    def create_fake_trip(self):
        """Insert and return a canned raw trip (asserted below to span ts 5..6)."""
        return etsa.createNewTripLike(self, esda.RAW_TRIP_KEY, ecwrt.Rawtrip)

    def testGetTimeRangeForTrip(self):
        new_trip = self.create_fake_trip()
        ret_tq = esda.get_time_query_for_trip_like(esda.RAW_TRIP_KEY, new_trip.get_id())
        self.assertEqual(ret_tq.timeType, "data.ts")
        self.assertEqual(ret_tq.startTs, 5)
        self.assertEqual(ret_tq.endTs, 6)

    def testQuerySectionsForTrip(self):
        new_trip = self.create_fake_trip()
        new_section = ecwc.Section()
        new_section.trip_id = new_trip.get_id()
        new_section.start_ts = 5
        new_section.end_ts = 6
        ts = esta.TimeSeries.get_time_series(self.testUserId)
        ts.insert_data(self.testUserId, esda.RAW_SECTION_KEY, new_section)
        ret_entries = esdt.get_raw_sections_for_trip(self.testUserId, new_trip.get_id())
        self.assertEqual([entry.data for entry in ret_entries], [new_section])

    def testQueryStopsForTrip(self):
        new_trip = self.create_fake_trip()
        new_stop = ecws.Stop()
        new_stop.trip_id = new_trip.get_id()
        new_stop.enter_ts = 5
        new_stop.exit_ts = 6
        ts = esta.TimeSeries.get_time_series(self.testUserId)
        ts.insert_data(self.testUserId, esda.RAW_STOP_KEY, new_stop)
        ret_entries = esdt.get_raw_stops_for_trip(self.testUserId, new_trip.get_id())
        self.assertEqual([entry.data for entry in ret_entries], [new_stop])

    def testUserInputForTripNoInputs(self):
        """
        Test the case in which the user has not provided any inputs
        """
        new_trip = self.create_fake_trip()
        user_input = esdt.get_user_input_for_trip(esda.RAW_TRIP_KEY, self.testUserId, new_trip.get_id(), "manual/mode_confirm")
        self.assertIsNone(user_input)

    def testUserInputForTripOneInputFromCache(self):
        """
        Test the case in which the user has provided exactly one input,
        retrieved through the usercache series.
        """
        MODE_CONFIRM_KEY = "manual/mode_confirm"
        new_trip = self.create_fake_trip()
        new_mc = ecul.Userlabel()
        # Input timestamps fall just after the trip so it matches the trip.
        new_mc["start_ts"] = new_trip.data.start_ts + 1
        new_mc["end_ts"] = new_trip.data.end_ts + 1
        new_mc["label"] = "roller_blading"
        new_mce = ecwe.Entry.create_entry(self.testUserId, MODE_CONFIRM_KEY, new_mc)
        new_mce["metadata"]["type"] = "message"
        enau.sync_phone_to_server(self.testUserId, [new_mce])
        user_input = esdt.get_user_input_from_cache_series(self.testUserId, new_trip, MODE_CONFIRM_KEY)
        self.assertEqual(new_mce, user_input)

    def testUserInputForTripOneInput(self):
        """
        Test the case in which the user has provided exactly one input,
        retrieved through the long-term timeseries.
        """
        MODE_CONFIRM_KEY = "manual/mode_confirm"
        new_trip = self.create_fake_trip()
        new_mc = ecul.Userlabel()
        new_mc["start_ts"] = new_trip.data.start_ts + 1
        new_mc["end_ts"] = new_trip.data.end_ts + 1
        new_mc["label"] = "pogo_sticking"
        ts = esta.TimeSeries.get_time_series(self.testUserId)
        ts.insert_data(self.testUserId, MODE_CONFIRM_KEY, new_mc)
        user_input = esdt.get_user_input_for_trip(esda.RAW_TRIP_KEY, self.testUserId,
                                                  new_trip.get_id(), MODE_CONFIRM_KEY)
        self.assertEqual(new_mc, user_input.data)

    def testUserInputForTripTwoInputFromCache(self):
        """
        Test the case in which the user has provided two inputs; the
        later input should override the earlier one.
        """
        MODE_CONFIRM_KEY = "manual/mode_confirm"
        new_trip = self.create_fake_trip()
        new_mc = ecul.Userlabel()
        new_mc["start_ts"] = new_trip.data.start_ts + 1
        new_mc["end_ts"] = new_trip.data.end_ts + 1
        new_mc["label"] = "roller_blading"
        new_mce = ecwe.Entry.create_entry(self.testUserId, MODE_CONFIRM_KEY, new_mc)
        new_mce["metadata"]["type"] = "message"
        enau.sync_phone_to_server(self.testUserId, [new_mce])
        user_input = esdt.get_user_input_from_cache_series(self.testUserId, new_trip, MODE_CONFIRM_KEY)
        # When there is only one input, it is roller_blading
        self.assertEqual(new_mce, user_input)
        self.assertEqual(user_input.data.label, 'roller_blading')
        new_mc["label"] = 'pogo_sticking'
        new_mce = ecwe.Entry.create_entry(self.testUserId, MODE_CONFIRM_KEY, new_mc)
        new_mce["metadata"]["type"] = "message"
        enau.sync_phone_to_server(self.testUserId, [new_mce])
        user_input = esdt.get_user_input_from_cache_series(self.testUserId, new_trip, MODE_CONFIRM_KEY)
        # When it is overridden, it is pogo sticking
        self.assertEqual(new_mce, user_input)
        self.assertEqual(user_input.data.label, 'pogo_sticking')

    def testUserInputForTripTwoInput(self):
        """
        Test the case in which the user has provided two inputs
        """
        MODE_CONFIRM_KEY = "manual/mode_confirm"
        ts = esta.TimeSeries.get_time_series(self.testUserId)
        new_trip = self.create_fake_trip()
        new_mc = ecul.Userlabel()
        new_mc["start_ts"] = new_trip.data.start_ts + 1
        new_mc["end_ts"] = new_trip.data.end_ts + 1
        new_mc["label"] = "car"
        ts.insert_data(self.testUserId, MODE_CONFIRM_KEY, new_mc)
        user_input = esdt.get_user_input_for_trip(esda.RAW_TRIP_KEY, self.testUserId,
                                                  new_trip.get_id(), MODE_CONFIRM_KEY)
        # When there is only one input, it is a car
        self.assertEqual(new_mc, user_input.data)
        self.assertEqual(user_input.data.label, "car")
        new_mc["label"] = "bike"
        ts.insert_data(self.testUserId, MODE_CONFIRM_KEY, new_mc)
        user_input = esdt.get_user_input_for_trip(esda.RAW_TRIP_KEY, self.testUserId,
                                                  new_trip.get_id(), MODE_CONFIRM_KEY)
        # When it is overridden, it is a bike
        self.assertEqual(new_mc, user_input.data)
        self.assertEqual(user_input.data.label, "bike")

    def testUserInputRealData(self):
        # Fixed seed so any random components of the pipeline are reproducible.
        np.random.seed(61297777)
        dataFile = "emission/tests/data/real_examples/shankari_single_positional_indexer.dec-12"
        etc.setupRealExample(self, dataFile)
        self.testUserId = self.testUUID
        # At this point, we have only raw data, no trips
        etc.runIntakePipeline(self.testUUID)
        # At this point, we have trips
        # Let's retrieve them
        ts = esta.TimeSeries.get_time_series(self.testUUID)
        ct_df = ts.get_data_df("analysis/cleaned_trip", time_query=None)
        self.assertEqual(len(ct_df), 4)
        # Now, let's load the mode_confirm and purpose_confirm objects
        with open("emission/tests/data/real_examples/shankari_single_positional_indexer.dec-12.mode_confirm") as mcfp:
            mode_confirm_list = json.load(mcfp, object_hook=bju.object_hook)
        self.assertEqual(len(mode_confirm_list), 5)
        with open("emission/tests/data/real_examples/shankari_single_positional_indexer.dec-12.purpose_confirm") as pcfp:
            purpose_confirm_list = json.load(pcfp, object_hook=bju.object_hook)
        self.assertEqual(len(purpose_confirm_list), 7)
        # Re-home the canned inputs onto the test user before inserting.
        for mc in mode_confirm_list:
            mc["user_id"] = self.testUUID
            ts.insert(mc)
        for pc in purpose_confirm_list:
            pc["user_id"] = self.testUUID
            ts.insert(pc)
        mc_label_list = []
        pc_label_list = []
        for trip_id in ct_df._id:
            mc = esdt.get_user_input_for_trip(esda.CLEANED_TRIP_KEY,
                                              self.testUserId, trip_id, "manual/mode_confirm")
            mc_label_list.append(mc.data.label)
            pc = esdt.get_user_input_for_trip(esda.CLEANED_TRIP_KEY,
                                              self.testUserId, trip_id, "manual/purpose_confirm")
            pc_label_list.append(pc.data.label)
        self.assertEqual(mc_label_list, 4 * ['bike'])
        self.assertEqual(pc_label_list, 4 * ['pick_drop'])

    def testUserInputRealDataPostArrival(self):
        np.random.seed(61297777)
        dataFile = "emission/tests/data/real_examples/shankari_single_positional_indexer.dec-12"
        etc.setupRealExample(self, dataFile)
        self.testUserId = self.testUUID
        # At this point, we have only raw data, no trips
        etc.runIntakePipeline(self.testUUID)
        # At this point, we have trips
        # Let's retrieve them
        ts = esta.TimeSeries.get_time_series(self.testUUID)
        ct_df = ts.get_data_df("analysis/confirmed_trip", time_query=None)
        self.assertEqual(len(ct_df), 4)
        mode_fmt_times = list(ct_df.start_fmt_time)
        # corresponds to the walk not a trip
        # https://github.com/e-mission/e-mission-docs/issues/476#issuecomment-747115640)
        mode_fmt_times.insert(3, None)
        purpose_fmt_times = copy.copy(mode_fmt_times)
        # corresponds to overrides for the same trip
        # they are correctly matched to the same trip
        # in the final pipeline step, will override the same entry multiple times
        purpose_fmt_times.insert(3, purpose_fmt_times[1])
        purpose_fmt_times.insert(4, purpose_fmt_times[0])
        print("expected_fmt_times: mode = %s" % mode_fmt_times)
        print("expected_fmt_times: purpose = %s" % purpose_fmt_times)
        # Now, let's load the mode_confirm and purpose_confirm objects
        with open("emission/tests/data/real_examples/shankari_single_positional_indexer.dec-12.mode_confirm") as mcfp:
            mode_confirm_list = [ecwe.Entry(mc) for mc in json.load(mcfp, object_hook=bju.object_hook)]
        self.assertEqual(len(mode_confirm_list), 5)
        with open("emission/tests/data/real_examples/shankari_single_positional_indexer.dec-12.purpose_confirm") as pcfp:
            purpose_confirm_list = [ecwe.Entry(pc) for pc in json.load(pcfp, object_hook=bju.object_hook)]
        self.assertEqual(len(purpose_confirm_list), 7)
        mc_trip_start_fmt_time_list = []
        pc_trip_start_fmt_time_list = []
        for mode in mode_confirm_list:
            mc_trip = esdt.get_trip_for_user_input_obj(ts, mode)
            mc_trip_start_fmt_time_list.append(mc_trip.data.start_fmt_time if mc_trip is not None else None)
        for purpose in purpose_confirm_list:
            pc_trip = esdt.get_trip_for_user_input_obj(ts, purpose)
            # BUG FIX: parenthesize the conditional expression. Previously the
            # `%` bound tighter than `if/else`, making the whole print argument
            # the conditional, so a missing trip printed bare "None" instead of
            # "Found pc_trip None".
            print("Found pc_trip %s" % (pc_trip.data.start_fmt_time if pc_trip is not None else None))
            pc_trip_start_fmt_time_list.append(pc_trip.data.start_fmt_time if pc_trip is not None else None)
        self.assertEqual(mc_trip_start_fmt_time_list, mode_fmt_times)
        self.assertEqual(pc_trip_start_fmt_time_list, purpose_fmt_times)
if __name__ == '__main__':
    # Configure logging before handing control to the unittest runner.
    import emission.tests.common as etc
    etc.configLogging()
    unittest.main()
| |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
from six.moves import http_client
from testtools import matchers
from keystone import catalog
from keystone.tests import unit
from keystone.tests.unit.ksfixtures import database
from keystone.tests.unit import test_v3
class CatalogTestCase(test_v3.RestfulTestCase):
"""Test service & endpoint CRUD."""
# region crud tests
    def test_create_region_with_id(self):
        """Call ``PUT /regions/{region_id}`` w/o an ID in the request body."""
        ref = unit.new_region_ref()
        # Move the generated ID out of the body and into the URL only.
        region_id = ref.pop('id')
        r = self.put(
            '/regions/%s' % region_id,
            body={'region': ref},
            expected_status=http_client.CREATED)
        self.assertValidRegionResponse(r, ref)
        # Double-check that the region ID was kept as-is and not
        # populated with a UUID, as is the case with POST /v3/regions
        self.assertEqual(region_id, r.json['region']['id'])
    def test_create_region_with_matching_ids(self):
        """Call ``PUT /regions/{region_id}`` with an ID in the request body."""
        ref = unit.new_region_ref()
        # Same ID in both the URL and the request body.
        region_id = ref['id']
        r = self.put(
            '/regions/%s' % region_id,
            body={'region': ref},
            expected_status=http_client.CREATED)
        self.assertValidRegionResponse(r, ref)
        # Double-check that the region ID was kept as-is and not
        # populated with a UUID, as is the case with POST /v3/regions
        self.assertEqual(region_id, r.json['region']['id'])
    def test_create_region_with_duplicate_id(self):
        """Call ``PUT /regions/{region_id}`` twice; second call conflicts."""
        ref = dict(description="my region")
        self.put(
            '/regions/myregion',
            body={'region': ref}, expected_status=http_client.CREATED)
        # Create region again with duplicate id
        self.put(
            '/regions/myregion',
            body={'region': ref}, expected_status=http_client.CONFLICT)
    def test_create_region(self):
        """Call ``POST /regions`` with an ID in the request body."""
        # the ref will have an ID defined on it
        ref = unit.new_region_ref()
        r = self.post(
            '/regions',
            body={'region': ref})
        self.assertValidRegionResponse(r, ref)
        # we should be able to get the region, having defined the ID ourselves
        # (round-trip by ID to prove the caller-supplied ID was honored)
        r = self.get(
            '/regions/%(region_id)s' % {
                'region_id': ref['id']})
        self.assertValidRegionResponse(r, ref)
    def test_create_region_with_empty_id(self):
        """Call ``POST /regions`` with an empty ID in the request body."""
        ref = unit.new_region_ref(id='')
        r = self.post('/regions', body={'region': ref})
        self.assertValidRegionResponse(r, ref)
        # An empty ID should be replaced by a server-generated, non-empty one.
        self.assertNotEmpty(r.result['region'].get('id'))
    def test_create_region_without_id(self):
        """Call ``POST /regions`` without an ID in the request body."""
        ref = unit.new_region_ref()
        # instead of defining the ID ourselves...
        del ref['id']
        # let the service define the ID
        r = self.post('/regions', body={'region': ref})
        self.assertValidRegionResponse(r, ref)
    def test_create_region_without_description(self):
        """Call ``POST /regions`` without description in the request body."""
        ref = unit.new_region_ref(description=None)
        del ref['description']
        r = self.post('/regions', body={'region': ref})
        # Create the description in the reference to compare to since the
        # response should now have a description, even though we didn't send
        # it with the original reference.
        ref['description'] = ''
        self.assertValidRegionResponse(r, ref)
    def test_create_regions_with_same_description_string(self):
        """Call ``POST /regions`` with duplicate descriptions."""
        # NOTE(lbragstad): Make sure we can create two regions that have the
        # same description (descriptions need not be unique).
        region_desc = 'Some Region Description'
        ref1 = unit.new_region_ref(description=region_desc)
        ref2 = unit.new_region_ref(description=region_desc)
        resp1 = self.post('/regions', body={'region': ref1})
        self.assertValidRegionResponse(resp1, ref1)
        resp2 = self.post('/regions', body={'region': ref2})
        self.assertValidRegionResponse(resp2, ref2)
    def test_create_regions_without_descriptions(self):
        """Call ``POST /regions`` with no description."""
        # NOTE(lbragstad): Make sure we can create two regions that have
        # no description in the request body. The description should be
        # populated by Catalog Manager.
        ref1 = unit.new_region_ref()
        ref2 = unit.new_region_ref()
        # Exercise both the missing-key and explicit-None variants.
        del ref1['description']
        ref2['description'] = None
        resp1 = self.post('/regions', body={'region': ref1})
        resp2 = self.post('/regions', body={'region': ref2})
        # Create the descriptions in the references to compare to since the
        # responses should now have descriptions, even though we didn't send
        # a description with the original references.
        ref1['description'] = ''
        ref2['description'] = ''
        self.assertValidRegionResponse(resp1, ref1)
        self.assertValidRegionResponse(resp2, ref2)
    def test_create_region_with_conflicting_ids(self):
        """Call ``PUT /regions/{region_id}`` with conflicting region IDs."""
        # the region ref is created with an ID
        ref = unit.new_region_ref()
        # but instead of using that ID, make up a new, conflicting one;
        # the mismatch between URL and body must be rejected.
        self.put(
            '/regions/%s' % uuid.uuid4().hex,
            body={'region': ref},
            expected_status=http_client.BAD_REQUEST)
    def test_list_head_regions(self):
        """Call ``GET & HEAD /regions``."""
        resource_url = '/regions'
        r = self.get(resource_url)
        # The default region from test setup should appear in the listing.
        self.assertValidRegionListResponse(r, ref=self.region)
        self.head(resource_url, expected_status=http_client.OK)
def _create_region_with_parent_id(self, parent_id=None):
ref = unit.new_region_ref(parent_region_id=parent_id)
return self.post(
'/regions',
body={'region': ref})
def test_list_regions_filtered_by_parent_region_id(self):
"""Call ``GET /regions?parent_region_id={parent_region_id}``."""
new_region = self._create_region_with_parent_id()
parent_id = new_region.result['region']['id']
new_region = self._create_region_with_parent_id(parent_id)
new_region = self._create_region_with_parent_id(parent_id)
r = self.get('/regions?parent_region_id=%s' % parent_id)
for region in r.result['regions']:
self.assertEqual(parent_id, region['parent_region_id'])
    def test_get_head_region(self):
        """Call ``GET & HEAD /regions/{region_id}``."""
        resource_url = '/regions/%(region_id)s' % {
            'region_id': self.region_id}
        r = self.get(resource_url)
        self.assertValidRegionResponse(r, self.region)
        self.head(resource_url, expected_status=http_client.OK)
    def test_update_region(self):
        """Call ``PATCH /regions/{region_id}``."""
        region = unit.new_region_ref()
        # The ID must not appear in a PATCH body; the URL identifies the region.
        del region['id']
        r = self.patch('/regions/%(region_id)s' % {
            'region_id': self.region_id},
            body={'region': region})
        self.assertValidRegionResponse(r, region)
    def test_update_region_without_description_keeps_original(self):
        """Call ``PATCH /regions/{region_id}``."""
        region_ref = unit.new_region_ref()
        resp = self.post('/regions', body={'region': region_ref})
        region_updates = {
            # update with something that's not the description
            'parent_region_id': self.region_id,
        }
        resp = self.patch('/regions/%s' % region_ref['id'],
                          body={'region': region_updates})
        # NOTE(dstanek): Keystone should keep the original description.
        self.assertEqual(region_ref['description'],
                         resp.result['region']['description'])
    def test_update_region_with_null_description(self):
        """Call ``PATCH /regions/{region_id}`` with description: null."""
        region = unit.new_region_ref(description=None)
        del region['id']
        r = self.patch('/regions/%(region_id)s' % {
            'region_id': self.region_id},
            body={'region': region})
        # NOTE(dstanek): Keystone should turn the provided None value into
        # an empty string before storing in the backend.
        region['description'] = ''
        self.assertValidRegionResponse(r, region)
    def test_delete_region(self):
        """Call ``DELETE /regions/{region_id}``."""
        # Create a fresh region so we don't delete the shared test fixture.
        ref = unit.new_region_ref()
        r = self.post(
            '/regions',
            body={'region': ref})
        self.assertValidRegionResponse(r, ref)
        self.delete('/regions/%(region_id)s' % {
            'region_id': ref['id']})
# service crud tests
    def test_create_service(self):
        """Call ``POST /services``."""
        ref = unit.new_service_ref()
        r = self.post(
            '/services',
            body={'service': ref})
        self.assertValidServiceResponse(r, ref)
    def test_create_service_no_name(self):
        """Call ``POST /services`` without a name; it defaults to ''."""
        ref = unit.new_service_ref()
        del ref['name']
        r = self.post(
            '/services',
            body={'service': ref})
        # The response is expected to carry an empty-string name.
        ref['name'] = ''
        self.assertValidServiceResponse(r, ref)
    def test_create_service_no_enabled(self):
        """Call ``POST /services`` without enabled; it defaults to True."""
        ref = unit.new_service_ref()
        del ref['enabled']
        r = self.post(
            '/services',
            body={'service': ref})
        ref['enabled'] = True
        self.assertValidServiceResponse(r, ref)
        # assertIs ensures a real boolean True, not merely a truthy value.
        self.assertIs(True, r.result['service']['enabled'])
    def test_create_service_enabled_false(self):
        """Call ``POST /services`` with enabled: false."""
        ref = unit.new_service_ref(enabled=False)
        r = self.post(
            '/services',
            body={'service': ref})
        self.assertValidServiceResponse(r, ref)
        # assertIs ensures a real boolean False, not merely a falsy value.
        self.assertIs(False, r.result['service']['enabled'])
    def test_create_service_enabled_true(self):
        """Call ``POST /services`` with enabled: true."""
        ref = unit.new_service_ref(enabled=True)
        r = self.post(
            '/services',
            body={'service': ref})
        self.assertValidServiceResponse(r, ref)
        # assertIs ensures a real boolean True, not merely a truthy value.
        self.assertIs(True, r.result['service']['enabled'])
    def test_create_service_enabled_str_true(self):
        """Call ``POST /services``; string 'True' for enabled is rejected."""
        ref = unit.new_service_ref(enabled='True')
        self.post('/services', body={'service': ref},
                  expected_status=http_client.BAD_REQUEST)
    def test_create_service_enabled_str_false(self):
        """Call ``POST /services``; string 'False' for enabled is rejected."""
        ref = unit.new_service_ref(enabled='False')
        self.post('/services', body={'service': ref},
                  expected_status=http_client.BAD_REQUEST)
    def test_create_service_enabled_str_random(self):
        """Call ``POST /services``; arbitrary string for enabled is rejected."""
        ref = unit.new_service_ref(enabled='puppies')
        self.post('/services', body={'service': ref},
                  expected_status=http_client.BAD_REQUEST)
    def test_list_head_services(self):
        """Call ``GET & HEAD /services``."""
        resource_url = '/services'
        r = self.get(resource_url)
        # The default service from test setup should appear in the listing.
        self.assertValidServiceListResponse(r, ref=self.service)
        self.head(resource_url, expected_status=http_client.OK)
def _create_random_service(self):
ref = unit.new_service_ref()
response = self.post(
'/services',
body={'service': ref})
return response.json['service']
    def test_filter_list_services_by_type(self):
        """Call ``GET /services?type=<some type>``."""
        target_ref = self._create_random_service()
        # create unrelated services that must be excluded by the filter
        self._create_random_service()
        self._create_random_service()
        response = self.get('/services?type=' + target_ref['type'])
        self.assertValidServiceListResponse(response, ref=target_ref)
        filtered_service_list = response.json['services']
        self.assertEqual(1, len(filtered_service_list))
        filtered_service = filtered_service_list[0]
        self.assertEqual(target_ref['type'], filtered_service['type'])
    def test_filter_list_services_by_name(self):
        """Call ``GET /services?name=<some name>``."""
        # create unrelated services first so the target is created last;
        # this ordering matters for the list_limit variant of this test.
        self._create_random_service()
        self._create_random_service()
        # create the desired service
        target_ref = self._create_random_service()
        response = self.get('/services?name=' + target_ref['name'])
        self.assertValidServiceListResponse(response, ref=target_ref)
        filtered_service_list = response.json['services']
        self.assertEqual(1, len(filtered_service_list))
        filtered_service = filtered_service_list[0]
        self.assertEqual(target_ref['name'], filtered_service['name'])
    def test_filter_list_services_by_name_with_list_limit(self):
        """Call ``GET /services?name=<some name>`` with list_limit=1.

        Re-runs the name filter test to confirm filtering is applied
        before the list limit truncates results.
        """
        self.config_fixture.config(list_limit=1)
        self.test_filter_list_services_by_name()
    def test_get_head_service(self):
        """Call ``GET & HEAD /services/{service_id}``."""
        resource_url = '/services/%(service_id)s' % {
            'service_id': self.service_id}
        r = self.get(resource_url)
        self.assertValidServiceResponse(r, self.service)
        self.head(resource_url, expected_status=http_client.OK)
    def test_update_service(self):
        """Call ``PATCH /services/{service_id}``."""
        service = unit.new_service_ref()
        # The ID must not appear in a PATCH body; the URL identifies it.
        del service['id']
        r = self.patch('/services/%(service_id)s' % {
            'service_id': self.service_id},
            body={'service': service})
        self.assertValidServiceResponse(r, service)
    def test_delete_service(self):
        """Call ``DELETE /services/{service_id}``."""
        # Deleting the fixture service; DELETE success is the assertion.
        self.delete('/services/%(service_id)s' % {
            'service_id': self.service_id})
# endpoint crud tests
    def test_list_head_endpoints(self):
        """Call ``GET & HEAD /endpoints``."""
        resource_url = '/endpoints'
        r = self.get(resource_url)
        # The default endpoint from test setup should appear in the listing.
        self.assertValidEndpointListResponse(r, ref=self.endpoint)
        self.head(resource_url, expected_status=http_client.OK)
    def _create_random_endpoint(self, interface='public',
                                parent_region_id=None):
        """Create an endpoint on a fresh region and service; return its dict.

        The region is created under *parent_region_id* (top-level if None),
        so callers can test parent/child region filtering.
        """
        region = self._create_region_with_parent_id(
            parent_id=parent_region_id)
        service = self._create_random_service()
        ref = unit.new_endpoint_ref(
            service_id=service['id'],
            interface=interface,
            region_id=region.result['region']['id'])
        response = self.post(
            '/endpoints',
            body={'endpoint': ref})
        return response.json['endpoint']
    def test_list_endpoints_filtered_by_interface(self):
        """Call ``GET /endpoints?interface={interface}``."""
        ref = self._create_random_endpoint(interface='internal')
        response = self.get('/endpoints?interface=%s' % ref['interface'])
        self.assertValidEndpointListResponse(response, ref=ref)
        # Every returned endpoint must match the interface filter.
        for endpoint in response.json['endpoints']:
            self.assertEqual(ref['interface'], endpoint['interface'])
    def test_list_endpoints_filtered_by_service_id(self):
        """Call ``GET /endpoints?service_id={service_id}``."""
        ref = self._create_random_endpoint()
        response = self.get('/endpoints?service_id=%s' % ref['service_id'])
        self.assertValidEndpointListResponse(response, ref=ref)
        # Every returned endpoint must match the service_id filter.
        for endpoint in response.json['endpoints']:
            self.assertEqual(ref['service_id'], endpoint['service_id'])
    def test_list_endpoints_filtered_by_region_id(self):
        """Call ``GET /endpoints?region_id={region_id}``."""
        ref = self._create_random_endpoint()
        response = self.get('/endpoints?region_id=%s' % ref['region_id'])
        self.assertValidEndpointListResponse(response, ref=ref)
        # Every returned endpoint must match the region_id filter.
        for endpoint in response.json['endpoints']:
            self.assertEqual(ref['region_id'], endpoint['region_id'])
    def test_list_endpoints_filtered_by_parent_region_id(self):
        """Call ``GET /endpoints?region_id={region_id}``.
        Ensure passing the parent_region_id as filter returns an
        empty list: the endpoint lives in the child region, and the
        region_id filter does not traverse the region hierarchy.
        """
        parent_region = self._create_region_with_parent_id()
        parent_region_id = parent_region.result['region']['id']
        self._create_random_endpoint(parent_region_id=parent_region_id)
        response = self.get('/endpoints?region_id=%s' % parent_region_id)
        self.assertEqual(0, len(response.json['endpoints']))
def test_list_endpoints_with_multiple_filters(self):
"""Call ``GET /endpoints?interface={interface}...``.
Ensure passing different combinations of interface, region_id and
service_id as filters will return the correct result.
"""
# interface and region_id specified
ref = self._create_random_endpoint(interface='internal')
response = self.get('/endpoints?interface=%s®ion_id=%s' %
(ref['interface'], ref['region_id']))
self.assertValidEndpointListResponse(response, ref=ref)
for endpoint in response.json['endpoints']:
self.assertEqual(ref['interface'], endpoint['interface'])
self.assertEqual(ref['region_id'], endpoint['region_id'])
# interface and service_id specified
ref = self._create_random_endpoint(interface='internal')
response = self.get('/endpoints?interface=%s&service_id=%s' %
(ref['interface'], ref['service_id']))
self.assertValidEndpointListResponse(response, ref=ref)
for endpoint in response.json['endpoints']:
self.assertEqual(ref['interface'], endpoint['interface'])
self.assertEqual(ref['service_id'], endpoint['service_id'])
# region_id and service_id specified
ref = self._create_random_endpoint(interface='internal')
response = self.get('/endpoints?region_id=%s&service_id=%s' %
(ref['region_id'], ref['service_id']))
self.assertValidEndpointListResponse(response, ref=ref)
for endpoint in response.json['endpoints']:
self.assertEqual(ref['region_id'], endpoint['region_id'])
self.assertEqual(ref['service_id'], endpoint['service_id'])
# interface, region_id and service_id specified
ref = self._create_random_endpoint(interface='internal')
response = self.get(('/endpoints?interface=%s®ion_id=%s'
'&service_id=%s') %
(ref['interface'], ref['region_id'],
ref['service_id']))
self.assertValidEndpointListResponse(response, ref=ref)
for endpoint in response.json['endpoints']:
self.assertEqual(ref['interface'], endpoint['interface'])
self.assertEqual(ref['region_id'], endpoint['region_id'])
self.assertEqual(ref['service_id'], endpoint['service_id'])
    def test_list_endpoints_with_random_filter_values(self):
        """Call ``GET /endpoints?interface={interface}...``.
        Ensure passing random values for: interface, region_id and
        service_id will return an empty list.
        """
        self._create_random_endpoint(interface='internal')
        response = self.get('/endpoints?interface=%s' % uuid.uuid4().hex)
        self.assertEqual(0, len(response.json['endpoints']))
        response = self.get('/endpoints?region_id=%s' % uuid.uuid4().hex)
        self.assertEqual(0, len(response.json['endpoints']))
        response = self.get('/endpoints?service_id=%s' % uuid.uuid4().hex)
        self.assertEqual(0, len(response.json['endpoints']))
    def test_create_endpoint_no_enabled(self):
        """Call ``POST /endpoints``; enabled should default to True."""
        ref = unit.new_endpoint_ref(service_id=self.service_id,
                                    interface='public',
                                    region_id=self.region_id)
        r = self.post('/endpoints', body={'endpoint': ref})
        ref['enabled'] = True
        self.assertValidEndpointResponse(r, ref)
    def test_create_endpoint_enabled_true(self):
        """Call ``POST /endpoints`` with enabled: true."""
        ref = unit.new_endpoint_ref(service_id=self.service_id,
                                    interface='public',
                                    region_id=self.region_id,
                                    enabled=True)
        r = self.post('/endpoints', body={'endpoint': ref})
        self.assertValidEndpointResponse(r, ref)
    def test_create_endpoint_enabled_false(self):
        """Call ``POST /endpoints`` with enabled: false."""
        ref = unit.new_endpoint_ref(service_id=self.service_id,
                                    interface='public',
                                    region_id=self.region_id,
                                    enabled=False)
        r = self.post('/endpoints', body={'endpoint': ref})
        self.assertValidEndpointResponse(r, ref)
    def test_create_endpoint_enabled_str_true(self):
        """Call ``POST /endpoints``; string 'True' for enabled is rejected."""
        ref = unit.new_endpoint_ref(service_id=self.service_id,
                                    interface='public',
                                    region_id=self.region_id,
                                    enabled='True')
        self.post('/endpoints', body={'endpoint': ref},
                  expected_status=http_client.BAD_REQUEST)
    def test_create_endpoint_enabled_str_false(self):
        """Call ``POST /endpoints``; string 'False' for enabled is rejected."""
        ref = unit.new_endpoint_ref(service_id=self.service_id,
                                    interface='public',
                                    region_id=self.region_id,
                                    enabled='False')
        self.post('/endpoints', body={'endpoint': ref},
                  expected_status=http_client.BAD_REQUEST)
    def test_create_endpoint_enabled_str_random(self):
        """Call ``POST /endpoints``; arbitrary string for enabled is rejected."""
        ref = unit.new_endpoint_ref(service_id=self.service_id,
                                    interface='public',
                                    region_id=self.region_id,
                                    enabled='puppies')
        self.post('/endpoints', body={'endpoint': ref},
                  expected_status=http_client.BAD_REQUEST)
    def test_create_endpoint_with_invalid_region_id(self):
        """Call ``POST /endpoints`` with a region that does not exist."""
        # No region_id override is given, so the ref carries a generated
        # region_id that was never created — presumably what makes it
        # invalid here (400).
        ref = unit.new_endpoint_ref(service_id=self.service_id)
        self.post('/endpoints', body={'endpoint': ref},
                  expected_status=http_client.BAD_REQUEST)
    def test_create_endpoint_with_region(self):
        """EndpointV3 creates the region before creating the endpoint.
        This occurs when endpoint is provided with 'region' and no 'region_id'.
        """
        ref = unit.new_endpoint_ref_with_region(service_id=self.service_id,
                                                region=uuid.uuid4().hex)
        self.post('/endpoints', body={'endpoint': ref})
        # Make sure the region is created
        self.get('/regions/%(region_id)s' % {'region_id': ref["region"]})
    def test_create_endpoint_with_no_region(self):
        """EndpointV3 allows creating an endpoint without a region."""
        ref = unit.new_endpoint_ref(service_id=self.service_id, region_id=None)
        del ref['region_id']  # cannot just be None, it needs to not exist
        self.post('/endpoints', body={'endpoint': ref})
    def test_create_endpoint_with_empty_url(self):
        """Call ``POST /endpoints``; an empty url is rejected."""
        ref = unit.new_endpoint_ref(service_id=self.service_id, url='')
        self.post('/endpoints', body={'endpoint': ref},
                  expected_status=http_client.BAD_REQUEST)
    def test_get_head_endpoint(self):
        """Call ``GET & HEAD /endpoints/{endpoint_id}``."""
        resource_url = '/endpoints/%(endpoint_id)s' % {
            'endpoint_id': self.endpoint_id}
        r = self.get(resource_url)
        self.assertValidEndpointResponse(r, self.endpoint)
        self.head(resource_url, expected_status=http_client.OK)
    def test_update_endpoint(self):
        """Call ``PATCH /endpoints/{endpoint_id}``."""
        ref = unit.new_endpoint_ref(service_id=self.service_id,
                                    interface='public',
                                    region_id=self.region_id)
        # The ID must not appear in a PATCH body; the URL identifies it.
        del ref['id']
        r = self.patch(
            '/endpoints/%(endpoint_id)s' % {
                'endpoint_id': self.endpoint_id},
            body={'endpoint': ref})
        # The ref has no 'enabled'; the stored endpoint defaults it to True.
        ref['enabled'] = True
        self.assertValidEndpointResponse(r, ref)
    def test_update_endpoint_enabled_true(self):
        """Call ``PATCH /endpoints/{endpoint_id}`` with enabled: True."""
        r = self.patch(
            '/endpoints/%(endpoint_id)s' % {
                'endpoint_id': self.endpoint_id},
            body={'endpoint': {'enabled': True}})
        # No-op update: the fixture endpoint is expected back unchanged.
        self.assertValidEndpointResponse(r, self.endpoint)
def test_update_endpoint_enabled_false(self):
"""Call ``PATCH /endpoints/{endpoint_id}`` with enabled: False."""
r = self.patch(
'/endpoints/%(endpoint_id)s' % {
'endpoint_id': self.endpoint_id},
body={'endpoint': {'enabled': False}})
exp_endpoint = copy.copy(self.endpoint)
exp_endpoint['enabled'] = False
self.assertValidEndpointResponse(r, exp_endpoint)
def test_update_endpoint_enabled_str_true(self):
"""Call ``PATCH /endpoints/{endpoint_id}`` with enabled: 'True'."""
self.patch(
'/endpoints/%(endpoint_id)s' % {
'endpoint_id': self.endpoint_id},
body={'endpoint': {'enabled': 'True'}},
expected_status=http_client.BAD_REQUEST)
def test_update_endpoint_enabled_str_false(self):
"""Call ``PATCH /endpoints/{endpoint_id}`` with enabled: 'False'."""
self.patch(
'/endpoints/%(endpoint_id)s' % {
'endpoint_id': self.endpoint_id},
body={'endpoint': {'enabled': 'False'}},
expected_status=http_client.BAD_REQUEST)
def test_update_endpoint_enabled_str_random(self):
"""Call ``PATCH /endpoints/{endpoint_id}`` with enabled: 'kitties'."""
self.patch(
'/endpoints/%(endpoint_id)s' % {
'endpoint_id': self.endpoint_id},
body={'endpoint': {'enabled': 'kitties'}},
expected_status=http_client.BAD_REQUEST)
def test_delete_endpoint(self):
"""Call ``DELETE /endpoints/{endpoint_id}``."""
self.delete(
'/endpoints/%(endpoint_id)s' % {
'endpoint_id': self.endpoint_id})
    def test_create_endpoint_on_v2(self):
        # An endpoint created through the v2 API must be visible, and
        # correctly translated, when read back through the v3 API.
        # clear the v3 endpoint so we only have endpoints created on v2
        self.delete(
            '/endpoints/%(endpoint_id)s' % {
                'endpoint_id': self.endpoint_id})
        # create a v3 endpoint ref, and then tweak it back to a v2-style ref
        ref = unit.new_endpoint_ref_with_region(service_id=self.service['id'],
                                                region=uuid.uuid4().hex,
                                                internalurl=None)
        del ref['id']
        del ref['interface']
        ref['publicurl'] = ref.pop('url')
        # don't set adminurl to ensure its absence is handled like internalurl
        # create the endpoint on v2 (using a v3 token)
        r = self.admin_request(
            method='POST',
            path='/v2.0/endpoints',
            token=self.get_scoped_token(),
            body={'endpoint': ref})
        endpoint_v2 = r.result['endpoint']
        # test the endpoint on v3
        r = self.get('/endpoints')
        endpoints = self.assertValidEndpointListResponse(r)
        self.assertEqual(1, len(endpoints))
        endpoint_v3 = endpoints.pop()
        # these attributes are identical between both APIs
        self.assertEqual(ref['region'], endpoint_v3['region_id'])
        self.assertEqual(ref['service_id'], endpoint_v3['service_id'])
        self.assertEqual(ref['description'], endpoint_v3['description'])
        # a v2 endpoint is not quite the same concept as a v3 endpoint, so they
        # receive different identifiers
        self.assertNotEqual(endpoint_v2['id'], endpoint_v3['id'])
        # v2 has a publicurl; v3 has a url + interface type
        self.assertEqual(ref['publicurl'], endpoint_v3['url'])
        self.assertEqual('public', endpoint_v3['interface'])
        # tests for bug 1152632 -- these attributes were being returned by v3
        self.assertNotIn('publicurl', endpoint_v3)
        self.assertNotIn('adminurl', endpoint_v3)
        self.assertNotIn('internalurl', endpoint_v3)
        # test for bug 1152635 -- this attribute was being returned by v3
        self.assertNotIn('legacy_endpoint_id', endpoint_v3)
        # the v2 'region' string and the v3 'region_id' refer to the same thing
        self.assertEqual(endpoint_v2['region'], endpoint_v3['region_id'])
def test_deleting_endpoint_with_space_in_url(self):
# add a space to all urls (intentional "i d" to test bug)
url_with_space = "http://127.0.0.1:8774 /v1.1/\$(tenant_i d)s"
# create a v3 endpoint ref
ref = unit.new_endpoint_ref(service_id=self.service['id'],
region_id=None,
publicurl=url_with_space,
internalurl=url_with_space,
adminurl=url_with_space,
url=url_with_space)
# add the endpoint to the database
self.catalog_api.create_endpoint(ref['id'], ref)
# delete the endpoint
self.delete('/endpoints/%s' % ref['id'])
# make sure it's deleted (GET should return Not Found)
self.get('/endpoints/%s' % ref['id'],
expected_status=http_client.NOT_FOUND)
def test_endpoint_create_with_valid_url(self):
"""Create endpoint with valid url should be tested,too."""
# list one valid url is enough, no need to list too much
valid_url = 'http://127.0.0.1:8774/v1.1/$(tenant_id)s'
ref = unit.new_endpoint_ref(self.service_id,
interface='public',
region_id=self.region_id,
url=valid_url)
self.post('/endpoints', body={'endpoint': ref})
def test_endpoint_create_with_valid_url_project_id(self):
"""Create endpoint with valid url should be tested,too."""
valid_url = 'http://127.0.0.1:8774/v1.1/$(project_id)s'
ref = unit.new_endpoint_ref(self.service_id,
interface='public',
region_id=self.region_id,
url=valid_url)
self.post('/endpoints', body={'endpoint': ref})
def test_endpoint_create_with_invalid_url(self):
"""Test the invalid cases: substitutions is not exactly right."""
invalid_urls = [
# using a substitution that is not whitelisted - KeyError
'http://127.0.0.1:8774/v1.1/$(nonexistent)s',
# invalid formatting - ValueError
'http://127.0.0.1:8774/v1.1/$(tenant_id)',
'http://127.0.0.1:8774/v1.1/$(tenant_id)t',
'http://127.0.0.1:8774/v1.1/$(tenant_id',
# invalid type specifier - TypeError
# admin_url is a string not an int
'http://127.0.0.1:8774/v1.1/$(admin_url)d',
]
ref = unit.new_endpoint_ref(self.service_id)
for invalid_url in invalid_urls:
ref['url'] = invalid_url
self.post('/endpoints',
body={'endpoint': ref},
expected_status=http_client.BAD_REQUEST)
class TestCatalogAPISQL(unit.TestCase):
    """Test for the catalog Manager against the SQL backend."""

    def setUp(self):
        super(TestCatalogAPISQL, self).setUp()
        self.useFixture(database.Database())
        self.catalog_api = catalog.Manager()
        # Seed a single service with one (region-less) endpoint.
        service = unit.new_service_ref()
        self.service_id = service['id']
        self.catalog_api.create_service(self.service_id, service)
        self.create_endpoint(service_id=self.service_id)

    def create_endpoint(self, service_id, **kwargs):
        # Helper: persist a new region-less endpoint and return its ref.
        endpoint = unit.new_endpoint_ref(service_id=service_id,
                                         region_id=None, **kwargs)
        self.catalog_api.create_endpoint(endpoint['id'], endpoint)
        return endpoint

    def config_overrides(self):
        super(TestCatalogAPISQL, self).config_overrides()
        self.config_fixture.config(group='catalog', driver='sql')

    def test_get_catalog_ignores_endpoints_with_invalid_urls(self):
        user_id = uuid.uuid4().hex
        tenant_id = uuid.uuid4().hex
        # the only endpoint in the catalog is the one created in setUp
        # NOTE: the local name 'catalog' shadows the imported catalog module
        # inside this method.
        catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id)
        self.assertEqual(1, len(catalog[0]['endpoints']))
        # it's also the only endpoint in the backend
        self.assertEqual(1, len(self.catalog_api.list_endpoints()))
        # create a new, invalid endpoint - malformed type declaration
        self.create_endpoint(self.service_id,
                             url='http://keystone/%(tenant_id)')
        # create a new, invalid endpoint - nonexistent key
        self.create_endpoint(self.service_id,
                             url='http://keystone/%(you_wont_find_me)s')
        # verify that the invalid endpoints don't appear in the catalog
        catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id)
        self.assertEqual(1, len(catalog[0]['endpoints']))
        # all three appear in the backend
        self.assertEqual(3, len(self.catalog_api.list_endpoints()))
        # create another valid endpoint - tenant_id will be replaced
        self.create_endpoint(self.service_id,
                             url='http://keystone/%(tenant_id)s')
        # there are two valid endpoints, positive check
        catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id)
        self.assertThat(catalog[0]['endpoints'], matchers.HasLength(2))
        # If the URL has no 'tenant_id' to substitute, we will skip the
        # endpoint which contains this kind of URL, negative check.
        tenant_id = None
        catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id)
        self.assertThat(catalog[0]['endpoints'], matchers.HasLength(1))

    def test_get_catalog_always_returns_service_name(self):
        user_id = uuid.uuid4().hex
        tenant_id = uuid.uuid4().hex
        # create a service, with a name
        named_svc = unit.new_service_ref()
        self.catalog_api.create_service(named_svc['id'], named_svc)
        self.create_endpoint(service_id=named_svc['id'])
        # create a service, with no name
        unnamed_svc = unit.new_service_ref(name=None)
        del unnamed_svc['name']
        self.catalog_api.create_service(unnamed_svc['id'], unnamed_svc)
        self.create_endpoint(service_id=unnamed_svc['id'])
        # NOTE: local 'catalog' shadows the imported catalog module here too.
        catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id)
        named_endpoint = [ep for ep in catalog
                          if ep['type'] == named_svc['type']][0]
        self.assertEqual(named_svc['name'], named_endpoint['name'])
        unnamed_endpoint = [ep for ep in catalog
                            if ep['type'] == unnamed_svc['type']][0]
        # a service with no name must still yield an (empty) name field
        self.assertEqual('', unnamed_endpoint['name'])
# TODO(dstanek): this needs refactoring with the test above, but we are in a
# crunch so that will happen in a future patch.
class TestCatalogAPISQLRegions(unit.TestCase):
    """Test for the catalog Manager against the SQL backend."""

    def setUp(self):
        super(TestCatalogAPISQLRegions, self).setUp()
        self.useFixture(database.Database())
        self.catalog_api = catalog.Manager()

    def config_overrides(self):
        super(TestCatalogAPISQLRegions, self).config_overrides()
        self.config_fixture.config(group='catalog', driver='sql')

    def test_get_catalog_returns_proper_endpoints_with_no_region(self):
        service = unit.new_service_ref()
        service_id = service['id']
        self.catalog_api.create_service(service_id, service)
        # An endpoint with no region_id at all (not merely None).
        endpoint = unit.new_endpoint_ref(service_id=service_id,
                                         region_id=None)
        del endpoint['region_id']
        self.catalog_api.create_endpoint(endpoint['id'], endpoint)
        user_id = uuid.uuid4().hex
        tenant_id = uuid.uuid4().hex
        # NOTE: local 'catalog' shadows the imported catalog module here.
        catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id)
        self.assertValidCatalogEndpoint(
            catalog[0]['endpoints'][0], ref=endpoint)

    def test_get_catalog_returns_proper_endpoints_with_region(self):
        service = unit.new_service_ref()
        service_id = service['id']
        self.catalog_api.create_service(service_id, service)
        endpoint = unit.new_endpoint_ref(service_id=service_id)
        # The region must exist before the endpoint can reference it.
        region = unit.new_region_ref(id=endpoint['region_id'])
        self.catalog_api.create_region(region)
        self.catalog_api.create_endpoint(endpoint['id'], endpoint)
        # Re-read so the stored (canonical) form is compared below.
        endpoint = self.catalog_api.get_endpoint(endpoint['id'])
        user_id = uuid.uuid4().hex
        tenant_id = uuid.uuid4().hex
        catalog = self.catalog_api.get_v3_catalog(user_id, tenant_id)
        self.assertValidCatalogEndpoint(
            catalog[0]['endpoints'][0], ref=endpoint)

    def assertValidCatalogEndpoint(self, entity, ref=None):
        # Each catalog endpoint must mirror the reference endpoint and
        # expose 'region' as an alias of 'region_id'.
        keys = ['description', 'id', 'interface', 'name', 'region_id', 'url']
        for k in keys:
            self.assertEqual(ref.get(k), entity[k], k)
        self.assertEqual(entity['region_id'], entity['region'])
class TestCatalogAPITemplatedProject(test_v3.RestfulTestCase):
    """Templated Catalog doesn't support full API.

    Eg. No region/endpoint creation.
    """

    def config_overrides(self):
        super(TestCatalogAPITemplatedProject, self).config_overrides()
        self.config_fixture.config(group='catalog', driver='templated')

    def load_fixtures(self, fixtures):
        # The templated driver cannot create regions/endpoints, so skip them.
        self.load_sample_data(create_region_and_endpoints=False)

    def test_project_delete(self):
        """Deleting a project should not result in an 500 ISE.

        Deleting a project will create a notification, which the EndpointFilter
        functionality will use to clean up any project->endpoint and
        project->endpoint_group relationships. The templated catalog does not
        support such relationships, but the act of attempting to delete them
        should not cause a NotImplemented exception to be exposed to an API
        caller.

        Deleting an endpoint has a similar notification and clean up
        mechanism, but since we do not allow deletion of endpoints with the
        templated catalog, there is no testing to do for that action.
        """
        self.delete(
            '/projects/%(project_id)s' % {
                'project_id': self.project_id})
| |
# Author: Mengye Ren (mren@cs.toronto.edu).
#
# Modified from Tensorflow original code.
# Original Tensorflow license shown below.
# =============================================================================
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""ResNet model.
Related papers:
https://arxiv.org/pdf/1603.05027v2.pdf
https://arxiv.org/pdf/1512.03385v1.pdf
https://arxiv.org/pdf/1605.07146v1.pdf
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import tensorflow as tf
from resnet.models import nnlib as nn
from resnet.utils import logger
log = logger.get()
class ResNetModel(object):
  """ResNet model (pre-activation residual units, TF1 graph mode).

  Builds the inference graph and, unless is_training is False or
  inference_only is True, the training ops as well.
  """

  def __init__(self,
               config,
               is_training=True,
               inference_only=False,
               inp=None,
               label=None):
    """ResNet constructor.

    Args:
      config: Hyperparameters.
      is_training: One of "train" and "eval".
      inference_only: Do not build optimizer.
      inp: Optional input tensor; a float32 placeholder is created if None.
      label: Optional label tensor; an int32 placeholder is created if None.

    Raises:
      ValueError: If config.optimizer is not "sgd" or "mom".
    """
    self._config = config
    self._l1_collection = []
    self.is_training = is_training

    # Input: use caller-provided tensors when given, placeholders otherwise.
    if inp is None:
      x = tf.placeholder(
          tf.float32, [None, config.height, config.width, config.num_channel])
    else:
      x = inp
    if label is None:
      y = tf.placeholder(tf.int32, [None])
    else:
      y = label
    logits = self.build_inference_network(x)
    predictions = tf.nn.softmax(logits)

    with tf.variable_scope("costs"):
      xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
          logits=logits, labels=y)
      # Mean cross entropy over the (dynamic) batch size.
      xent = tf.reduce_sum(xent, name="xent") / tf.to_float(tf.shape(x)[0])
      cost = xent
      cost += self._decay()
      cost += self._l1_loss()

    self._cost = cost
    self._input = x
    self._label = y
    self._cross_ent = xent
    self._output = predictions

    # Skip building the optimizer for eval/inference graphs.
    if not is_training or inference_only:
      return

    global_step = tf.Variable(0.0, name="global_step", trainable=False)
    lr = tf.Variable(0.0, name="learn_rate", trainable=False)
    trainable_variables = tf.trainable_variables()
    grads = tf.gradients(cost, trainable_variables)
    if config.optimizer == "sgd":
      optimizer = tf.train.GradientDescentOptimizer(lr)
    elif config.optimizer == "mom":
      optimizer = tf.train.MomentumOptimizer(lr, 0.9)
    else:
      # Fail fast instead of hitting a NameError on 'optimizer' below.
      raise ValueError("Unknown optimizer: {}".format(config.optimizer))
    train_op = optimizer.apply_gradients(
        zip(grads, trainable_variables),
        global_step=global_step,
        name="train_step")
    self._train_op = train_op
    self._global_step = global_step
    self._lr = lr
    # Learning rate is mutated via assign so it can change between steps.
    self._new_lr = tf.placeholder(
        tf.float32, shape=[], name="new_learning_rate")
    self._lr_update = tf.assign(self._lr, self._new_lr)

  def assign_lr(self, session, lr_value):
    """Assigns new learning rate."""
    log.info("Adjusting learning rate to {}".format(lr_value))
    session.run(self._lr_update, feed_dict={self._new_lr: lr_value})

  @property
  def cost(self):
    return self._cost

  @property
  def train_op(self):
    return self._train_op

  @property
  def config(self):
    return self._config

  @property
  def lr(self):
    return self._lr

  @property
  def input(self):
    return self._input

  @property
  def output(self):
    return self._output

  @property
  def label(self):
    return self._label

  @property
  def cross_ent(self):
    return self._cross_ent

  @property
  def global_step(self):
    return self._global_step

  @property
  def l1_collection(self):
    return self._l1_collection

  def build_inference_network(self, x):
    """Builds the forward graph: init conv -> residual stages -> logits."""
    config = self.config
    num_stages = len(self.config.num_residual_units)
    strides = config.strides
    activate_before_residual = config.activate_before_residual
    filters = [ff for ff in config.filters]  # Copy filter config.
    init_filter = config.init_filter

    with tf.variable_scope("init"):
      h = self._conv("init_conv", x, init_filter,
                     int(x.get_shape()[-1]), filters[0],
                     self._stride_arr(config.init_stride))

    # Max-pooling is used in ImageNet experiments to further reduce
    # dimensionality.
    if config.init_max_pool:
      h = tf.nn.max_pool(h, [1, 3, 3, 1], [1, 2, 2, 1], "SAME")

    if config.use_bottleneck:
      res_func = self._bottleneck_residual
      # For CIFAR-10 it's [16, 16, 32, 64] => [16, 64, 128, 256]
      for ii in range(1, len(filters)):
        filters[ii] *= 4
    else:
      res_func = self._residual

    for ss in range(num_stages):
      with tf.variable_scope("unit_{}_0".format(ss + 1)):
        h = res_func(
            h,
            filters[ss],
            filters[ss + 1],
            self._stride_arr(strides[ss]),
            activate_before_residual=activate_before_residual[ss])
      for ii in range(1, config.num_residual_units[ss]):
        with tf.variable_scope("unit_{}_{}".format(ss + 1, ii)):
          h = res_func(
              h,
              filters[ss + 1],
              filters[ss + 1],
              self._stride_arr(1),
              activate_before_residual=False)

    with tf.variable_scope("unit_last"):
      h = self._batch_norm("final_bn", h)
      h = self._relu(h, config.relu_leakiness)
      h = self._global_avg_pool(h)

    with tf.variable_scope("logit"):
      logits = self._fully_connected(h, config.num_classes)
    return logits

  def _stride_arr(self, stride):
    """Map a stride scalar to the stride array for tf.nn.conv2d."""
    return [1, stride, stride, 1]

  def _batch_norm(self, name, x):
    """Batch normalization."""
    with tf.variable_scope(name):
      n_out = x.get_shape()[-1]
      beta = nn.weight_variable(
          [n_out], init_method="constant", init_param={"val": 0.0},
          name="beta")
      gamma = nn.weight_variable(
          [n_out],
          init_method="constant",
          init_param={"val": 1.0},
          name="gamma")
      return nn.batch_norm(
          x,
          self.is_training,
          gamma=gamma,
          beta=beta,
          axes=[0, 1, 2],
          eps=1e-3,
          scope="bn",
          name="bn_out")

  def _residual(self,
                x,
                in_filter,
                out_filter,
                stride,
                activate_before_residual=False):
    """Residual unit with 2 sub layers."""
    if activate_before_residual:
      with tf.variable_scope("shared_activation"):
        x = self._batch_norm("init_bn", x)
        x = self._relu(x, self.config.relu_leakiness)
        orig_x = x
    else:
      with tf.variable_scope("residual_only_activation"):
        orig_x = x
        x = self._batch_norm("init_bn", x)
        x = self._relu(x, self.config.relu_leakiness)

    with tf.variable_scope("sub1"):
      x = self._conv("conv1", x, 3, in_filter, out_filter, stride)

    with tf.variable_scope("sub2"):
      x = self._batch_norm("bn2", x)
      x = self._relu(x, self.config.relu_leakiness)
      x = self._conv("conv2", x, 3, out_filter, out_filter, [1, 1, 1, 1])

    with tf.variable_scope("sub_add"):
      if in_filter != out_filter:
        # Identity shortcut: downsample spatially, zero-pad extra channels.
        orig_x = tf.nn.avg_pool(orig_x, stride, stride, "VALID")
        orig_x = tf.pad(
            orig_x,
            [[0, 0], [0, 0], [0, 0],
             [(out_filter - in_filter) // 2, (out_filter - in_filter) // 2]])
      x += orig_x
    log.info("Activation after unit {}".format(
        [int(ss) for ss in x.get_shape()[1:]]))
    return x

  def _bottleneck_residual(self,
                           x,
                           in_filter,
                           out_filter,
                           stride,
                           activate_before_residual=False):
    """Bottleneck residual unit with 3 sub layers."""
    if activate_before_residual:
      with tf.variable_scope("common_bn_relu"):
        x = self._batch_norm("init_bn", x)
        x = self._relu(x, self.config.relu_leakiness)
        orig_x = x
    else:
      with tf.variable_scope("residual_bn_relu"):
        orig_x = x
        x = self._batch_norm("init_bn", x)
        x = self._relu(x, self.config.relu_leakiness)

    # NOTE: floor division is required -- true division is in effect via
    # __future__, and filter counts must be ints, not floats.
    with tf.variable_scope("sub1"):
      x = self._conv("conv1", x, 1, in_filter, out_filter // 4, stride)

    with tf.variable_scope("sub2"):
      x = self._batch_norm("bn2", x)
      x = self._relu(x, self.config.relu_leakiness)
      x = self._conv("conv2", x, 3, out_filter // 4, out_filter // 4,
                     [1, 1, 1, 1])

    with tf.variable_scope("sub3"):
      x = self._batch_norm("bn3", x)
      x = self._relu(x, self.config.relu_leakiness)
      x = self._conv("conv3", x, 1, out_filter // 4, out_filter, [1, 1, 1, 1])

    with tf.variable_scope("sub_add"):
      if in_filter != out_filter:
        # Projection shortcut (1x1 conv) when channel counts differ.
        orig_x = self._conv("project", orig_x, 1, in_filter, out_filter,
                            stride)
      x += orig_x
    log.info("Activation after unit {}".format(
        [int(ss) for ss in x.get_shape()[1:]]))
    return x

  def _decay(self):
    """L2 weight decay loss."""
    wd_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    log.info("Weight decay variables: {}".format(wd_losses))
    log.info("Total length: {}".format(len(wd_losses)))
    if len(wd_losses) > 0:
      return tf.add_n(wd_losses)
    else:
      log.warning("No weight decay variables!")
      return 0.0

  def _l1_loss(self):
    """L1 activation loss."""
    if len(self.l1_collection) > 0:
      log.warning("L1 Regularizers {}".format(self.l1_collection))
      return tf.add_n(self.l1_collection)
    else:
      log.warning("No L1 loss variables!")
      return 0.0

  def _conv(self, name, x, filter_size, in_filters, out_filters, strides):
    """Convolution."""
    with tf.variable_scope(name):
      # He initialization: stddev scaled by fan-out of the kernel.
      n = filter_size * filter_size * out_filters
      kernel = nn.weight_variable(
          [filter_size, filter_size, in_filters, out_filters],
          init_method="truncated_normal",
          init_param={"mean": 0,
                      "stddev": np.sqrt(2.0 / n)},
          wd=self.config.wd,
          name="w")
      return tf.nn.conv2d(x, kernel, strides, padding="SAME")

  def _relu(self, x, leakiness=0.0):
    """Relu, with optional leaky support."""
    return tf.where(tf.less(x, 0.0), leakiness * x, x, name="leaky_relu")

  def _fully_connected(self, x, out_dim):
    """FullyConnected layer for final output."""
    x_shape = x.get_shape()
    d = x_shape[1]
    w = nn.weight_variable(
        [d, out_dim],
        init_method="uniform_scaling",
        init_param={"factor": 1.0},
        wd=self.config.wd,
        name="w")
    b = nn.weight_variable(
        [out_dim], init_method="constant", init_param={"val": 0.0}, name="b")
    return tf.nn.xw_plus_b(x, w, b)

  def _global_avg_pool(self, x):
    # Average over the spatial dimensions, leaving [batch, channels].
    assert x.get_shape().ndims == 4
    return tf.reduce_mean(x, [1, 2])

  def infer_step(self, sess, inp):
    """Run inference."""
    return sess.run(self.output, feed_dict={self.input: inp})
| |
#!/usr/bin/env python2
# transpiled with BefunCompile v1.3.0 (c) 2017
import sys
import zlib, base64
_g = ("Ah+LCAAAAAAABADtmVc4nG/378eM3vuI3oIUDBmjRY/eJaL3SHRGLzEE0UWCJKJEjQhiCBGd0aJGDZMYwahRRtTBGLZf+R+8Z/vk3de1r73vk/u71vo833U/1zpcfCmU"
+ "gL8OKWAbM59ivxbUuxvlKvhD3K3C+tanWiVKJkibLzKHRuD1K34xKraiw5eZHW1CNz5pT5yFjsp7qnhur50g+L1LoDstR8qeBalxSwXfELsJdV87FFoWK5Hfzg8lL/bn"
+ "o86QF0qE1xeESb4L/6ODOZWLURXi7uijeUQkOYAKcbxIEUIKkFtmmKm+mBk1Zjj+oUQKAKg6pJMA/mtHdXPLAicM8Euvi8WckNxcW/dMAXMBMk9nwJJQABm3JHK2SZak"
+ "kA8fsRmM+whw6yQWK7yMMAYINjkYKjZakPPHEVwD8yJiAXkYX34evAgJ7Rpi+x58h62TAxdiKINHkLAGdDwghyOvqFLhK11v4/tIxJrmxZRxIyBGvw4dwUY+YDQRs5PO"
+ "j9MmcVTERwhlEIoAcvmESKZhohsg77LdZctEAKabGLPeTSwFYAaJHut9xFbA42Gi0fogEQN4vIDgql9FhAJCE6gaCwRqoHgaR16cKFXVQOTJPlzF1FEGl1uQRagFDvnx"
+ "ETAYY7DlE8I38AKipxODAbfT4mPJH660b39uLIBBhPB2kv3E/idNjZLhVjgKBoPuyGH9AJXHjuQ4RXKk9AIjnmxJKYQhugHzNSUX4w1Q8J8XV17xIR1LJTyAK4ZIRmth"
+ "1pIVmypJoIvK5pghOwb+AoIeHBiiG+2GA/UmYTRJzJp8WWwaZ4F3/PlEYP5gcp2ldt2tRR+65z2R6eihOW7+pwR2eGKLsioAfwUrFhILkMXTqXfZCXX2RyLIsrkskwkH"
+ "UHR3xFc84GCpvYWIHtr2wAOX4fMYCa91Eg0waXbTjjfXq89Pwqw6ITgbxctJyTaFwKXxxdFMmJBgavwz3Oqv4WsYReBgU/qWGo4sv7OtmKIJTHrPf36mDTP0Y7ANGdH0"
+ "GpQF55uZw+T+GGr70tIkC5ryc5jextiY+u38ocd9BvjjSppfRlxTwBn+GmxjVR2ONPvih7zzrW1stEkWKNa08/suLpLVH3l0eQGu42xOsyO4VjA055dXdBzhuY8f0pkb"
+ "X+brvwOUG4680wH3dYbi26OWfpH6pRJilP12UL1teAZMBIDer2A0Aice2vTlIgQnA1BsMl7jwdNLNxZ0KeJ4AC2NDGups00ooti3UM71/kiZvBFRzGisaEyw3Bud2+wL"
+ "gbb613cyF8cLGSSuco1ShFJqZ6Q/IsWkgH6j8HmhJjpf4QeoOqBPrxO0GwRSRplCe26BfLD8OVgUSdRXKWhfHejXIrP10i7JK0W/9Ntc3ULLVwSpwZDYXeMXXxO5p6I4"
+ "b+tVJW+Mxr1PYjMl/Phzp5ZbQUWA9/q0+uIa9Khpz9FJ88d7mYPxPa9Fjcn3fUu91OUnW6Fd2bxXHQCjHAElC54c+LywMY5n/pIu6JJVK5ebFW2sb2EP7yAtukFfA8UO"
+ "w7WUAngVyIvG9DkP0GE4jicwyRTtdKuxlae7f6SoklvrBdIL1tKt60PD7gHmxp1MgFDtJ7v0SqouU+CdcdJAzRA4yFtV/4Bd9jv+ibrqs2ksyz3gnInV43izQjbqFBtT"
+ "Wqi6HVwx5wlM7AZOXWaM1LxXqZxCofB9I5hM7bZeMoUpU1zlRzGefJC4mvRpUqYJK2lJvpyh991yavrCHDkrdys3muCSN+joBn6mth4RVvIwt6sNxdI6GqK15q6PxOga"
+ "Sp6jY3/xU0X2SLBSSIkJBZcoUqqx1do4hbkxwAoT5IqrVVgpRUvi5eKBqGfM2PJmelPBvDhyrJw1qSQnJYkHqegGBV0yaW7F3nj7isnVxjGldROR0LHblKaiHCaKbuNi"
+ "qdqb/FrvIJz5sdz5XZaaaSX++iXmHCldRcC2LtUpOhk39vgiLzH1zxBqOw2ewadSbpwbU/Up4YssB7Tv1sHkhQ/FzM0gnPG5YlcqxqXulTiLWZhJW8c7mEJoVUdMkmOc"
+ "SMPj1adoNEygU0V2HE8riqjnusTY7Eq8Qoc9aYXGYVmFlhxJ7X81M2GlD6+ndhmXWze9nqq19tejuN/F3lcrKWflVd/k1yxyNDFRDP8C2SClSaYQ+EnKELlf/qtZjMWg"
+ "nPfOgXmxvhzTXCyleQzDdiy5ZsyLVsSBr+4BpdEBA/UU5f094EQnmalgtqkat1aSoxYZnJUnhudqjxqUErHBbtbda8IvXBgvp2rHdxprJyZIX6jcr8pSy7eiRh7E8gGr"
+ "vk5vNqa5pRnCGCvtzJcTy13PRBjjJi1KQ2tguHJiNcqFjgpldYpxQk9tQR/VWOEC7svMbnRdYzpeWu6M8XrOtxRioT+NzAbJTK70FGWhtTAc10Fk1kmGYrRAU30vdafn"
+ "ifbONBsxd37amw0PmILvFGfL8XirieUkZboPqdsUasjArzfH3PvpY/btjSlb6URwpr9RagU/eKXHvd6w6LpJcZ4cnaGahLfih7XWdaoY3U1dlfcyIcZYYKRqDcvInHH5"
+ "MCazU2eHaSuQBc8iPfEq7ruw9JQi6azjary6669XcX2CaK19GKtCXYOwDBgFy60vMsl4Jfz+au0AZcOPUranE1k8SSNaNTxZdCNvqPXuBgvDeFBHjKy3fETcDC1foB+T"
+ "zrpHCblox0w/Zksy1KmZg8lKIQTdRqafoxNJFRemgG97lGipDS7DVFJF7OJzrsaEJ79syZLSdGuYpIyXecnD77IJw0RR5CteUrQHIhvaXvdi0tI4lakPsg4heCHYDVT8"
+ "ozResvDJKSGYOMoWrkGhRuKqmHeYOKIPefSrVcpE/cczTAdwkD3b8ajHyaWwh3UlccSgJrFfXqOwn3Ylkc6wJmtY4FZhP+lKYpphTdXUEZVJWZS8cwDLTckvIYAs02Ah"
+ "fIe5i45RzdRy9bFY6ZYWLurDw9DAGLYx1zUlWO70nEn86zJRy6fTi6YVcJms8Xe7VPdLAoKnNmzoXP1/A0hvTMjKVwR5x0O+BJPGfHfn4u5thmm8sVEgRVHLpKX5K+UW"
+ "GYjnxzi+Cy/Tans1YhL/6plajbW4hkt0/IiJ1it69Rp/OiEXUicpchmNN5YKVKiNQ/1UgbjefmLERVr2U0ehwKEUqbg2jglad/sVCOnmEZ+LENaZ0Uky5oK9bGWJ0cuz"
+ "Jj/8Xa/YoH0WWIPzvjwDqoF8ttLcknayHDxYPvGCskEGjUgrdi5UbjMReUmvWfMRRe/Gwqrx+fdT6fLA6zHedMF3XudxVOpKZkm9C2C6tHMJL+1xUwovMrjGk+poE8sz"
+ "JSgcvzlG49qXT9lgHivBqhmnhpDMosILoe+feJfrUiYc24uBeNVrPk5smrxg4F1sM+lTTHa0eQou5uc1+BRRZCCqWvOxmEuAgSJ32V6szp3Rq3LdqLwbJdLIPugmTTp7"
+ "vZH1pxsk+eNTaPZed5Zz7LXe6Rg8+razpwB92Zkc20nedRQyBe1Yc0koZO/tC18nf/k8Qa+abJVxliYSsiLKawHdgJ/q/PjcSescYcpocPKC4vtIuIlm8R9rR2BtmxhV"
+ "2dmYkN57pWJh48fiY/Zekskx11SCkXth+Ot6k4XsbhINYHNUVkHEbUOPmorRtI9k4SnbmnaXxnwjGZxVuEwXzyffLo3BBicDFCU95DfO1qesC+vQ9lnkdxHHRfwfUHoq"
+ "Z4lTtn+StzW9ISvv+RKZuKqOl/D6I2RItD0rxV3E2TlrLapCJerLJWR2VGzh9NKhRlbDq+bUF31NBtya1eZ233JFdmvkOWfVOVew3ghZ3ZQzbf+7s+RIKufYtONnduou"
+ "KEPL1KmuX/n6J1cj2lVg6GzfHwJQdkxcr4541QlS6XC0QwldvgOv1rxC49PlXMo+TQAXiNx0DKhxcnhgmwxh3zLLlZJ4vZcLQwR51TxxcLVJ+icHeb1X2RAZ4FXzYn7l"
+ "ZyKE/Uw9VyB1hYAEaXo76dcUOTz8mfBXslIK9mpvFvbY26sG5fDoR7wEOzDuvovfzZWZ/POPtSitqAKXK82WlIzWabu1WfMcLckssWkXfd9qGfnrOeMt45EGdGVnWr9c"
+ "OPjruV7d6+6AyGRvEGCRv9GWzfd0Cre0CIPPztunHHOQs6cfNVx+mwk8+cRFvKF4NOscS9XuPYOnT32n/5TyzUrNO85T1w+oZ4gZnKRPNvUV6aFFfZk/lc6xPKcVLg5z"
+ "EmzSD151v/FAg3vJZ4mayZK+ry6ZWSe6Fd5fvF5reFbk1VuXSEM316Biiwq6GnltXv+BWJxJg8vbb6cixpQNJ/paXiLSrGYPprv5Bm+FaNUYOkh5yL4oYhT6xEtSxO2S"
+ "QDrbvCgwpDscxz2IMDD/uajxlXS2bmVjW/ih+vP+mAcPOBUDjGqILdU2xs4iwrpeOnqlpvsPye562bUV9RjoZKCfZ+u49NHRlP1s9ddMevNcuFZA7QW5QS67QROxp0Oq"
+ "WoT5k/B94WtmJzVwVNbsmlK8VCkbVZW3ec5SQYSlKWTlSMYzQ0Qv1H+MZkvHurhCnuEGVRlGeUeKltky41g1g8G8nLIMg7Dni+HlzQLqM4PXl3SK06fGKO76qOz05hwe"
+ "8LOgMxpV+TSZI4t1fpdf2XxNTh3x+PlApa7129wPgQWiQvSXn9vyMBfwCZfKJN2oFhxNs+xKnxwnvzSopn/DwEgNV9Id2veUWEm45v6e1qYDarwl1OP0V7LuVf32jf2x"
+ "hCQy9qvio3pd95EpJUNxd5IMk2jqkxvbaEAD0gJmD8z3mD/y+/5uvjN4j5LV+mWPQQCNbaNEBgy8Vxp8VWqyJ27z2Va0NDvQvaX4XL9hz2zkmv/3ZpONB/2ZNq/GLPep"
+ "7Oohr+Su7LnNFn/U96huipOwAusX0IVlEDJyPAW72L4qR4TBlgdw+j6f5HuqLcBVgjzYOLCInHUUaohNqzIxQe53GtWz6TyfugHjNmHevVILEQVbh3FPtvhKqkS5k/wk"
+ "+pksn/cnSCleArVCloiinUpzZLwK7lOijpCEoaFn15JMyezD08nqoUSokjULXw3dnEjK/Wu0ltnMTuFTIipOLLsRrOQ3DZLk7JIzCyAFPgl0vAHuBqVM4PfV8jzSDz1r"
+ "VO16sIU6k2UKPNIimzXKdn0bpZST7xWUzOI9ahWd+rGlOiU4I9ew8FKDKWd48/NDqxdl5hE5PbSTD9ybs4UmBPWRR4EDV0qHBuPkKUo1c+yJ7ixQzdxIqlh6nRToxvk9"
+ "cLnxBCDy1VUDBWHJiXpPk5Lyh5Hs7/QVrkt1HVReRkb508LDCmJSE9Atk8oakfzgv6M9qJdpZQ39R3ZhWsWbt7D1VZdR9++GyU0reto0qJLn1E2czsh9oVRFiAwWKm5W"
+ "19i9fjSxLU+flgb1TpmS+ANozqe1Tob8GcMkTjbobQNfT3rHzXA0u9uUIZL0D20SILsLy/2TjYZE5pnx7eLFJa587icKMx9cVqmblVJt4iD7f7b6J5uNxTWGgmxjJOoE"
+ "bu+xslasyafSiE/7kzWPPTugnGoxU7q7ecDt8PonM9+TnJAKd68Xbydp7ipihcz2zYJGxMcC/Z289BF3PBwqq5sYPBQbII3uQOGN+coGFMO6wieJL57ABxVu9+eXOLge"
+ "Sy52RvK9dAfGVZRPr/scGrZrzzsFDUTe4p+iXfHgjqzsK6G+HLNqVXJ4UPuL8XFZ99tlEs0B1KU5JQ75dQUGcg/WLkp36Z44ZkKrAo4m10W8iKFVLvz3dbvDHQaP/zB4"
+ "Kr2FtAbRk+aWPfjYraIqa7MjupQPGtCzfvjBaucgZHLk+1lto6haTZLH9O1znsC7u5zdrR7JfbkGGrKmeFFnPrJxj5zpc4+bJyqTkfxOCMJNSlnzH81ryDfviN2UHOSL"
+ "qx42F1Rlb+7IRUdx9ORW+RJFx4ykUFHFwqmxgZlsPjFMoWm9abGP68yrLyi3KbKr77o5NkzIFBSFhB9SMu0XPv3tMRMa3zd+42E/B6WSgUVrg0bgiB1Uo0913bl94zN3"
+ "set6l4CZljqL61zu0tufV3lZG4MT896M9aZue8I06xoP7/YMxykPnsoOmMoMWvMG9bOqaRS9+ES+J8jFFZgRuUb71Ejko2O6VOC1tTP31BkOI9gjGq5UgVS5NQaX1j6/"
+ "ZquIbOV4ifmbEc84aeam2OA5NPrfYEPP6lbVVSaDpULo7aTtXTojnnrnrQ6zJ67kCXlJ10N9+VjWxlxzHjY5xRZ96NJqVnwDS5AYhaakcdHZjcwO1Oh+mnhAmwHj/Hnd"
+ "+13LK/nY9YzhM/YAPFnaiO1wlq66M/d7LmE/uUBYkMwtkSRKgaLcy6j+/k/DCEViJhrUvy6im0c/8n16cs4YL7yKnBmny5DjOb3hs34nCrp9thfzPVuEKo/+lhiuvKW0"
+ "QdfL4aZonjD89nBRc5nnuc7qyTR1ly7z1+6bFSkbPhVOzN0W7ExGGfL8ZVKRFtrG0sebG+vkVDyMtz4gN293NNESPpl16UpT9Nhu0Gz6bDWqBc5DR4dUAtWHH55/o+e8"
+ "cyzDcpHWx0zek6o1FZwj+yNxJ/uh1QcWZz2h5E0fn0P+5lGHBfFVCdAqtLRl8sx+4M/uEiWFLg/TJOeEfUjU5upY5B5UkoC+IvWZjdNWJgEXEJZGkkB7fffma+vdN3Vb"
+ "IqE6wz730t7LT38y3mrPxgjUxlytuMmrcQa/VXU1RKfK6d6bR2Ev3MVrWZaeW/ZRR/LKLmnRePXABKelqycajBi44bknYGwO49AVCw+LSgqjxpGDzT+0iTIj9ltP9r1v"
+ "EWA3953MQbneH476T2BvHoPjd2vqm2aV/JsNy+x7jxdrWp8QeR3+vOj2lt4X1Bolb70F/6r9rGGgx5NDkfMPqlWcT/6HyL6P0/qDTrrJo8fLjrnDzbFPAkgKcj5vaqz9"
+ "dJ7goeWmOcb0f2LwIozVNmQHki31AX1kXOe06T8PZNFwyLD6fbnCpeJfZiiSFzWm00kUuUnsfwF743smxULCwGlFkGrjUxhnnzRUecC/IFwnE7V8O9cARoiGR08a3j9/"
+ "seQlPbMFKiOBvak4ByX8eFg2vvmIBMYVAjgEQ/xeeYtwtoazOr5O8nFcEJsdvhV6KNxpP2mLalSD5bjisPLRQg+bYp0CvGQG0J9dANojcOruMrRH8gf9BABrmVy8qYYX"
+ "rHvs5S3AdZa87kKmhk2VgEXSaLOLFA4oKnj7kQQ3gHETweKytzP2eLn5WPXJqoWU8Dlvm6iEPgOreyRYqGtnImyhYku16Nt7KUGCRBpFdTL91EYE1+FAyIair6l/oe3s"
+ "ZjlD+92ZsBR/kAa0LX0D3SKT/0UMBtEbAn2gi1T9CveUBmkE3c74OUwBzWsQlobYdty6Zvj4Tou7PhBEv03/TvqPwFz8x34a9IHCy+Z6ikPOBkHxhakNy8Eu3u/u6OSq"
+ "aJuSgGvJ4bYFVjQZQKaftgaGJk9gJRuxwGvNeQa6Rk9FSjZcgLrhtoaaeknsJRsaQHcFTJpyiU6s6SHTkzsnr0uMxFhUPtzmZ7xBDK4/iesSffkVVOxakA7NNzso3PBT"
+ "bV8GMT+c1OO9l0/nJoCPCxzocaYwpwD9TGwTtIJ8Rqt9R0XDnJE67AbiwUZwSGE6+kYG2d2U/LvctOtCR8kPUFPOa6wUoNNEJSE7SC1avhcVrew8qgM2uPkvfmuCjOV/"
+ "8NRAVI/z1ylyEGfSP7jUZ1S01r+4gTek8MU/OM9dLlo34aNncah65/dZ5CCFpDYhJ8hH9LV3qGgj5x0dDoMbwXrukMJMtCIz+WRyvhkn7fp/4ErCrpAP/+IMulcMrv+L"
+ "a5iRZ/4P/jwLVfpfxhPukYOs/8UFXqKi7/+L67hCCl/+g/OYXaF1u/r/Dq4Zruvnb0FVA+fje4AxB3phvlJcCXkJ4MLvqvXZXe3si3z8HP7/y/9nyvQDuk2R0+lBcB2j"
+ "we06RFWKhr+ZT8Ghix13RqY/oYpvE/s8L2v2EdESnLBcilHe/NosJyy6cmSlMt0rIH/ty+hprcT/rRaNwqINDGJ+ukZefCYLGamlQ48IluDXy0JzSrNfu+Su8m3jrJSn"
+ "e8VPaiJUY20+MSHfzl2aZaaE71fxbmJzDJZTHu1bgteWi+YVUy7NRFNiPteFVKXEfnLcjjbpVYObMohZKdhcvvTDrkfwt4na6uJcbDmub5CktqIEW4Xr7CJpaq7D1ihb"
+ "NHlPpwW9tGd/f/jip12dV1XlcUNjNcMgeKSstvp0H9vOy2B1K6KvWYsbPWEB80IN+XBwhD4UW61mdlxSVu5vrCRFN6YzXMeJkCjjNmOTW3Sj0wmnhVgf5oWlduXyxlzS"
+ "40bk32Ut3H5sfIt2dCbhvHARx7Sw3K7i1hhBatnEwMiM4yS5jwPFxbRoRr8mAIsWfJgcV5XVKBv1SbObjBmBOGaShziWHjiOniQYJ9RjiiMnIcE8SozFkAGBjaOMXxsJ"
+ "oDt+fIL08BmKxAXlO9yLPgyMXZHJ6NE5Hv54AkdtbouCKi1eylkyJCCaHxfQo4z7BTDFUCQpYT6SdDUaM882KYJ64HxXG/xlySmXlfW9lnA0jAOR2egeOzB/JkEarhBi"
+ "EW2JW+r5gDEkiWkaZXkPx5P2r7ab5HXaMfPnE7R2GUM0oukxIckRTZskBwvt9zAjc3SFDPjPWHqcMyC/cYftC/wLcL0vsu44q4VJVRvX0quNCQLk+BdIIFaqgWPZhMe7"
+ "z1rIVENwXORIKdXOyK9/7804cS72LwlNoDr4/IEHZpQrO5PwyQKLmP5r9aasiN8nu7/YfmjVVHCjRhi/OIoispK0riHAB0sIZ4DyCuLGP2IJIV6/hHD83xaBSwh7gPIa"
+ "QvI/hc8iQuoAi7AG+KwgZOqxCMv/FIFYxH2AzxpC9m+BW0DIQ7GIe3+Lg0WEGQC3jFAKXEQYAXCLCDXoIsLgb3GwgNAD4FYQmoELCC0A7wJCF7qAuPO3WA5QEQAw+HeI"
+ "YJv4GKMTmuZZfmCMKTp1MchkN1wCaAGKC9GE4EtI/ATxQ4KvCQP/bD0HiZYArwWE0wN/FTZAbqPDlSTMKGnnfdwXLW48iKQmn9D2opcIBWxhEUHCcBUagGKTA688xhjY"
+ "icDhtcnxdwF+iQS2jE4iLYB7BZHS2IRnUEURRWr9JfmiEwmuWOwvRYAAnqa3uy0IYI0LGQzFpYHGAjos+vx9uVTp8C2u3PgyElb/Dkctvx0OVXm8Z1l2RBugz6/Ds6IR"
+ "T+0ohr+hPdpmCmDHWaQnR6QDhjGSRbGYTdDzHqLWz662G/+9NTUAEO3yDVDY9Ho+KuR8LnOnwNa2NTLwnCH9rS0iKIwYRTxEdkQdLA/sp3wZbW9/TDjDjgylvLcYtbez"
+ "s2092RMNcQYco419JSSq3yLwyxtn4x0X2+Mrs/vIjubzlxYO7YQV5FuVsIN0FdsLbEoBUvmM5pGk8hkX8mIX225MHi3V/vhkbtIB8S08iKjSfnp6tr2vfXHQssk39M33"
+ "HLsp+etCpaP19OhHbmXKvm9BG/F86+yKSg6Jzp+Nw0zRdMTx4jyv0sW6du2+9loUYfu1fevjYOJGc+Pxye8T2fnWsKPtzM2ovPy2yN3TL6NhEWFh56t7e9q/vAFaESd4"
+ "vm4JPuLJ+OGOhO8ZY/DR7I3fh5I7DY9Gfc6V93YPjR/NPw44X8HPqLwmEjV+2eZHHg/QAeJPfmNT0hGMEafbyKh7R9iV3pXDdEtfiW9ICcl5RBTi5OikJWrvbG/7EKkS"
+ "EXS4/91iRuVtW3jAEfaQELWTO0Rz3kPb6SeJlJCYV25BGCsRzk/5RqPCLOw6Pv2y7ah4e352cDpysLp3RpB9PapyEdIeetI7ZJE+Ws1bcBF6jtN469B6kj5PvI0AAoZa"
+ "1hjqccTFjVW+Z3PH5gh+EOTnydDmvLxy+O7676UCxNnpnC9Xx+XM9rAr31sKBn4R44KIQxYWHWere98nLWY6+guWQDoB8KO9lM3L37w431pBdhydbh2e9KarKLWfe6/s"
+ "V4L5iLs/uBC/gM9XD9ORyHkL8ChSmQhuX0gjK0y4oPDi49Lsf8YA+F9CaCJi5SEAAA==")
# Decode the embedded playfield: the first byte of the decoded blob is the
# number of gzip layers; the remainder is decompressed that many times and
# expanded into a flat list of byte values.  Decode the base64 blob once
# instead of twice.
_raw = base64.b64decode(_g)
g = _raw[1:]
for i in range(ord(_raw[0])):
    g = zlib.decompress(g, 16+zlib.MAX_WBITS)
g=list(map(ord, g))
def gr(x, y):
    """Read playfield cell (x, y); out-of-bounds reads yield 0."""
    if 0 <= x < 1224 and 0 <= y < 833:
        return g[y * 1224 + x]
    return 0
def gw(x, y, v):
    """Write v into playfield cell (x, y); out-of-bounds writes are dropped."""
    if 0 <= x < 1224 and 0 <= y < 833:
        g[y * 1224 + x] = v
def td(a, b):
    """Befunge-style division: dividing by zero yields 0."""
    return a // b if b else 0
def tm(a, b):
    """Befunge-style modulo: modulo by zero yields 0."""
    return a % b if b else 0
s=[]  # Befunge operand stack; empty pops/peeks yield 0 (see sp/sr).
def sp():
global s
if (len(s) == 0):
return 0
return s.pop()
def sa(v):
global s
s.append(v)
def sr():
global s
if (len(s) == 0):
return 0
return s[-1]
# ---------------------------------------------------------------------------
# Machine-generated, obfuscated program (apparently emitted by an
# esolang-to-Python compiler). Each _N() below is one basic block of a
# state machine: it manipulates the global grid `g` (via gr/gw) and the
# stack `s` (via sp/sa/sr), and returns the index of the next block to run.
# The dispatch loop at the bottom executes blocks until one returns 28
# (the halt state, produced by _19 after printing the result).
# NOTE(review): the `/` in _24 is integer division only under Python 2 —
# this section is Python 2 specific. Do not hand-edit block bodies.
# ---------------------------------------------------------------------------
def _0():
    # Initialise grid dimensions and the search limit (1,000,000).
    gw(1,1,1224)
    gw(2,1,817)
    gw(3,1,1000000)
    gw(4,2,0)
    sa(gr(3,1)-1)
    sa(gr(3,1)-1)
    gw(tm(gr(3,1)-1,gr(1,1)),(td(gr(3,1)-1,gr(1,1)))+4,0)
    return 1
def _1():
    return (27)if(sp()!=0)else(2)
def _2():
    gw(7,0,0)
    sp();
    sa(1)
    sa(1)
    return 3
def _3():
    # Build a small lookup table in cells (9..18, 0).
    sa(gr(7,0)+9)
    gw(7,0,gr(7,0)+1)
    sa(0)
    v0=sp()
    v1=sp()
    gw(v1,v0,sp())
    sa(sp()*gr(7,0))
    return (26)if(gr(7,0)!=10)else(4)
def _4():
    # Seed a few known special values into the grid.
    gw(0,4,0)
    gw(tm(169,gr(1,1)),(td(169,gr(1,1)))+4,3)
    gw(tm(363601,gr(1,1)),(td(363601,gr(1,1)))+4,3)
    gw(tm(1454,gr(1,1)),(td(1454,gr(1,1)))+4,3)
    gw(tm(871,gr(1,1)),(td(871,gr(1,1)))+4,2)
    gw(tm(45361,gr(1,1)),(td(45361,gr(1,1)))+4,2)
    gw(tm(872,gr(1,1)),(td(872,gr(1,1)))+4,2)
    gw(tm(45362,gr(1,1)),(td(45362,gr(1,1)))+4,2)
    gw(1,2,1)
    sp();
    sa(1)
    return 5
def _5():
    gw(2,2,0)
    gw(9,2,0)
    gw(3,2,0)
    return 6
def _6():
    return (7)if((gr(gr(3,2)+9,2)-gr(1,2))!=0)else(13)
def _7():
    sa(gr(1,2))
    sa(gr(2,2)+10)
    gw(2,2,gr(2,2)+1)
    sa(2)
    v0=sp()
    v1=sp()
    gw(v1,v0,sp())
    gw(3,2,gr(3,2)+1)
    sa(0)
    sa(gr(1,2))
    return (24)if((gr(1,2))!=0)else(8)
def _8():
    sp();
    return 9
def _9():
    v0=sp()
    v1=sp()
    sa(v0)
    sa(v1)
    sa(sr());
    return (23)if(sp()!=0)else(10)
def _10():
    sa(sp()+sp());
    sa(sr());
    gw(1,2,sp())
    sa((1)if(sp()>gr(3,1))else(0))
    return (6)if(sp()!=0)else(11)
def _11():
    global t0
    t0=gr(tm(gr(1,2),gr(1,1)),(td(gr(1,2),gr(1,1)))+4)
    return (12)if((gr(tm(gr(1,2),gr(1,1)),(td(gr(1,2),gr(1,1)))+4))!=0)else(6)
def _12():
    global t0
    t0=t0+gr(2,2)
    gw(2,2,t0)
    return 13
def _13():
    return (14)if(gr(2,2)!=60)else(22)
def _14():
    sa(1)
    sa((1)if(gr(10,2)>gr(3,1))else(0))
    return 15
def _15():
    return (17)if(sp()!=0)else(16)
def _16():
    global t0
    t0=gr(2,2)+1
    sa(sr());
    sa(sr());
    sa(t0)
    v0=sp()
    v1=sp()
    sa(v0)
    sa(v1)
    v0=sp()
    sa(sp()-v0)
    v0=sp()
    v1=sp()
    sa(v0)
    sa(v1)
    sa(sp()+9)
    sa(2)
    v0=sp()
    sa(gr(sp(),v0))
    sa(tm(sr(),gr(1,1)))
    v0=sp()
    v1=sp()
    sa(v0)
    sa(v1)
    sa(td(sp(),gr(1,1)))
    sa(sp()+4)
    v0=sp()
    v1=sp()
    gw(v1,v0,sp())
    return 17
def _17():
    return (21)if((sr()-gr(3,2))!=0)else(18)
def _18():
    sp();
    return (20)if((sr()-gr(3,1))!=0)else(19)
def _19():
    # Halt state: emit the accumulated answer from cell (4,2) and stop.
    sys.stdout.write(str(gr(4,2))+" ")
    sys.stdout.flush()
    sp();
    return 28
def _20():
    sa(sp()+1)
    sa(sr());
    gw(1,2,sp())
    return 5
def _21():
    sa(sp()+1)
    sa((1)if(gr(sr()+9,2)>gr(3,1))else(0))
    return 15
def _22():
    gw(4,2,gr(4,2)+1)
    return 14
def _23():
    sa(sp()+sp());
    return 9
def _24():
    sa(gr((sr()%10)+9,0))
    v0=sp()
    v1=sp()
    sa(v0)
    sa(v1)
    sa(sp()/10);
    sa(sr());
    return 25
def _25():
    return (24)if(sp()!=0)else(8)
def _26():
    sa(sr());
    return 3
def _27():
    sa(sp()-1)
    sa(sr());
    sa(0)
    v0=sp()
    v1=sp()
    sa(v0)
    sa(v1)
    sa(tm(sr(),gr(1,1)))
    v0=sp()
    v1=sp()
    sa(v0)
    sa(v1)
    sa(td(sp(),gr(1,1)))
    sa(sp()+4)
    v0=sp()
    v1=sp()
    gw(v1,v0,sp())
    sa(sr());
    return 1
# Dispatch table and driver loop: run blocks until a block returns 28.
m=[_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13,_14,_15,_16,_17,_18,_19,_20,_21,_22,_23,_24,_25,_26,_27]
c=0
while c<28:
    c=m[c]()
| |
"""SocksiPy - Python SOCKS module.
Version 1.00
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
"""
# pylint: skip-file
import socket
import struct
# Supported proxy protocol identifiers (values for setproxy/setdefaultproxy).
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3
# Module-wide default proxy settings tuple; populated by setdefaultproxy().
_defaultproxy = None
# Keep a reference to the real socket class so socksocket can delegate to it
# even if socket.socket is later monkeypatched with socksocket itself.
_orgsocket = socket.socket
class ProxyError(Exception):
    """Base class for all proxy-related errors.

    ``value`` is conventionally a ``(code, message)`` tuple taken from one
    of the error tables below.
    """
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)

# The subclasses below previously re-declared __init__/__str__ verbatim;
# they now simply inherit them from ProxyError (identical behavior, no
# duplication).
class GeneralProxyError(ProxyError):
    """Generic negotiation/transport failure (see _generalerrors)."""
    pass

class Socks5AuthError(ProxyError):
    """SOCKS5 authentication failure (see _socks5autherrors)."""
    pass

class Socks5Error(ProxyError):
    """SOCKS5 server-reported failure (see _socks5errors)."""
    pass

class Socks4Error(ProxyError):
    """SOCKS4 server-reported failure (see _socks4errors)."""
    pass

class HTTPError(ProxyError):
    """HTTP proxy returned a non-200 status for the CONNECT request."""
    pass
# Messages for GeneralProxyError codes (indexed by code).
_generalerrors = ("success",
    "invalid data",
    "not connected",
    "not available",
    "bad proxy type",
    "bad input")

# SOCKS5 reply-code messages (REP field of the server reply); index 9 is
# the catch-all for codes outside the 0-8 range defined by the protocol.
_socks5errors = ("succeeded",
    "general SOCKS server failure",
    "connection not allowed by ruleset",
    "Network unreachable",
    "Host unreachable",
    "Connection refused",
    "TTL expired",
    "Command not supported",
    "Address type not supported",
    "Unknown error")

# SOCKS5 username/password subnegotiation outcomes.
_socks5autherrors = ("succeeded",
    "authentication is required",
    "all offered authentication methods were rejected",
    "unknown username or invalid password",
    "unknown error")

# SOCKS4 reply messages (server codes 90-93 map to indexes 0-3).
_socks4errors = ("request granted",
    "request rejected or failed",
    "request rejected because SOCKS server cannot connect to identd on the client",
    "request rejected because the client program and identd report different user-ids",
    "unknown error")
def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
    """Set the module-wide default proxy.

    Every socksocket created afterwards uses these settings unless
    explicitly reconfigured via setproxy().
    """
    global _defaultproxy
    _defaultproxy = (proxytype, addr, port, rdns, username, password)
class socksocket(socket.socket):
    """socksocket([family[, type[, proto]]]) -> socket object
    Open a SOCKS enabled socket. The parameters are the same as
    those of the standard socket init. In order for SOCKS to work,
    you must specify family=AF_INET, type=SOCK_STREAM and proto=0.

    NOTE: this module is written for Python 2 (str-based socket I/O).
    """
    def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None):
        _orgsocket.__init__(self, family, type, proto, _sock)
        if _defaultproxy != None:
            self.__proxy = _defaultproxy
        else:
            # (proxytype, addr, port, rdns, username, password)
            self.__proxy = (None, None, None, None, None, None)
        self.__proxysockname = None
        self.__proxypeername = None

    def __recvall(self, bytes):
        """__recvall(bytes) -> data
        Receive EXACTLY the number of bytes requested from the socket.
        Blocks until the required number of bytes have been received.
        """
        data = ""
        while len(data) < bytes:
            data = data + self.recv(bytes - len(data))
        return data

    def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
        """setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
        Sets the proxy to be used.
        proxytype - The type of the proxy to be used. Three types
                are supported: PROXY_TYPE_SOCKS4 (including socks4a),
                PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
        addr -     The address of the server (IP or DNS).
        port -     The port of the server. Defaults to 1080 for SOCKS
                servers and 8080 for HTTP proxy servers.
        rdns -     Should DNS queries be performed on the remote side
                (rather than the local side). The default is True.
                Note: This has no effect with SOCKS4 servers.
        username - Username to authenticate with to the server.
                The default is no authentication.
        password - Password to authenticate with to the server.
                Only relevant when username is also provided.
        """
        self.__proxy = (proxytype, addr, port, rdns, username, password)

    def __negotiatesocks5(self, destaddr, destport):
        """__negotiatesocks5(self, destaddr, destport)
        Negotiates a connection through a SOCKS5 server.
        """
        # First we'll send the authentication packages we support.
        if (self.__proxy[4] != None) and (self.__proxy[5] != None):
            # The username/password details were supplied to the
            # setproxy method so we support the USERNAME/PASSWORD
            # authentication (in addition to the standard none).
            self.sendall("\x05\x02\x00\x02")
        else:
            # No username/password were entered, therefore we
            # only support connections with no authentication.
            self.sendall("\x05\x01\x00")
        # We'll receive the server's response to determine which
        # method was selected.
        chosenauth = self.__recvall(2)
        if chosenauth[0] != "\x05":
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        # Check the chosen authentication method
        if chosenauth[1] == "\x00":
            # No authentication is required
            pass
        elif chosenauth[1] == "\x02":
            # Perform basic username/password authentication.
            # Bug fix: the password length previously read self.proxy[5]
            # (an undefined attribute) instead of self.__proxy[5].
            self.sendall("\x01" + chr(len(self.__proxy[4])) + self.__proxy[4] +
                         chr(len(self.__proxy[5])) + self.__proxy[5])
            authstat = self.__recvall(2)
            if authstat[0] != "\x01":
                # Bad response
                self.close()
                raise GeneralProxyError((1, _generalerrors[1]))
            if authstat[1] != "\x00":
                # Authentication failed
                self.close()
                # Bug fix: use call syntax; the old "raise X,(...)" form is
                # Python-2-only and inconsistent with the rest of the module.
                raise Socks5AuthError((3, _socks5autherrors[3]))
            # Authentication succeeded
        else:
            # Reaching here is always bad
            self.close()
            if chosenauth[1] == "\xFF":
                raise Socks5AuthError((2, _socks5autherrors[2]))
            else:
                raise GeneralProxyError((1, _generalerrors[1]))
        # Now we can request the actual connection
        req = "\x05\x01\x00"
        # If the given destination address is an IP address, we'll
        # use the IPv4 address request even if remote resolving was specified.
        try:
            ipaddr = socket.inet_aton(destaddr)
            req = req + "\x01" + ipaddr
        except socket.error:
            # Well it's not an IP number, so it's probably a DNS name.
            if self.__proxy[3] == True:
                # Resolve remotely
                ipaddr = None
                req = req + "\x03" + chr(len(destaddr)) + destaddr
            else:
                # Resolve locally
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
                req = req + "\x01" + ipaddr
        req = req + struct.pack(">H", destport)
        self.sendall(req)
        # Get the response
        resp = self.__recvall(4)
        if resp[0] != "\x05":
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        elif resp[1] != "\x00":
            # Connection failed
            self.close()
            # Bug fix: the SOCKS5 reply code indexes _socks5errors (10
            # entries), not _generalerrors (6 entries — index 9 raised
            # IndexError); and ProxyError subclasses take one tuple arg.
            if ord(resp[1]) <= 8:
                raise Socks5Error((ord(resp[1]), _socks5errors[ord(resp[1])]))
            else:
                raise Socks5Error((9, _socks5errors[9]))
        # Get the bound address/port
        elif resp[3] == "\x01":
            boundaddr = self.__recvall(4)
        elif resp[3] == "\x03":
            resp = resp + self.recv(1)
            # Bug fix: the domain-name length is a single byte; it must be
            # converted with ord() before being used as a byte count.
            boundaddr = self.__recvall(ord(resp[4]))
        else:
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        boundport = struct.unpack(">H", self.__recvall(2))[0]
        self.__proxysockname = (boundaddr, boundport)
        if ipaddr != None:
            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
        else:
            self.__proxypeername = (destaddr, destport)

    def getproxysockname(self):
        """getsockname() -> address info
        Returns the bound IP address and port number at the proxy.
        """
        return self.__proxysockname

    def getproxypeername(self):
        """getproxypeername() -> address info
        Returns the IP and port number of the proxy.
        """
        return _orgsocket.getpeername(self)

    def getpeername(self):
        """getpeername() -> address info
        Returns the IP address and port number of the destination
        machine (note: getproxypeername returns the proxy)
        """
        return self.__proxypeername

    def __negotiatesocks4(self, destaddr, destport):
        """__negotiatesocks4(self, destaddr, destport)
        Negotiates a connection through a SOCKS4 server.
        """
        # Check if the destination address provided is an IP address
        rmtrslv = False
        try:
            ipaddr = socket.inet_aton(destaddr)
        except socket.error:
            # It's a DNS name. Check where it should be resolved.
            if self.__proxy[3] == True:
                # SOCKS4a: placeholder IP tells the server to resolve.
                ipaddr = "\x00\x00\x00\x01"
                rmtrslv = True
            else:
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
        # Construct the request packet
        req = "\x04\x01" + struct.pack(">H", destport) + ipaddr
        # The username parameter is considered userid for SOCKS4
        if self.__proxy[4] != None:
            req = req + self.__proxy[4]
        req = req + "\x00"
        # DNS name if remote resolving is required
        # NOTE: This is actually an extension to the SOCKS4 protocol
        # called SOCKS4A and may not be supported in all cases.
        if rmtrslv == True:
            req = req + destaddr + "\x00"
        self.sendall(req)
        # Get the response from the server
        resp = self.__recvall(8)
        if resp[0] != "\x00":
            # Bad data
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        if resp[1] != "\x5A":
            # Server returned an error
            self.close()
            if ord(resp[1]) in (91, 92, 93):
                raise Socks4Error((ord(resp[1]), _socks4errors[ord(resp[1]) - 90]))
            else:
                raise Socks4Error((94, _socks4errors[4]))
        # Get the bound address/port
        self.__proxysockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
        # Bug fix: rmtrslv is a bool (never None), so the old "!= None" test
        # always took the first branch and reported the 0.0.0.1 placeholder
        # as the peer for remotely-resolved names.
        if rmtrslv == False:
            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
        else:
            self.__proxypeername = (destaddr, destport)

    def __negotiatehttp(self, destaddr, destport):
        """__negotiatehttp(self, destaddr, destport)
        Negotiates a connection through an HTTP server (CONNECT method).
        """
        # If we need to resolve locally, we do this now
        if self.__proxy[3] == False:
            addr = socket.gethostbyname(destaddr)
        else:
            addr = destaddr
        self.sendall("CONNECT " + addr + ":" + str(destport) + " HTTP/1.1\r\n" + "Host: " + destaddr + "\r\n\r\n")
        # We read the response until we get the string "\r\n\r\n"
        resp = self.recv(1)
        while resp.find("\r\n\r\n") == -1:
            resp = resp + self.recv(1)
        # We just need the first line to check if the connection
        # was successful
        statusline = resp.splitlines()[0].split(" ", 2)
        if statusline[0] not in ("HTTP/1.0", "HTTP/1.1"):
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        try:
            statuscode = int(statusline[1])
        except ValueError:
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        if statuscode != 200:
            self.close()
            raise HTTPError((statuscode, statusline[2]))
        self.__proxysockname = ("0.0.0.0", 0)
        self.__proxypeername = (addr, destport)

    def connect(self, destpair):
        """connect(self, destpair)
        Connects to the specified destination through a proxy.
        destpair - A tuple of the IP/DNS address and the port number.
        (identical to socket's connect).
        To select the proxy server use setproxy().
        """
        # Do a minimal input check first.
        # Bug fix: the old check chained "in (list,tuple)==False", which
        # compares the tuple with False and can never fire; spell the
        # membership test out explicitly.
        if (type(destpair) not in (list, tuple)) or (len(destpair) < 2) or (type(destpair[0]) != str) or (type(destpair[1]) != int):
            raise GeneralProxyError((5, _generalerrors[5]))
        if self.__proxy[0] == PROXY_TYPE_SOCKS5:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 1080
            _orgsocket.connect(self, (self.__proxy[1], portnum))
            self.__negotiatesocks5(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 1080
            _orgsocket.connect(self, (self.__proxy[1], portnum))
            self.__negotiatesocks4(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_HTTP:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 8080
            _orgsocket.connect(self, (self.__proxy[1], portnum))
            self.__negotiatehttp(destpair[0], destpair[1])
        elif self.__proxy[0] == None:
            _orgsocket.connect(self, (destpair[0], destpair[1]))
        else:
            raise GeneralProxyError((4, _generalerrors[4]))
| |
#!/usr/bin/env python
#
# HTTP support for ooni-probe
# by Jacob Appelbaum <jacob@appelbaum.net>
# Arturo Filasto' <art@fuffa.org>
#
from socket import gethostbyname
import ooni.common
import ooni.helpers
import ooni.report
import urllib2
import httplib
from urlparse import urlparse
from pprint import pprint
import pycurl
import random
import string
import re
from pprint import pprint
try:
from BeautifulSoup import BeautifulSoup
except:
pass # Never mind, let's break later.
# By default, we'll be Torbutton's UA
default_ua = { 'User-Agent' :
               'Mozilla/5.0 (Windows NT 6.1; rv:5.0) Gecko/20100101 Firefox/5.0' }
# Use pycurl to connect over a proxy
PROXYTYPE_SOCKS5 = 5
default_proxy_type = PROXYTYPE_SOCKS5
default_proxy_host = "127.0.0.1"
# NOTE(review): stored as a string while http_proxy_fetch's default is the
# int 9050; pycurl's PROXYPORT expects an int — confirm before using this.
default_proxy_port = "9050"
#class HTTPResponse(object):
# def __init__(self):
"""A very basic HTTP fetcher that uses Tor by default and returns a curl
object."""
def http_proxy_fetch(url, headers, proxy_type=5,
                     proxy_host="127.0.0.1",
                     proxy_port=9050):
    """Fetch *url* through a proxy (Tor SOCKS5 by default) with pycurl.

    Returns a (response, http_code) tuple. Note that perform() returns
    None unless a WRITEFUNCTION is configured; the body goes to stdout.
    The *headers* parameter is currently unused — a fixed UA is sent.
    """
    request = pycurl.Curl()
    request.setopt(pycurl.PROXY, proxy_host)
    request.setopt(pycurl.PROXYPORT, proxy_port)
    request.setopt(pycurl.PROXYTYPE, proxy_type)
    request.setopt(pycurl.HTTPHEADER, ["User-Agent: Mozilla/5.0 (Windows NT 6.1; rv:5.0) Gecko/20100101 Firefox/5.0"])
    request.setopt(pycurl.URL, url)
    response = request.perform()
    # Bug fix: getinfo() is a method of the Curl handle; the old code called
    # a bare, undefined name and raised NameError after every fetch.
    http_code = request.getinfo(pycurl.HTTP_CODE)
    return response, http_code
"""A very basic HTTP fetcher that returns a urllib2 response object."""
def http_fetch(url,
               headers=default_ua,
               label="generic HTTP fetch"):
    """Fetch *url* with urllib2, sending *headers*; return the response object."""
    req = urllib2.Request(url, None, headers)
    return urllib2.urlopen(req)
"""Connect to test_hostname on port 80, request url and compare it with the expected
control_result. Optionally, a label may be set to customize
output. If the experiment matches the control, this returns True with the http
status code; otherwise it returns False.
"""
def http_content_match(experimental_url, control_result,
                       headers=None,
                       label="generic HTTP content comparison"):
    """Fetch experimental_url and compare its body to control_result.

    Returns (True, status_code) on an exact match, (False, status_code)
    on a mismatch, and (False, False) when no body was received.
    """
    # Bug fix: the old default was {'User-Agent': default_ua}, which nested
    # the default_ua header dict inside another dict and sent a bogus
    # User-Agent. Using None also avoids a shared mutable default argument.
    if headers is None:
        headers = default_ua
    request = urllib2.Request(experimental_url, None, headers)
    response = urllib2.urlopen(request)
    responseContents = response.read()
    responseCode = response.code
    if responseContents != False:
        if str(responseContents) != str(control_result):
            print(label + " control " + str(control_result) + " data does not "
                  "match experiment response: " + str(responseContents))
            return False, responseCode
        return True, responseCode
    else:
        print("HTTP connection appears to have failed")
        return False, False
"""Connect to test_hostname on port 80, request url and compare it with the expected
control_result as a regex. Optionally, a label may be set to customize
output. If the experiment matches the control, this returns True with the HTTP
status code; otherwise it returns False.
"""
def http_content_fuzzy_match(experimental_url, control_result,
                             headers=None,
                             label="generic HTTP content comparison"):
    """Fetch experimental_url and regex-search its body for control_result.

    Returns (True, status_code) when the pattern matches, (False,
    status_code) on a mismatch, and (False, False) when no body arrived.
    """
    # Bug fix: the old default wrapped default_ua (already a complete header
    # dict) inside another dict; None also avoids a shared mutable default.
    if headers is None:
        headers = default_ua
    request = urllib2.Request(experimental_url, None, headers)
    response = urllib2.urlopen(request)
    responseContents = response.read()
    responseCode = response.code
    pattern = re.compile(control_result)
    match = pattern.search(responseContents)
    if responseContents != False:
        if not match:
            print(label + " control " + str(control_result) + " data does not "
                  "match experiment response: " + str(responseContents))
            return False, responseCode
        return True, responseCode
    else:
        print("HTTP connection appears to have failed")
        return False, False
"""Compare two HTTP status codes as integers and return True if they match."""
def http_status_code_match(experiment_code, control_code):
    """Return True when the two HTTP status codes are equal as integers."""
    return int(experiment_code) == int(control_code)
"""Compare two HTTP status codes as integers and return True if they don't match."""
def http_status_code_no_match(experiment_code, control_code):
    """Return True when the two HTTP status codes differ as integers."""
    return not http_status_code_match(experiment_code, control_code)
"""Connect to a URL and compare the control_header/control_result with the data
served by the remote server. Return True if it matches, False if it does not."""
def http_header_match(experiment_url, control_header, control_result):
    """Fetch experiment_url and compare one response header to control_result.

    Returns True when the header value matches, False otherwise.
    """
    # Bug fix: the old body referenced the undefined names `url` and `label`
    # and always raised NameError; use the actual parameter instead.
    response = http_fetch(experiment_url,
                          label="header match: " + str(control_header))
    # NOTE(review): urllib2 responses expose headers via .info(); if
    # get_header is unavailable at runtime, this should be
    # response.info().getheader(control_header) — confirm.
    remote_header = response.get_header(control_header)
    if str(remote_header) == str(control_result):
        return True
    else:
        return False
"""Connect to a URL and compare the control_header/control_result with the data
served by the remote server. Return True if it does not matche, False if it does."""
def http_header_no_match(experiment_url, control_header, control_result):
    """Inverse of http_header_match: True when the header does NOT match."""
    return not http_header_match(experiment_url, control_header, control_result)
def send_browser_headers(self, browser, conn):
    """Write randomized browser-like headers to *conn* and end the header block.

    *browser* is unused; it is kept for interface compatibility. Always
    returns True.
    """
    for name, value in ooni.helpers.get_random_headers(self):
        conn.putheader(name, value)
    conn.endheaders()
    return True
def http_request(self, method, url, path=None):
    """Issue a raw HTTP request and store the result on *self*.

    Connects to the URL's host on port 80, sends *method* for *path*
    (defaulting to the URL's own path), stores the response headers in
    self.headers and the body in self.data, and returns True.
    """
    purl = urlparse(url)
    host = purl.netloc
    conn = httplib.HTTPConnection(host, 80)
    conn.connect()
    if path is None:
        path = purl.path
    # Bug fix: honour an explicitly supplied path; the old code always
    # requested purl.path, silently ignoring the `path` parameter (which
    # callers such as squid_cacheobject_request rely on).
    conn.putrequest(method, path)
    send_browser_headers(self, None, conn)
    response = conn.getresponse()
    headers = dict(response.getheaders())
    self.headers = headers
    self.data = response.read()
    return True
def search_headers(self, s_headers, url):
    """Request *url* and report which of *s_headers* occur in the response.

    Returns a dict mapping each header name to a presence bool, or None
    when the request fails.
    """
    if not http_request(self, "GET", url):
        return None
    received = self.headers
    return dict((name, name in received) for name in s_headers)
# XXX for testing
# [('content-length', '9291'), ('via', '1.0 cache_server:3128 (squid/2.6.STABLE21)'), ('x-cache', 'MISS from cache_server'), ('accept-ranges', 'bytes'), ('server', 'Apache/2.2.16 (Debian)'), ('last-modified', 'Fri, 22 Jul 2011 03:00:31 GMT'), ('connection', 'close'), ('etag', '"105801a-244b-4a89fab1e51c0;49e684ba90c80"'), ('date', 'Sat, 23 Jul 2011 03:03:56 GMT'), ('content-type', 'text/html'), ('x-cache-lookup', 'MISS from cache_server:3128')]
"""Search for squid headers by requesting a random site and checking if the headers have been rewritten (active, not fingerprintable)"""
def search_squid_headers(self):
    """Probe a random URL for squid-rewritten headers (active, not
    fingerprintable).

    Returns False as soon as any known squid header is present, True
    otherwise.
    """
    test_name = "squid header"
    self.logger.info("RUNNING %s test" % test_name)
    url = ooni.helpers.get_random_url(self)
    s_headers = {'via': '1.0 cache_server:3128 (squid/2.6.STABLE21)', 'x-cache': 'MISS from cache_server', 'x-cache-lookup':'MISS from cache_server:3128'}
    ret = search_headers(self, s_headers, url)
    for header_name, present in ret.items():
        if present is True:
            self.logger.info("the %s test returned False" % test_name)
            return False
    self.logger.info("the %s test returned True" % test_name)
    return True
def random_bad_request(self):
    """Send a request whose method is a random 5-20 character string of
    uppercase letters and digits; True on success, None on failure."""
    url = ooni.helpers.get_random_url(self)
    alphabet = string.ascii_uppercase + string.digits
    verb = ''.join(random.choice(alphabet)
                   for _ in range(random.randint(5, 20)))
    if http_request(self, verb, url):
        return True
    return None
"""Create a request made up of a random string of 5-20 chars (active technique, possibly fingerprintable)"""
def squid_search_bad_request(self):
    """Check whether a malformed request draws a squid error header
    (active technique, possibly fingerprintable)."""
    test_name = "squid bad request"
    self.logger.info("RUNNING %s test" % test_name)
    if not random_bad_request(self):
        self.logger.warning("the %s test returned failed" % test_name)
        return None
    s_headers = {'X-Squid-Error' : 'ERR_INVALID_REQ 0'}
    for header_name in s_headers:
        if header_name in self.headers:
            self.logger.info("the %s test returned False" % test_name)
            return False
    self.logger.info("the %s test returned True" % test_name)
    return True
"""Try requesting cache_object and expect as output access denied (very active technique, fingerprintable) """
def squid_cacheobject_request(self):
    """Request squid's cache_object:// URL and look for an "Access Denied."
    page (very active technique, fingerprintable)."""
    url = ooni.helpers.get_random_url(self)
    test_name = "squid cacheobject"
    self.logger.info("RUNNING %s test" % test_name)
    if not http_request(self, "GET", url, "cache_object://localhost/info"):
        self.logger.warning("the %s test failed" % test_name)
        return None
    soup = BeautifulSoup(self.data)
    marker = soup.find('strong')
    if marker and marker.string == "Access Denied.":
        self.logger.info("the %s test returned False" % test_name)
        return False
    self.logger.info("the %s test returned True" % test_name)
    return True
def MSHTTP_CP_Tests(self):
    """Microsoft NCSI captive-portal check: ncsi.txt must contain exactly
    "Microsoft NCSI" with status 200."""
    test_name = "MS HTTP Captive Portal"
    self.logger.info("RUNNING %s test" % test_name)
    experiment_url = "http://www.msftncsi.com/ncsi.txt"
    expectedResponse = "Microsoft NCSI"  # Only this - nothing more
    expectedResponseCode = "200"  # Must be this - nothing else
    label = "MS HTTP"
    headers = { 'User-Agent' : 'Microsoft NCSI' }
    content_match, experiment_code = http_content_match(experiment_url, expectedResponse,
                                                        headers, label)
    status_match = http_status_code_match(expectedResponseCode,
                                          experiment_code)
    if content_match and status_match:
        self.logger.info("the %s test returned True" % test_name)
        return True
    print(label + " experiment would conclude that the network is filtered.")
    self.logger.info("the %s test returned False" % test_name)
    return False
def AppleHTTP_CP_Tests(self):
    """Apple captive-portal check: success.html must contain "Success"
    and return status 200."""
    test_name = "Apple HTTP Captive Portal"
    self.logger.info("RUNNING %s test" % test_name)
    experiment_url = "http://www.apple.com/library/test/success.html"
    expectedResponse = "Success"  # There is HTML that contains this string
    expectedResponseCode = "200"
    label = "Apple HTTP"
    headers = { 'User-Agent' : 'Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) '
                               'AppleWebKit/420+ (KHTML, like Gecko) Version/3.0'
                               ' Mobile/1A543a Safari/419.3' }
    content_match, experiment_code = http_content_fuzzy_match(
        experiment_url, expectedResponse, headers)
    status_match = http_status_code_match(expectedResponseCode,
                                          experiment_code)
    if content_match and status_match:
        self.logger.info("the %s test returned True" % test_name)
        return True
    print(label + " experiment would conclude that the network is filtered.")
    print(label + "content match:" + str(content_match) + " status match:" + str(status_match))
    self.logger.info("the %s test returned False" % test_name)
    return False
def WC3_CP_Tests(self):
    """IETF/W3C captive-portal draft check: a portal that implements
    draft-nottingham-http-portal would answer 428; any other status means
    no portal was detected."""
    test_name = "W3 Captive Portal"
    self.logger.info("RUNNING %s test" % test_name)
    url = "http://tools.ietf.org/html/draft-nottingham-http-portal-02"
    draftResponseCode = "428"
    label = "WC3 draft-nottingham-http-portal"
    response = http_fetch(url, label=label)
    responseCode = response.code
    if http_status_code_no_match(responseCode, draftResponseCode):
        self.logger.info("the %s test returned True" % test_name)
        return True
    else:
        print(label + " experiment would conclude that the network is filtered.")
        # Bug fix: the old code printed an undefined `status_match` name
        # here, raising NameError exactly on the filtered path.
        print(label + " status code matched the draft's " + draftResponseCode)
        self.logger.info("the %s test returned False" % test_name)
        return False
# Google ChromeOS fetches this url in guest mode
# and they expect the user to authenticate
def googleChromeOSHTTPTest(self):
    # Placeholder: the ChromeOS captive-portal probe is not implemented yet.
    print "noop"
    #url = "http://www.google.com/"
# Thin wrappers so the test runner can discover these probes by their
# *_TransparentHTTP_Tests naming convention.
def SquidHeader_TransparentHTTP_Tests(self):
    # Delegates to search_squid_headers (passive header-rewrite probe).
    return search_squid_headers(self)
def SquidBadRequest_TransparentHTTP_Tests(self):
    # Delegates to squid_search_bad_request (malformed-method probe).
    return squid_search_bad_request(self)
def SquidCacheobject_TransparentHTTP_Tests(self):
    # Delegates to squid_cacheobject_request (cache_object:// probe).
    return squid_cacheobject_request(self)
| |
from __future__ import absolute_import, unicode_literals
import itertools
import json
from functools import total_ordering
from django.core.urlresolvers import reverse
from django.forms import widgets
from django.forms.utils import flatatt
from django.template.loader import render_to_string
from django.utils.encoding import python_2_unicode_compatible
from django.utils.formats import get_format
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
from taggit.forms import TagWidget
from wagtail.utils.widgets import WidgetWithScript
from wagtail.wagtailcore import hooks
from wagtail.wagtailcore.models import Page
class AdminAutoHeightTextInput(WidgetWithScript, widgets.Textarea):
    """Textarea that grows to fit its content via the `autosize` JS plugin."""
    def __init__(self, attrs=None):
        # Autoheight resizes the control anyway, so start at a single row.
        merged_attrs = {'rows': '1'}
        if attrs:
            merged_attrs.update(attrs)
        super(AdminAutoHeightTextInput, self).__init__(merged_attrs)

    def render_js_init(self, id_, name, value):
        return 'autosize($("#{0}"));'.format(id_)
class AdminDateInput(WidgetWithScript, widgets.DateInput):
    """Date field wired to the admin JS date picker."""
    # Set a default date format to match the one that our JS date picker expects -
    # it can still be overridden explicitly, but this way it won't be affected by
    # the DATE_INPUT_FORMATS setting
    def __init__(self, attrs=None, format='%Y-%m-%d'):
        super(AdminDateInput, self).__init__(attrs=attrs, format=format)

    def render_js_init(self, id_, name, value):
        picker_config = {'dayOfWeekStart': get_format('FIRST_DAY_OF_WEEK')}
        return 'initDateChooser({0}, {1});'.format(
            json.dumps(id_), json.dumps(picker_config))
class AdminTimeInput(WidgetWithScript, widgets.TimeInput):
    """Time field wired to the admin JS time picker; defaults to HH:MM."""
    def __init__(self, attrs=None, format='%H:%M'):
        super(AdminTimeInput, self).__init__(attrs=attrs, format=format)

    def render_js_init(self, id_, name, value):
        return 'initTimeChooser({0});'.format(json.dumps(id_))
class AdminDateTimeInput(WidgetWithScript, widgets.DateTimeInput):
    """Combined date/time field wired to the admin JS datetime picker."""
    def __init__(self, attrs=None, format='%Y-%m-%d %H:%M'):
        super(AdminDateTimeInput, self).__init__(attrs=attrs, format=format)

    def render_js_init(self, id_, name, value):
        picker_config = {'dayOfWeekStart': get_format('FIRST_DAY_OF_WEEK')}
        return 'initDateTimeChooser({0}, {1});'.format(
            json.dumps(id_), json.dumps(picker_config))
class AdminTagWidget(WidgetWithScript, TagWidget):
    """Tag input backed by the admin tag-autocomplete endpoint."""
    def render_js_init(self, id_, name, value):
        autocomplete_url = reverse('wagtailadmin_tag_autocomplete')
        return "initTagField({0}, {1});".format(
            json.dumps(id_), json.dumps(autocomplete_url))
class AdminChooser(WidgetWithScript, widgets.Input):
    """Base widget for modal 'chooser' interfaces backed by a hidden input."""
    input_type = 'hidden'
    choose_one_text = _("Choose an item")
    choose_another_text = _("Choose another item")
    clear_choice_text = _("Clear choice")
    link_to_chosen_text = _("Edit this item")
    show_edit_link = True
    # when looping over form fields, this one should appear in visible_fields, not hidden_fields
    # despite the underlying input being type="hidden"
    is_hidden = False

    def get_instance(self, model_class, value):
        """Resolve *value* (a pk) to a model instance, or None."""
        if value is None:
            return None
        try:
            return model_class.objects.get(pk=value)
        except model_class.DoesNotExist:
            return None

    def get_instance_and_id(self, model_class, value):
        """Return an (instance, pk) pair for *value*, accepting either an
        instance or a pk and tolerating missing rows."""
        if value is None:
            return (None, None)
        if isinstance(value, model_class):
            return (value, value.pk)
        try:
            return (model_class.objects.get(pk=value), value)
        except model_class.DoesNotExist:
            return (None, None)

    def value_from_datadict(self, data, files, name):
        # treat the empty string as None
        result = super(AdminChooser, self).value_from_datadict(data, files, name)
        return None if result == '' else result

    def __init__(self, **kwargs):
        # allow the class-level texts/flags to be overridden per-instance
        for option in ('choose_one_text', 'choose_another_text',
                       'clear_choice_text', 'link_to_chosen_text',
                       'show_edit_link'):
            if option in kwargs:
                setattr(self, option, kwargs.pop(option))
        super(AdminChooser, self).__init__(**kwargs)
class AdminPageChooser(AdminChooser):
    """Chooser widget for selecting Page instances, optionally restricted
    to specific page types."""
    choose_one_text = _('Choose a page')
    choose_another_text = _('Choose another page')
    link_to_chosen_text = _('Edit this page')

    def __init__(self, target_models=None, can_choose_root=False, **kwargs):
        super(AdminPageChooser, self).__init__(**kwargs)
        self.target_models = list(target_models or [Page])
        self.can_choose_root = can_choose_root

    def _get_lowest_common_page_class(self):
        """
        Return a Page class that is an ancestor for all Page classes in
        ``target_models``, and is also a concrete Page class itself.
        """
        if len(self.target_models) == 1:
            # Shortcut for a single page type
            return self.target_models[0]
        return Page

    def render_html(self, name, value, attrs):
        model_class = self._get_lowest_common_page_class()
        instance, value = self.get_instance_and_id(model_class, value)
        original_field_html = super(AdminPageChooser, self).render_html(name, value, attrs)
        context = {
            'widget': self,
            'original_field_html': original_field_html,
            'attrs': attrs,
            'value': value,
            'page': instance,
        }
        return render_to_string("wagtailadmin/widgets/page_chooser.html", context)

    def render_js_init(self, id_, name, value):
        if isinstance(value, Page):
            page = value
        else:
            # Value is an ID; look the page up
            model_class = self._get_lowest_common_page_class()
            page = self.get_instance(model_class, value)
        parent = page.get_parent() if page else None
        model_names = ['{app}.{model}'.format(app=model._meta.app_label,
                                              model=model._meta.model_name)
                       for model in self.target_models]
        return "createPageChooser({id}, {model_names}, {parent}, {can_choose_root});".format(
            id=json.dumps(id_),
            model_names=json.dumps(model_names),
            parent=json.dumps(parent.id if parent else None),
            can_choose_root=('true' if self.can_choose_root else 'false')
        )
@python_2_unicode_compatible
@total_ordering
class Button(object):
    """A link-styled action button for the admin UI.

    Buttons order by ``(priority, label)``; ``total_ordering`` derives the
    remaining comparison operators from ``__lt__`` and ``__eq__``.
    """

    def __init__(self, label, url, classes=None, attrs=None, priority=1000):
        # ``classes``/``attrs`` default to None rather than mutable defaults
        # (set() / {}): a mutable default is shared across every call, and the
        # original code aliased the shared set into ``self.classes``, so
        # mutating one button's classes could leak into all future buttons.
        self.label = label
        self.url = url
        self.classes = set(classes) if classes is not None else set()
        self.attrs = dict(attrs) if attrs is not None else {}
        self.priority = priority

    def render(self):
        """Render this button as an ``<a>`` tag; CSS classes are sorted so
        the output is deterministic."""
        attrs = {'href': self.url, 'class': ' '.join(sorted(self.classes))}
        attrs.update(self.attrs)
        return format_html('<a{}>{}</a>', flatatt(attrs), self.label)

    def __str__(self):
        return self.render()

    def __repr__(self):
        return '<Button: {}>'.format(self.label)

    def __lt__(self, other):
        # Lower priority sorts first; label breaks ties.
        if not isinstance(other, Button):
            return NotImplemented
        return (self.priority, self.label) < (other.priority, other.label)

    def __eq__(self, other):
        if not isinstance(other, Button):
            return NotImplemented
        return (self.label == other.label and
                self.url == other.url and
                self.classes == other.classes and
                self.attrs == other.attrs and
                self.priority == other.priority)
class PageListingButton(Button):
    """A Button pre-styled for page listing rows."""

    def __init__(self, label, url, classes=None, **kwargs):
        # Default ``classes`` to None instead of a mutable default (set()),
        # which would be shared between calls; merge any caller-supplied
        # classes with the standard page-listing button styles.
        classes = {'button', 'button-small', 'button-secondary'} | set(classes or ())
        super(PageListingButton, self).__init__(label, url, classes=classes, **kwargs)
class BaseDropdownMenuButton(Button):
    """A button that opens a dropdown menu rather than linking anywhere.

    Subclasses are expected to supply ``template_name``, ``is_parent`` and
    ``get_buttons_in_dropdown()`` — TODO confirm against concrete subclasses.
    """

    def __init__(self, *args, **kwargs):
        # Dropdown triggers have no destination URL of their own.
        super(BaseDropdownMenuButton, self).__init__(*args, url=None, **kwargs)

    def get_buttons_in_dropdown(self):
        """Subclass hook: return the buttons shown inside the dropdown."""
        raise NotImplementedError

    def render(self):
        context = {
            'buttons': self.get_buttons_in_dropdown(),
            'label': self.label,
            'title': self.attrs.get('title'),
            'is_parent': self.is_parent,
        }
        return render_to_string(self.template_name, context)
class ButtonWithDropdownFromHook(BaseDropdownMenuButton):
    """A dropdown button whose contents are gathered from a named hook."""

    template_name = 'wagtailadmin/pages/listing/_button_with_dropdown.html'

    def __init__(self, label, hook_name, page, page_perms, is_parent, **kwargs):
        self.hook_name = hook_name
        self.page = page
        self.page_perms = page_perms
        self.is_parent = is_parent
        super(ButtonWithDropdownFromHook, self).__init__(label, **kwargs)

    def get_buttons_in_dropdown(self):
        """Collect buttons from every registered hook, sorted by their
        natural (priority, label) ordering."""
        collected = []
        for hook in hooks.get_hooks(self.hook_name):
            collected.extend(hook(self.page, self.page_perms, self.is_parent))
        return sorted(collected)
| |
#!/usr/bin/python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Closure compiler on JavaScript files to check for errors and produce
minified output."""
import argparse
import os
import re
import subprocess
import sys
import tempfile
import build.inputs
import processor
import error_filter
_CURRENT_DIR = os.path.join(os.path.dirname(__file__))
class Checker(object):
"""Runs the Closure compiler on given source files to typecheck them
and produce minified output."""
_JAR_COMMAND = [
"java",
"-jar",
"-Xms1024m",
"-client",
"-XX:+TieredCompilation"
]
_MAP_FILE_FORMAT = "%s.map"
def __init__(self, verbose=False, strict=False):
"""
Args:
verbose: Whether this class should output diagnostic messages.
strict: Whether the Closure Compiler should be invoked more strictly.
"""
self._runner_jar = os.path.join(_CURRENT_DIR, "runner", "runner.jar")
self._temp_files = []
self._verbose = verbose
self._strict = strict
self._error_filter = error_filter.PromiseErrorFilter()
def _nuke_temp_files(self):
"""Deletes any temp files this class knows about."""
if not self._temp_files:
return
self._log_debug("Deleting temp files: %s" % ", ".join(self._temp_files))
for f in self._temp_files:
os.remove(f)
self._temp_files = []
def _log_debug(self, msg, error=False):
"""Logs |msg| to stdout if --verbose/-v is passed when invoking this script.
Args:
msg: A debug message to log.
"""
if self._verbose:
print "(INFO) %s" % msg
def _log_error(self, msg):
"""Logs |msg| to stderr regardless of --flags.
Args:
msg: An error message to log.
"""
print >> sys.stderr, "(ERROR) %s" % msg
def _run_jar(self, jar, args):
"""Runs a .jar from the command line with arguments.
Args:
jar: A file path to a .jar file
args: A list of command line arguments to be passed when running the .jar.
Return:
(exit_code, stderr) The exit code of the command (e.g. 0 for success) and
the stderr collected while running |jar| (as a string).
"""
shell_command = " ".join(self._JAR_COMMAND + [jar] + args)
self._log_debug("Running jar: %s" % shell_command)
devnull = open(os.devnull, "w")
kwargs = {"stdout": devnull, "stderr": subprocess.PIPE, "shell": True}
process = subprocess.Popen(shell_command, **kwargs)
_, stderr = process.communicate()
return process.returncode, stderr
def _get_line_number(self, match):
"""When chrome is built, it preprocesses its JavaScript from:
<include src="blah.js">
alert(1);
to:
/* contents of blah.js inlined */
alert(1);
Because Closure Compiler requires this inlining already be done (as
<include> isn't valid JavaScript), this script creates temporary files to
expand all the <include>s.
When type errors are hit in temporary files, a developer doesn't know the
original source location to fix. This method maps from /tmp/file:300 back to
/original/source/file:100 so fixing errors is faster for developers.
Args:
match: A re.MatchObject from matching against a line number regex.
Returns:
The fixed up /file and :line number.
"""
real_file = self._processor.get_file_from_line(match.group(1))
return "%s:%d" % (os.path.abspath(real_file.file), real_file.line_number)
def _filter_errors(self, errors):
"""Removes some extraneous errors. For example, we ignore:
Variable x first declared in /tmp/expanded/file
Because it's just a duplicated error (it'll only ever show up 2+ times).
We also ignore Promose-based errors:
found : function (VolumeInfo): (Promise<(DirectoryEntry|null)>|null)
required: (function (Promise<VolumeInfo>): ?|null|undefined)
as templates don't work with Promises in all cases yet. See
https://github.com/google/closure-compiler/issues/715 for details.
Args:
errors: A list of string errors extracted from Closure Compiler output.
Return:
A slimmer, sleeker list of relevant errors (strings).
"""
first_declared_in = lambda e: " first declared in " not in e
return self._error_filter.filter(filter(first_declared_in, errors))
def _clean_up_error(self, error):
"""Reverse the effects that funky <include> preprocessing steps have on
errors messages.
Args:
error: A Closure compiler error (2 line string with error and source).
Return:
The fixed up error string.
"""
expanded_file = self._expanded_file
fixed = re.sub("%s:(\d+)" % expanded_file, self._get_line_number, error)
return fixed.replace(expanded_file, os.path.abspath(self._file_arg))
def _format_errors(self, errors):
"""Formats Closure compiler errors to easily spot compiler output.
Args:
errors: A list of strings extracted from the Closure compiler's output.
Returns:
A formatted output string.
"""
contents = "\n## ".join("\n\n".join(errors).splitlines())
return "## %s" % contents if contents else ""
def _create_temp_file(self, contents):
"""Creates an owned temporary file with |contents|.
Args:
content: A string of the file contens to write to a temporary file.
Return:
The filepath of the newly created, written, and closed temporary file.
"""
with tempfile.NamedTemporaryFile(mode="wt", delete=False) as tmp_file:
self._temp_files.append(tmp_file.name)
tmp_file.write(contents)
return tmp_file.name
def _run_js_check(self, sources, out_file=None, externs=None,
closure_args=None):
"""Check |sources| for type errors.
Args:
sources: Files to check.
out_file: A file where the compiled output is written to.
externs: @extern files that inform the compiler about custom globals.
closure_args: Arguments passed directly to the Closure compiler.
Returns:
(errors, stderr) A parsed list of errors (strings) found by the compiler
and the raw stderr (as a string).
"""
args = ["--js=%s" % s for s in sources]
if out_file:
out_dir = os.path.dirname(out_file)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
args += ["--js_output_file=%s" % out_file]
args += ["--create_source_map=%s" % (self._MAP_FILE_FORMAT % out_file)]
if externs:
args += ["--externs=%s" % e for e in externs]
if closure_args:
args += ["--%s" % arg for arg in closure_args]
args_file_content = " %s" % " ".join(args)
self._log_debug("Args: %s" % args_file_content.strip())
args_file = self._create_temp_file(args_file_content)
self._log_debug("Args file: %s" % args_file)
runner_args = ["--compiler-args-file=%s" % args_file]
_, stderr = self._run_jar(self._runner_jar, runner_args)
errors = stderr.strip().split("\n\n")
maybe_summary = errors.pop()
if re.search(".*error.*warning.*typed", maybe_summary):
self._log_debug("Summary: %s" % maybe_summary)
else:
# Not a summary. Running the jar failed. Bail.
self._log_error(stderr)
self._nuke_temp_files()
sys.exit(1)
if errors and out_file:
if os.path.exists(out_file):
os.remove(out_file)
if os.path.exists(self._MAP_FILE_FORMAT % out_file):
os.remove(self._MAP_FILE_FORMAT % out_file)
return errors, stderr
def check(self, source_file, out_file=None, depends=None, externs=None,
closure_args=None):
"""Closure compiler |source_file| while checking for errors.
Args:
source_file: A file to check.
out_file: A file where the compiled output is written to.
depends: Files that |source_file| requires to run (e.g. earlier <script>).
externs: @extern files that inform the compiler about custom globals.
closure_args: Arguments passed directly to the Closure compiler.
Returns:
(found_errors, stderr) A boolean indicating whether errors were found and
the raw Closure compiler stderr (as a string).
"""
self._log_debug("FILE: %s" % source_file)
if source_file.endswith("_externs.js"):
self._log_debug("Skipping externs: %s" % source_file)
return
self._file_arg = source_file
cwd, tmp_dir = os.getcwd(), tempfile.gettempdir()
rel_path = lambda f: os.path.join(os.path.relpath(cwd, tmp_dir), f)
depends = depends or []
includes = [rel_path(f) for f in depends + [source_file]]
contents = ['<include src="%s">' % i for i in includes]
meta_file = self._create_temp_file("\n".join(contents))
self._log_debug("Meta file: %s" % meta_file)
self._processor = processor.Processor(meta_file)
self._expanded_file = self._create_temp_file(self._processor.contents)
self._log_debug("Expanded file: %s" % self._expanded_file)
errors, stderr = self._run_js_check([self._expanded_file],
out_file=out_file, externs=externs,
closure_args=closure_args)
filtered_errors = self._filter_errors(errors)
cleaned_errors = map(self._clean_up_error, filtered_errors)
output = self._format_errors(cleaned_errors)
if cleaned_errors:
prefix = "\n" if output else ""
self._log_error("Error in: %s%s%s" % (source_file, prefix, output))
elif output:
self._log_debug("Output: %s" % output)
self._nuke_temp_files()
return bool(cleaned_errors), stderr
def check_multiple(self, sources, out_file=None, externs=None,
closure_args=None):
"""Closure compile a set of files and check for errors.
Args:
sources: An array of files to check.
out_file: A file where the compiled output is written to.
externs: @extern files that inform the compiler about custom globals.
closure_args: Arguments passed directly to the Closure compiler.
Returns:
(found_errors, stderr) A boolean indicating whether errors were found and
the raw Closure Compiler stderr (as a string).
"""
errors, stderr = self._run_js_check(sources, out_file=out_file,
externs=externs,
closure_args=closure_args)
self._nuke_temp_files()
return bool(errors), stderr
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Typecheck JavaScript using Closure compiler")
parser.add_argument("sources", nargs=argparse.ONE_OR_MORE,
help="Path to a source file to typecheck")
single_file_group = parser.add_mutually_exclusive_group()
single_file_group.add_argument("--single-file", dest="single_file",
action="store_true",
help="Process each source file individually")
# TODO(twellington): remove --no-single-file and use len(opts.sources).
single_file_group.add_argument("--no-single-file", dest="single_file",
action="store_false",
help="Process all source files as a group")
parser.add_argument("-d", "--depends", nargs=argparse.ZERO_OR_MORE)
parser.add_argument("-e", "--externs", nargs=argparse.ZERO_OR_MORE)
parser.add_argument("-o", "--out-file", dest="out_file",
help="A file where the compiled output is written to")
parser.add_argument("-c", "--closure-args", dest="closure_args",
nargs=argparse.ZERO_OR_MORE,
help="Arguments passed directly to the Closure compiler")
parser.add_argument("-v", "--verbose", action="store_true",
help="Show more information as this script runs")
parser.set_defaults(single_file=True, strict=False)
opts = parser.parse_args()
depends = opts.depends or []
# TODO(devlin): should we run normpath() on this first and/or do this for
# depends as well?
externs = set(opts.externs or [])
sources = set(opts.sources)
polymer_externs = os.path.join(os.path.dirname(_CURRENT_DIR), 'polymer',
'v1_0', 'components-chromium',
'polymer-externs', 'polymer.externs.js')
externs.add(polymer_externs)
checker = Checker(verbose=opts.verbose, strict=opts.strict)
if opts.single_file:
for source in sources:
# Normalize source to the current directory.
source = os.path.normpath(os.path.join(os.getcwd(), source))
depends, externs = build.inputs.resolve_recursive_dependencies(
source, depends, externs)
found_errors, _ = checker.check(source, out_file=opts.out_file,
depends=depends, externs=externs,
closure_args=opts.closure_args)
if found_errors:
sys.exit(1)
else:
found_errors, stderr = checker.check_multiple(
sources,
out_file=opts.out_file,
externs=externs,
closure_args=opts.closure_args)
if found_errors:
print stderr
sys.exit(1)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.