gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
import abc
import os
import time
import markupsafe
import requests
from django.db import models
from framework.auth import Auth
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError, PermissionsError
from mako.lookup import TemplateLookup
from osf.models.base import BaseModel, ObjectIDMixin
from osf.models.external import ExternalAccount
from osf.models.node import AbstractNode
from osf.models.user import OSFUser
from osf.modm_compat import Q
from osf.utils.datetime_aware_jsonfield import DateTimeAwareJSONField
from website import settings
from addons.base import logger, serializer
from website.oauth.signals import oauth_complete
from website.util import waterbutler_url_for
# Mako template lookup used to render addon templates from the main website
# templates directory. Filters run in order on every expression substitution.
lookup = TemplateLookup(
    directories=[
        settings.TEMPLATES_PATH
    ],
    default_filters=[
        'unicode',  # default filter; must set explicitly when overriding
        # FIXME: Temporary workaround for data stored in wrong format in DB. Unescape it before it
        # gets re-escaped by Markupsafe. See [#OSF-4432]
        'temp_ampersand_fixer',
        'h',
    ],
    imports=[
        # FIXME: Temporary workaround for data stored in wrong format in DB. Unescape it before it
        # gets re-escaped by Markupsafe. See [#OSF-4432]
        'from website.util.sanitize import temp_ampersand_fixer',
    ]
)
class BaseAddonSettings(ObjectIDMixin, BaseModel):
    """Abstract base for every addon settings record.

    Provides soft-deletion plus access to the owning addon's app config.
    """
    # Soft-delete flag: records are flagged rather than removed from the DB.
    deleted = models.BooleanField(default=False)

    class Meta:
        abstract = True

    @property
    def config(self):
        """Django app config of the addon this record belongs to."""
        return self._meta.app_config

    @property
    def short_name(self):
        """Shorthand for the addon's short name from its app config."""
        return self.config.short_name

    def delete(self, save=True):
        """Soft-delete these settings, firing the ``on_delete`` callback."""
        self.deleted = True
        self.on_delete()
        if save:
            self.save()

    def undelete(self, save=True):
        """Restore soft-deleted settings, firing the ``on_add`` callback."""
        self.deleted = False
        self.on_add()
        if save:
            self.save()

    def to_json(self, user):
        """Serialize the addon's identity for the settings UI."""
        return {
            'addon_short_name': self.config.short_name,
            'addon_full_name': self.config.full_name,
        }

    #############
    # Callbacks #
    #############

    def on_add(self):
        """Called when the addon is added (or re-added) to the owner (User or Node)."""
        pass

    def on_delete(self):
        """Called when the addon is deleted from the owner (User or Node)."""
        pass
class BaseUserSettings(BaseAddonSettings):
    """Abstract per-user settings shared by all addons."""

    owner = models.OneToOneField(OSFUser, blank=True, null=True, related_name='%(app_label)s_user_settings')

    class Meta:
        abstract = True

    @property
    def public_id(self):
        """Public identifier for the linked account; None unless overridden."""
        return None

    @property
    def has_auth(self):
        """Whether the user has added credentials for this addon."""
        return False

    # TODO: Test me @asmacdo
    @property
    def nodes_authorized(self):
        """Get authorized, non-deleted nodes. Returns an empty list if the
        attached add-on does not include a node model.
        """
        node_settings_model = self.config.node_settings
        if not node_settings_model:
            return []
        connected = node_settings_model.objects.filter(
            user_settings=self, owner__is_deleted=False
        ).select_related('owner')
        return [each.owner for each in connected]

    @property
    def can_be_merged(self):
        """True when the concrete settings class implements ``merge``."""
        return hasattr(self, 'merge')

    def to_json(self, user):
        """Serialize these settings along with the nodes authorized on them."""
        ret = super(BaseUserSettings, self).to_json(user)
        ret['has_auth'] = self.has_auth
        serialized_nodes = []
        for node in self.nodes_authorized:
            serialized_nodes.append({
                '_id': node._id,
                'url': node.url,
                'title': node.title,
                'registered': node.is_registration,
                'api_url': node.api_url
            })
        ret.update({'nodes': serialized_nodes})
        return ret

    def __repr__(self):
        if not self.owner:
            return '<{cls} with no owner>'.format(cls=self.__class__.__name__)
        return '<{cls} owned by user {uid}>'.format(cls=self.__class__.__name__, uid=self.owner._id)
@oauth_complete.connect
def oauth_complete(provider, account, user):
    """Signal handler: enable the matching addon once an OAuth flow finishes."""
    if user and account:
        user.add_addon(account.provider)
        user.save()
class BaseOAuthUserSettings(BaseUserSettings):
    """Per-user settings for addons that authenticate via OAuth.

    Tracks, per node, which of the user's external accounts each node has
    been granted permission to use, plus addon-specific grant metadata.
    """
    # Keeps track of what nodes have been given permission to use external
    # accounts belonging to the user.
    oauth_grants = DateTimeAwareJSONField(default=dict, blank=True)
    # example:
    # {
    #     '<Node._id>': {
    #         '<ExternalAccount._id>': {
    #             <metadata>
    #         },
    #     }
    # }
    #
    # metadata here is the specific to each addon.

    # The existence of this property is used to determine whether or not
    # an addon instance is an "OAuth addon" in
    # AddonModelMixin.get_oauth_addons().
    oauth_provider = None

    serializer = serializer.OAuthAddonSerializer

    class Meta:
        abstract = True

    @property
    def has_auth(self):
        """Whether the user has at least one external account for this provider."""
        return self.external_accounts.exists()

    @property
    def external_accounts(self):
        """The user's list of ``ExternalAccount`` instances for this provider"""
        return self.owner.external_accounts.filter(provider=self.oauth_provider.short_name)

    def delete(self, save=True):
        """Revoke grants for all of this provider's accounts, then soft-delete."""
        for account in self.external_accounts.filter(provider=self.config.short_name):
            self.revoke_oauth_access(account, save=False)
        super(BaseOAuthUserSettings, self).delete(save=save)

    def grant_oauth_access(self, node, external_account, metadata=None):
        """Give a node permission to use an ``ExternalAccount`` instance.

        :raises PermissionsError: if the account does not belong to this user.
        """
        # ensure the user owns the external_account
        if not self.owner.external_accounts.filter(id=external_account.id).exists():
            raise PermissionsError()

        metadata = metadata or {}

        # create an entry for the node, if necessary
        if node._id not in self.oauth_grants:
            self.oauth_grants[node._id] = {}

        # create an entry for the external account on the node, if necessary
        if external_account._id not in self.oauth_grants[node._id]:
            self.oauth_grants[node._id][external_account._id] = {}

        # update the metadata with the supplied values
        for key, value in metadata.iteritems():
            self.oauth_grants[node._id][external_account._id][key] = value

        self.save()

    @must_be_logged_in
    def revoke_oauth_access(self, external_account, auth, save=True):
        """Revoke all access to an ``ExternalAccount``.

        TODO: This should accept node and metadata params in the future, to
        allow fine-grained revocation of grants. That's not yet been needed,
        so it's not yet been implemented.
        """
        for node in self.get_nodes_with_oauth_grants(external_account):
            try:
                node.get_addon(external_account.provider, deleted=True).deauthorize(auth=auth)
            except AttributeError:
                # No associated addon settings despite oauth grant
                pass

        if external_account.osfuser_set.count() == 1 and \
                external_account.osfuser_set.filter(id=auth.user.id).exists():
            # Only this user is using the account, so revoke remote access as well.
            self.revoke_remote_oauth_access(external_account)

        # Drop the grant entry for this account from every node.
        for key in self.oauth_grants:
            self.oauth_grants[key].pop(external_account._id, None)
        if save:
            self.save()

    def revoke_remote_oauth_access(self, external_account):
        """ Makes outgoing request to remove the remote oauth grant
        stored by third-party provider.

        Individual addons must override this method, as it is addon-specific behavior.
        Not all addon providers support this through their API, but those that do
        should also handle the case where this is called with an external_account
        with invalid credentials, to prevent a user from being unable to disconnect
        an account.
        """
        pass

    def verify_oauth_access(self, node, external_account, metadata=None):
        """Verify that access has been previously granted.

        If metadata is not provided, this checks only if the node can access the
        account. This is suitable to check to see if the node's addon settings
        is still connected to an external account (i.e., the user hasn't revoked
        it in their user settings pane).

        If metadata is provided, this checks to see that all key/value pairs
        have been granted. This is suitable for checking access to a particular
        folder or other resource on an external provider.
        """
        metadata = metadata or {}

        # ensure the grant exists
        try:
            grants = self.oauth_grants[node._id][external_account._id]
        except KeyError:
            return False

        # Verify every key/value pair is in the grants dict
        for key, value in metadata.iteritems():
            if key not in grants or grants[key] != value:
                return False

        return True

    def get_nodes_with_oauth_grants(self, external_account):
        # Generator of nodes which have grants for this external account
        for node_id, grants in self.oauth_grants.iteritems():
            node = AbstractNode.load(node_id)
            if external_account._id in grants.keys() and not node.is_deleted:
                yield node

    def get_attached_nodes(self, external_account):
        """Yield nodes whose addon settings are connected to this account."""
        for node in self.get_nodes_with_oauth_grants(external_account):
            if node is None:
                continue
            node_settings = node.get_addon(self.oauth_provider.short_name)

            if node_settings is None:
                continue

            if node_settings.external_account == external_account:
                yield node

    def merge(self, user_settings):
        """Merge `user_settings` into this instance"""
        if user_settings.__class__ is not self.__class__:
            raise TypeError('Cannot merge different addons')

        for node_id, data in user_settings.oauth_grants.iteritems():
            if node_id not in self.oauth_grants:
                self.oauth_grants[node_id] = data
            else:
                node_grants = user_settings.oauth_grants[node_id].iteritems()
                for ext_acct, meta in node_grants:
                    if ext_acct not in self.oauth_grants[node_id]:
                        self.oauth_grants[node_id][ext_acct] = meta
                    else:
                        # BUG FIX: iterate key/value pairs explicitly.
                        # ``for k, v in meta:`` iterated the dict's keys and
                        # raised ValueError unpacking any key that is not a
                        # 2-character string.
                        for k, v in meta.iteritems():
                            if k not in self.oauth_grants[node_id][ext_acct]:
                                self.oauth_grants[node_id][ext_acct][k] = v

        user_settings.oauth_grants = {}
        user_settings.save()

        try:
            config = settings.ADDONS_AVAILABLE_DICT[
                self.oauth_provider.short_name
            ]
            Model = config.models['nodesettings']
        except KeyError:
            pass
        else:
            # Re-point the merged user's connected node settings at this record.
            connected = Model.find(Q('user_settings', 'eq', user_settings))
            for node_settings in connected:
                node_settings.user_settings = self
                node_settings.save()

        self.save()

    def to_json(self, user):
        """Serialize settings plus the user's external accounts for this provider."""
        ret = super(BaseOAuthUserSettings, self).to_json(user)

        ret['accounts'] = self.serializer(
            user_settings=self
        ).serialized_accounts

        return ret

    #############
    # Callbacks #
    #############

    def on_delete(self):
        """When the user deactivates the addon, clear auth for connected nodes.
        """
        super(BaseOAuthUserSettings, self).on_delete()
        nodes = [AbstractNode.load(node_id) for node_id in self.oauth_grants.keys()]
        for node in nodes:
            node_addon = node.get_addon(self.oauth_provider.short_name)
            if node_addon and node_addon.user_settings == self:
                node_addon.clear_auth()
class BaseNodeSettings(BaseAddonSettings):
    """Abstract per-node addon settings; defines the node-lifecycle callbacks."""
    owner = models.OneToOneField(AbstractNode, null=True, blank=True, related_name='%(app_label)s_node_settings')

    class Meta:
        abstract = True

    @property
    def complete(self):
        """Whether or not this addon is properly configured
        :rtype bool:
        """
        raise NotImplementedError()

    @property
    def configured(self):
        """Whether or not this addon has had a folder connected.
        :rtype bool:
        """
        return self.complete

    @property
    def has_auth(self):
        """Whether the node has added credentials for this addon."""
        return False

    def to_json(self, user):
        # Extend base serialization with node info and the viewing user's
        # permissions on the owning node.
        ret = super(BaseNodeSettings, self).to_json(user)
        ret.update({
            'user': {
                'permissions': self.owner.get_permissions(user)
            },
            'node': {
                'id': self.owner._id,
                'api_url': self.owner.api_url,
                'url': self.owner.url,
                'is_registration': self.owner.is_registration,
            },
            'node_settings_template': os.path.basename(self.config.node_settings_template),
        })
        return ret

    #############
    # Callbacks #
    #############
    # Default implementations are no-ops; concrete addons override as needed.

    def before_page_load(self, node, user):
        """
        :param User user:
        :param Node node:
        """
        pass

    def before_remove_contributor(self, node, removed):
        """
        :param Node node:
        :param User removed:
        """
        pass

    def after_remove_contributor(self, node, removed, auth=None):
        """
        :param Node node:
        :param User removed:
        """
        pass

    def before_make_public(self, node):
        """
        :param Node node:
        :returns: Alert message or None
        """
        pass

    def before_make_private(self, node):
        """
        :param Node node:
        :returns: Alert message or None
        """
        pass

    def after_set_privacy(self, node, permissions):
        """
        :param Node node:
        :param str permissions:
        """
        pass

    def before_fork(self, node, user):
        """Return warning text to display if user auth will be copied to a
        fork.
        :param Node node:
        :param User user:
        :returns Alert message
        """
        # Three cases: addon unconfigured, authorized by the forking user,
        # or authorized by someone else.
        if hasattr(self, 'user_settings'):
            if self.user_settings is None:
                return (
                    u'Because you have not configured the {addon} add-on, your authentication will not be '
                    u'transferred to the forked {category}. You may authorize and configure the {addon} add-on '
                    u'in the new fork on the settings page.'
                ).format(
                    addon=self.config.full_name,
                    category=node.project_or_component,
                )
            elif self.user_settings and self.user_settings.owner == user:
                return (
                    u'Because you have authorized the {addon} add-on for this '
                    u'{category}, forking it will also transfer your authentication to '
                    u'the forked {category}.'
                ).format(
                    addon=self.config.full_name,
                    category=node.project_or_component,
                )
            else:
                return (
                    u'Because the {addon} add-on has been authorized by a different '
                    u'user, forking it will not transfer authentication to the forked '
                    u'{category}. You may authorize and configure the {addon} add-on '
                    u'in the new fork on the settings page.'
                ).format(
                    addon=self.config.full_name,
                    category=node.project_or_component,
                )

    def after_fork(self, node, fork, user, save=True):
        """
        :param Node node:
        :param Node fork:
        :param User user:
        :param bool save:
        :returns: cloned settings
        """
        # Clone the settings onto the fork without carrying over user auth.
        clone = self.clone()
        clone.user_settings = None
        clone.owner = fork

        if save:
            clone.save()

        return clone

    def before_register(self, node, user):
        """
        :param Node node:
        :param User user:
        :returns: Alert message
        """
        pass

    def after_register(self, node, registration, user, save=True):
        """
        :param Node node:
        :param Node registration:
        :param User user:
        :param bool save:
        :returns: Tuple of cloned settings and alert message
        """
        return None, None

    def after_delete(self, node, user):
        """
        :param Node node:
        :param User user:
        """
        pass
############
# Archiver #
############
class GenericRootNode(object):
    """Stand-in root node for addons that do not expose a real root folder."""
    name = ''
    path = '/'
class BaseStorageAddon(BaseModel):
    """
    Mixin class for traversing file trees of addons with files
    """
    # Shared placeholder root used when a subclass has no real root node.
    root_node = GenericRootNode()

    class Meta:
        abstract = True

    @property
    def archive_folder_name(self):
        """Display name for this addon's folder inside an archive/registration."""
        name = 'Archive of {addon}'.format(addon=self.config.full_name)
        # NOTE(review): assumes ``folder_name``, when defined, is a string;
        # a None value would break lstrip() — confirm against subclasses.
        folder_name = getattr(self, 'folder_name', '').lstrip('/').strip()
        if folder_name:
            name = name + ': {folder}'.format(folder=folder_name)
        return name

    def _get_fileobj_child_metadata(self, filenode, user, cookie=None, version=None):
        # Fetch one folder's child metadata from waterbutler.
        # Raises HTTPError (with the response body) on any non-200 status.
        kwargs = dict(
            provider=self.config.short_name,
            path=filenode.get('path', ''),
            node=self.owner,
            user=user,
            view_only=True,
        )
        if cookie:
            kwargs['cookie'] = cookie
        if version:
            kwargs['version'] = version
        metadata_url = waterbutler_url_for('metadata', _internal=True, **kwargs)
        res = requests.get(metadata_url)
        if res.status_code != 200:
            raise HTTPError(res.status_code, data={'error': res.json()})
        # TODO: better throttling?
        # Crude rate limit: at most 5 metadata requests per second.
        time.sleep(1.0 / 5.0)
        return res.json().get('data', [])

    def _get_file_tree(self, filenode=None, user=None, cookie=None, version=None):
        """
        Recursively get file metadata
        """
        filenode = filenode or {
            'path': '/',
            'kind': 'folder',
            'name': self.root_node.name,
        }
        # Files are leaves; anything with a 'size' key is treated as a file.
        if filenode.get('kind') == 'file' or 'size' in filenode:
            return filenode
        kwargs = {
            'version': version,
            'cookie': cookie,
        }
        # NOTE(review): recursion forwards cookie but NOT version to child
        # folders — confirm whether that is intentional.
        filenode['children'] = [
            self._get_file_tree(child, user, cookie=cookie)
            for child in self._get_fileobj_child_metadata(filenode, user, **kwargs)
        ]
        return filenode
class BaseOAuthNodeSettings(BaseNodeSettings):
    """Abstract per-node settings for addons connected through an OAuth account."""
    # TODO: Validate this field to be sure it matches the provider's short_name
    # NOTE: Do not set this field directly. Use ``set_auth()``
    external_account = models.ForeignKey(ExternalAccount, null=True, blank=True,
                                         related_name='%(app_label)s_node_settings')

    # NOTE: Do not set this field directly. Use ``set_auth()``
    # user_settings = fields.AbstractForeignField()

    # The existence of this property is used to determine whether or not
    # an addon instance is an "OAuth addon" in
    # AddonModelMixin.get_oauth_addons().
    oauth_provider = None

    class Meta:
        abstract = True

    @abc.abstractproperty
    def folder_id(self):
        raise NotImplementedError(
            "BaseOAuthNodeSettings subclasses must expose a 'folder_id' property."
        )

    @abc.abstractproperty
    def folder_name(self):
        raise NotImplementedError(
            "BaseOAuthNodeSettings subclasses must expose a 'folder_name' property."
        )

    @abc.abstractproperty
    def folder_path(self):
        raise NotImplementedError(
            "BaseOAuthNodeSettings subclasses must expose a 'folder_path' property."
        )

    def fetch_folder_name(self):
        """Default displayable folder name; subclasses may override."""
        return self.folder_name

    @property
    def nodelogger(self):
        """An AddonNodeLogger bound to this node (and user, when authorized).

        The concrete logger class is created once per instance and cached on
        ``self._logger_class``.
        """
        auth = None
        if self.user_settings:
            auth = Auth(self.user_settings.owner)
        self._logger_class = getattr(
            self,
            '_logger_class',
            type(
                '{0}NodeLogger'.format(self.config.short_name.capitalize()),
                (logger.AddonNodeLogger,),
                {'addon_short_name': self.config.short_name}
            )
        )
        return self._logger_class(
            node=self.owner,
            auth=auth
        )

    @property
    def complete(self):
        """True when authorized, linked to an account, and the grant still holds."""
        return bool(
            self.has_auth and
            self.external_account and
            self.user_settings.verify_oauth_access(
                node=self.owner,
                external_account=self.external_account,
            )
        )

    @property
    def configured(self):
        """True when complete AND a folder has been selected."""
        return bool(
            self.complete and
            (self.folder_id or self.folder_name or self.folder_path)
        )

    @property
    def has_auth(self):
        """Instance has an external account and *active* permission to use it"""
        return bool(
            self.user_settings and self.user_settings.has_auth
        ) and bool(
            self.external_account and self.user_settings.verify_oauth_access(
                node=self.owner,
                external_account=self.external_account
            )
        )

    def clear_settings(self):
        raise NotImplementedError(
            "BaseOAuthNodeSettings subclasses must expose a 'clear_settings' method."
        )

    def set_auth(self, external_account, user, metadata=None, log=True):
        """Connect the node addon to a user's external account.

        This method also adds the permission to use the account in the user's
        addon settings.
        """
        # tell the user's addon settings that this node is connected to it
        user_settings = user.get_or_add_addon(self.oauth_provider.short_name)
        user_settings.grant_oauth_access(
            node=self.owner,
            external_account=external_account,
            metadata=metadata  # metadata can be passed in when forking
        )
        user_settings.save()

        # update this instance
        self.user_settings = user_settings
        self.external_account = external_account

        if log:
            self.nodelogger.log(action='node_authorized', save=True)
        self.save()

    def deauthorize(self, auth=None, add_log=False):
        """Remove authorization from this node.

        This method should be overridden for addon-specific behavior,
        such as logging and clearing non-generalizable settings.
        """
        self.clear_auth()

    def clear_auth(self):
        """Disconnect the node settings from the user settings.

        This method does not remove the node's permission in the user's addon
        settings.
        """
        self.external_account = None
        self.user_settings = None
        self.save()

    def before_remove_contributor_message(self, node, removed):
        """If contributor to be removed authorized this addon, warn that removing
        will remove addon authorization.
        """
        if self.has_auth and self.user_settings.owner == removed:
            return (
                u'The {addon} add-on for this {category} is authenticated by {name}. '
                u'Removing this user will also remove write access to {addon} '
                u'unless another contributor re-authenticates the add-on.'
            ).format(
                addon=self.config.full_name,
                category=node.project_or_component,
                name=removed.fullname,
            )

    # backwards compatibility
    before_remove_contributor = before_remove_contributor_message

    def after_remove_contributor(self, node, removed, auth=None):
        """If removed contributor authorized this addon, remove addon authorization
        from owner.
        """
        if self.user_settings and self.user_settings.owner == removed:
            # Delete OAuth tokens
            # NOTE(review): assumes a grant entry exists for this node/account
            # pair — a missing entry would raise KeyError; confirm.
            self.user_settings.oauth_grants[self.owner._id].pop(self.external_account._id)
            self.user_settings.save()
            self.clear_auth()
            message = (
                u'Because the {addon} add-on for {category} "{title}" was authenticated '
                u'by {user}, authentication information has been deleted.'
            ).format(
                addon=self.config.full_name,
                category=markupsafe.escape(node.category_display),
                title=markupsafe.escape(node.title),
                user=markupsafe.escape(removed.fullname)
            )

            # Only suggest re-auth when someone other than the removed user
            # triggered the removal.
            if not auth or auth.user != removed:
                url = node.web_url_for('node_setting')
                message += (
                    u' You can re-authenticate on the <u><a href="{url}">Settings</a></u> page.'
                ).format(url=url)
            #
            return message

    def after_fork(self, node, fork, user, save=True):
        """After forking, copy user settings if the user is the one who authorized
        the addon.

        :return: the cloned settings
        """
        clone = super(BaseOAuthNodeSettings, self).after_fork(
            node=node,
            fork=fork,
            user=user,
            save=False,
        )
        if self.has_auth and self.user_settings.owner == user:
            metadata = None
            if self.complete:
                try:
                    metadata = self.user_settings.oauth_grants[node._id][self.external_account._id]
                except (KeyError, AttributeError):
                    pass
            clone.set_auth(self.external_account, user, metadata=metadata, log=False)
        else:
            clone.clear_settings()
        if save:
            clone.save()
        return clone

    def before_register_message(self, node, user):
        """Return warning text to display if user auth will be copied to a
        registration.
        """
        if self.has_auth:
            return (
                u'The contents of {addon} add-ons cannot be registered at this time; '
                u'the {addon} add-on linked to this {category} will not be included '
                u'as part of this registration.'
            ).format(
                addon=self.config.full_name,
                category=node.project_or_component,
            )

    # backwards compatibility
    before_register = before_register_message

    def serialize_waterbutler_credentials(self):
        raise NotImplementedError("BaseOAuthNodeSettings subclasses must implement a \
            'serialize_waterbutler_credentials' method.")

    def serialize_waterbutler_settings(self):
        raise NotImplementedError("BaseOAuthNodeSettings subclasses must implement a \
            'serialize_waterbutler_settings' method.")
class BaseCitationsNodeSettings(BaseOAuthNodeSettings):
    """Node settings shared by citation-manager addons.

    NOTE(review): relies on subclass-provided attributes ``list_id``,
    ``provider_name``, ``_api``, ``_fetch_folder_name`` and ``serializer`` —
    confirm against concrete subclasses.
    """
    class Meta:
        abstract = True

    def serialize_waterbutler_settings(self, *args, **kwargs):
        # required by superclass, not actually used
        pass

    def serialize_waterbutler_credentials(self, *args, **kwargs):
        # required by superclass, not actually used
        pass

    def create_waterbutler_log(self, *args, **kwargs):
        # required by superclass, not actually used
        pass

    @property
    def api(self):
        """authenticated ExternalProvider instance"""
        # Lazily constructed; assumes the subclass initializes ``_api`` to None.
        if self._api is None:
            self._api = self.oauth_provider(account=self.external_account)
        return self._api

    @property
    def complete(self):
        """Boolean indication of addon completeness"""
        # Also requires the grant to cover the currently selected folder.
        return bool(self.has_auth and self.user_settings.verify_oauth_access(
            node=self.owner,
            external_account=self.external_account,
            metadata={'folder': self.list_id},
        ))

    @property
    def root_folder(self):
        """Serialized representation of root folder"""
        return self.serializer.serialized_root_folder

    @property
    def folder_id(self):
        return self.list_id

    @property
    def folder_name(self):
        return self.fetch_folder_name

    @property
    def folder_path(self):
        return self.fetch_folder_name

    @property
    def fetch_folder_name(self):
        """Returns a displayable folder name"""
        # NOTE(review): this shadows the parent class's fetch_folder_name()
        # *method* with a property — confirm callers expect an attribute here.
        if self.list_id is None:
            return ''
        elif self.list_id == 'ROOT':
            return 'All Documents'
        else:
            return self._fetch_folder_name

    def clear_settings(self):
        """Clears selected folder configuration"""
        self.list_id = None

    def set_auth(self, *args, **kwargs):
        """Connect the node addon to a user's external account.

        This method also adds the permission to use the account in the user's
        addon settings.
        """
        # Reset folder selection before delegating to the OAuth base class.
        self.list_id = None
        self.save()

        return super(BaseCitationsNodeSettings, self).set_auth(*args, **kwargs)

    def deauthorize(self, auth=None, add_log=True):
        """Remove user authorization from this node and log the event."""
        if add_log:
            self.owner.add_log(
                '{0}_node_deauthorized'.format(self.provider_name),
                params={
                    'project': self.owner.parent_id,
                    'node': self.owner._id,
                },
                auth=auth,
            )
        self.clear_settings()
        self.clear_auth()
        self.save()

    def after_delete(self, node=None, user=None):
        # Deauthorize (with a log entry) when the owning node is deleted.
        self.deauthorize(Auth(user=user), add_log=True)

    def on_delete(self):
        # Deauthorize quietly when the addon itself is removed.
        self.deauthorize(add_log=False)
| |
#
# Explore
# - The Adventure Interpreter
#
# Copyright (C) 2006 Joe Peterson
#
import string
import zlib
import base64
import ExpIO
import ExpUtil
from Player import *
from Room import *
from Command import *
# Bit flags describing the outcome of executing a command; combined with |.
RESULT_NOTHING = 0
RESULT_NORMAL = 1
RESULT_DESCRIBE = 2
RESULT_WIN = 4
RESULT_DIE = 8
RESULT_END_GAME = 16
RESULT_NO_CHECK = 32
RESULT_SUSPEND = 64
RESULT_RESUME = 128

# Modes controlling how game suspension behaves.
SUSPEND_INTERACTIVE = 0
SUSPEND_QUIET = 1

# Movement commands: the full direction words plus their one-letter forms.
LONG_DIRECTION_COMMANDS = ["NORTH", "SOUTH", "EAST", "WEST", "UP", "DOWN"]
DIRECTIONS = [word[0] for word in LONG_DIRECTION_COMMANDS]
DIRECTION_COMMANDS = DIRECTIONS + LONG_DIRECTION_COMMANDS
class World:
def __init__(self, exp_io):
self.exp_io = exp_io
self.title = "This adventure has no title!"
self.rooms = {}
self.commands = []
self.player = Player(exp_io)
self.suspend_mode = SUSPEND_INTERACTIVE
self.last_suspend = None
self.trs_compat = False
    def load(self, filename):
        """Parse an adventure file, populating rooms, commands and the player.

        Each line is ``KEYWORD=params`` (or a bare keyword); backslashes in
        params become newlines. NOTE(review): uses Python 2-only builtins
        (``file``, ``dict.has_key``, ``string.atoi``/``maketrans``).
        """
        start_room = None      # name given by START_ROOM, if any
        first_room = None      # fallback start: first ROOM encountered
        new_room = None        # room currently being defined
        new_command = None     # command currently being defined
        cur_room_name = None   # room scope for LOCAL/ROOM command definitions
        file_stream = file(filename)
        for line in file_stream:
            # Strip whitespace and turn literal backslashes into newlines.
            line = string.translate(string.strip(line), \
                string.maketrans('\\', '\n'))
            if line.find("=") != -1:
                keyword, params = line.split("=", 1)
            else:
                keyword = line
                params = None
            if keyword == "TITLE":
                self.title = params[:]
            elif keyword == "START_ROOM":
                start_room = params[:]
            elif keyword == "INVENTORY_LIMIT":
                self.player.item_limit = string.atoi(params)
            elif keyword == "ROOM":
                # Begin a new room definition; following keywords apply to it.
                new_room = Room()
                new_room.name = params[:]
                self.rooms[new_room.name] = new_room
                if first_room == None:
                    first_room = new_room
                cur_room_name = new_room.name
                new_command = None
            elif keyword == "LOCAL":
                # Scope subsequent COMMANDs to an existing room by name.
                cur_room_name = params[:]
                new_room = None
                new_command = None
            elif keyword == "GLOBAL":
                # Subsequent COMMANDs apply in every room.
                cur_room_name = None
                new_room = None
                new_command = None
            elif keyword == "COMMAND":
                # Syntax: words[,words...][:[+|-]condition] or a bare
                # +/-condition for an automatic command.
                new_command = Command()
                self.commands.append(new_command)
                if cur_room_name != None:
                    new_command.location = cur_room_name[:]
                if params[0] == "+":
                    new_command.condition = params[1:]
                elif params[0] == "-":
                    new_command.condition = params[:]
                else:
                    pos = params.find(":")
                    if pos != -1:
                        if params[pos + 1] == "+":
                            new_command.condition = params[pos + 2:]
                        else:
                            new_command.condition = params[pos + 1:]
                        new_command.commands = params[:pos].split(",")
                    else:
                        new_command.commands = params.split(",")
            elif keyword == "ACTION":
                # If there is no current command, make one.
                if new_command == None:
                    new_command = Command()
                    self.commands.append(new_command)
                    if cur_room_name != None:
                        new_command.location = cur_room_name[:]
                for action in params.split(";"):
                    new_command.add_action(action)
                    #new_command.actions.append(action)
            elif keyword == "DESC":
                if new_room != None:
                    new_room.desc = params[:]
            elif keyword == "ALT_DESC":
                if new_room != None:
                    new_room.desc_alt = params[:]
            elif keyword == "DESC_CONTROL":
                if new_room != None:
                    new_room.desc_ctrl = params[:]
            elif keyword == "CONTENTS":
                if new_room != None:
                    new_room.items = params.split(",")
            elif keyword == "NORTH":
                if new_room != None:
                    new_room.init_neighbor("N", params[:])
            elif keyword == "SOUTH":
                if new_room != None:
                    new_room.init_neighbor("S", params[:])
            elif keyword == "EAST":
                if new_room != None:
                    new_room.init_neighbor("E", params[:])
            elif keyword == "WEST":
                if new_room != None:
                    new_room.init_neighbor("W", params[:])
            elif keyword == "UP":
                if new_room != None:
                    new_room.init_neighbor("U", params[:])
            elif keyword == "DOWN":
                if new_room != None:
                    new_room.init_neighbor("D", params[:])
        # Choose the starting room: explicit START_ROOM, else the first room
        # defined, else a synthetic "limbo" room for empty adventures.
        if self.rooms.has_key(start_room):
            self.player.current_room = self.rooms[start_room]
        elif first_room != None:
            self.player.current_room = first_room
        else:
            self.player.current_room = Room()
            self.player.current_room.name = "limbo"
            self.player.current_room.desc = \
                "This adventure has no rooms. You are in limbo!"
        file_stream.close()
    def take_action(self, command, auto):
        """Execute a command's action list and return a RESULT_* bit mask.

        Action prefixes: ``/room`` move player, ``!msg`` win, ``=cmd`` run a
        command, ``%old,new`` swap items, ``+item``/``+$item`` add item (room/
        player), ``-item`` remove item, ``#room>item`` teleport item, ``[``
        open/block a way, ``*`` toggle the room's alternate description; any
        other text is a death message. A leading ``^`` on the first action
        marks the command as used up. ``auto`` is True for automatic commands.
        """
        result = RESULT_NORMAL
        error = False
        # No actions (or already used up, marked by a leading "^"): no-op.
        if len(command.actions) == 0 or len(command.actions[0]) == 0 or command.actions[0][0] == "^":
            result = RESULT_NOTHING
            if not auto:
                self.exp_io.tell("Nothing happens.")
        else:
            for action in command.actions:
                # An action may carry a trailing ":message" to print.
                if action.find(":") != -1:
                    action, message = action.split(":", 1)
                else:
                    message = None
                if len(action) == 0:
                    action = None
                if action != None:
                    if action[0] == "/":
                        # Move the player to the named room.
                        if self.rooms.has_key(action[1:]):
                            room = self.rooms[action[1:]]
                            self.player.current_room = room
                            result |= RESULT_DESCRIBE
                    elif action[0] == "!":
                        # Winning message; ends the game.
                        self.exp_io.tell("")
                        self.exp_io.tell(action[1:])
                        #self.exp_io.tell("")
                        #self.exp_io.tell("It took you ? moves to win.")
                        result |= RESULT_WIN
                        result |= RESULT_END_GAME
                    elif action[0] == "=":
                        # Recursively run another command (non-acknowledged).
                        result |= self.process_command(action[1:], False)
                    elif action[0] == "%":
                        # Replace old_item with new_item, wherever it is held.
                        if action.find(",") != -1:
                            old_item, new_item = action[1:].split(",", 1)
                            if self.player.remove_item(old_item):
                                self.player.add_item(new_item, True)
                            elif self.player.current_room.remove_item(old_item):
                                self.player.current_room.add_item(new_item, True)
                            else:
                                self.exp_io.tell("You can't do that yet.")
                                error = True
                    elif action[0] == "+":
                        # Add an item: "+$X" to the player, "+X" to the room.
                        if action[1] == "$":
                            if not self.player.add_item(action[2:], False):
                                self.exp_io.tell("You are carrying too much to do that.")
                                error = True
                            else:
                                # Mark command as used up.
                                command.actions[0] = "^" + command.actions[0]
                        else:
                            self.player.current_room.add_item(action[1:], True)
                            command.actions[0] = "^" + command.actions[0]
                    elif action[0] == "-":
                        # Remove an item from player or room.
                        if not self.player.remove_item(action[1:]):
                            if not self.player.current_room.remove_item(action[1:]):
                                self.exp_io.tell("You can't do that yet.")
                                error = True
                    elif action[0] == "#":
                        # Teleport an item to another room: "#room>item".
                        if action.find(">") != -1:
                            room_name, item = action[1:].split(">", 1)
                            if self.player.remove_item(item) or self.player.current_room.remove_item(item):
                                if self.rooms.has_key(room_name):
                                    self.rooms[room_name].add_item(item, True)
                                else:
                                    self.exp_io.tell("Wow, I think somthing just left our universe!")
                            else:
                                self.exp_io.tell("You can't do that yet.")
                                error = True
                    elif action[0] == "[":
                        # "[$D" blocks direction D; "[Droom" opens a way.
                        if action[1] == "$":
                            self.player.current_room.block_way(action[2])
                        else:
                            self.player.current_room.make_way(action[1], action[2:])
                        command.actions[0] = "^" + command.actions[0]
                    elif action[0] == "*":
                        # "*+" enables, "*-" (or anything else) disables the
                        # alternate room description via a trailing "+" flag.
                        if self.player.current_room.desc_ctrl != None:
                            if action[1] == "+":
                                if not (len(self.player.current_room.desc_ctrl) > 0 and self.player.current_room.desc_ctrl[-1] == "+"):
                                    self.player.current_room.desc_ctrl += "+"
                            else:
                                if len(self.player.current_room.desc_ctrl) > 0 and self.player.current_room.desc_ctrl[-1] == "+":
                                    self.player.current_room.desc_ctrl = self.player.current_room.desc_ctrl[:-1]
                    else:
                        # Unknown prefix: treat the action as a death message.
                        self.exp_io.tell("")
                        self.exp_io.tell(action)
                        result |= RESULT_DIE
                        result |= RESULT_END_GAME
                if error:
                    break
                if message != None:
                    if (result & RESULT_DESCRIBE) != 0:
                        self.exp_io.tell("")
                    self.exp_io.tell(message)
        # Failed actions, and automatic commands that did nothing visible,
        # report RESULT_NOTHING so callers don't re-describe the room.
        if error or (auto and result == RESULT_NORMAL):
            return RESULT_NOTHING
        else:
            return result
    def check_for_auto(self):
        """Run every automatic command (one with no command words) that applies
        in the current room and whose item condition holds; OR their results.
        """
        result = RESULT_NOTHING
        for c in self.commands:
            if (c.location == None or \
                c.location == self.player.current_room.name) and \
               len(c.commands) == 0:
                # Condition: none, "-ITEM" (player lacks item), or "ITEM"
                # (player has item).
                # NOTE(review): third clause compares the whole condition
                # string to "-" rather than its first character — looks like
                # it was meant to be c.condition[0] != "-"; confirm upstream.
                if c.condition == None or \
                   (c.condition[0] == "-" and \
                    not self.player.has_item(c.condition[1:])) or \
                   (c.condition != "-" and \
                    self.player.has_item(c.condition)):
                    result |= self.take_action(c, True)
        return result
def find_custom(self, cmd, r):
global_candidate = None
candidate = None
for c in self.commands:
if cmd in c.commands:
# Give priority to commands that are specific to
# this room (if specified), otherwise remember it
# as a candidate.
if r == None or c.location == r.name:
return c
elif c.location == None:
global_candidate = c
else:
candidate = c
if global_candidate != None:
return global_candidate
else:
return candidate
def process_command(self, wish, acknowledge):
    """Parse and execute one line of player input.

    `wish` is the raw text typed by the player; `acknowledge` controls
    whether a bare "Ok" is printed for silently-successful commands.
    Custom (game-defined) commands are tried first; built-in verbs
    (GO, LOOK, directions, HELP, QUIT, GET, DROP, INVENT, SUSPEND,
    RESUME) are tried second.  Returns a RESULT_* bitmask.
    """
    result = RESULT_NORMAL
    player_meets_condition = False
    player_in_correct_room = False
    command = None
    argument = None
    verbatim_argument = ""
    # Save the argument before case conversion in case someone needs it.
    pos = wish.find(" ")
    if pos != -1:
        verbatim_argument = wish[pos + 1:]
    wish = wish.upper()
    custom = self.find_custom(wish, self.player.current_room)
    if custom != None:
        # Condition semantics: None = unconditional; leading "-" =
        # player must NOT hold the item; otherwise must hold it.
        if custom.condition == None or \
           (custom.condition[0] == "-" and \
            not self.player.has_item(custom.condition[1:])) or \
           (custom.condition != "-" and \
            self.player.has_item(custom.condition)):
            player_meets_condition = True
        if custom.location == None or \
           self.player.current_room.name == custom.location:
            player_in_correct_room = True
    try_builtin = True
    if self.trs_compat:
        # TRS-80 compatibility mode: a matching custom command in the
        # right room always suppresses the built-in verb, even when its
        # condition is unmet.
        if custom != None and player_in_correct_room:
            try_builtin = False
            if player_meets_condition:
                result = self.take_action(custom, False)
            else:
                self.exp_io.tell("You can't do that yet.")
    else:
        if custom != None and player_in_correct_room and player_meets_condition:
            try_builtin = False
            result = self.take_action(custom, False)
    if try_builtin:
        # Split into verb and (optional) argument.
        if wish.find(" ") != -1:
            command, argument = wish.split(None, 1)
        else:
            command = wish[:]
        wants_to_walk = False
        goto_room = None
        if command == "GO":
            if argument == None:
                self.exp_io.tell("Go where?")
            elif argument in DIRECTION_COMMANDS:
                self.exp_io.tell('No need to say "go" for the simple directions.')
            else:
                self.exp_io.tell("I'm not sure how to get there. Try a direction.")
        elif command == "LOOK":
            if argument != None:
                self.exp_io.tell("There's really nothing more to see.")
            result |= RESULT_DESCRIBE
        elif command in DIRECTION_COMMANDS and argument == None:
            # Movement is deferred until after the dispatch chain; the
            # neighbor lookup uses the direction's first letter.
            wants_to_walk = True
            goto_room = self.player.current_room.neighbor(command[0])
        elif command == "HELP":
            self.exp_io.tell("""
These are some of the commands you may use:
NORTH or N (go north)
SOUTH or S (go south)
EAST or E (go east)
WEST or W (go west)
UP or U (go up)
DOWN or D (go down)
INVENT (see your inventory - what you are carrying)
LOOK (see where you are)
SUSPEND (save game to finish later)
RESUME (take up where you left off last time)
QUIT or STOP (quit game)
""")
        elif (command == "QUIT" or \
              command == "STOP") and \
             argument == None:
            if acknowledge:
                self.exp_io.tell("Ok")
            result |= RESULT_END_GAME
        elif command == "GET" or command == "TAKE":
            if argument != None:
                self.player.get_item(argument, acknowledge)
            else:
                self.exp_io.tell("Get what?")
        elif command == "DROP" or command == "THROW":
            if argument != None:
                self.player.drop_item(argument, acknowledge)
            else:
                self.exp_io.tell("Drop what?")
        elif (command == "INVENT" or \
              command == "INVENTORY") and \
             argument == None:
            self.player.list_items()
        elif command == "SUSPEND" or command == "SAVE":
            #self.exp_io.tell("Sorry, suspend has not yet been implemented.")
            if self.suspend_mode == SUSPEND_INTERACTIVE:
                # Interactive mode hands the player a printable resume
                # code built by get_state().
                self.exp_io.tell("")
                self.exp_io.tell("OK, grab the following long line and save it away somewhere.")
                self.exp_io.tell("This will be the command you use to resume your game:")
                self.exp_io.tell("")
                self.exp_io.tell("resume " + self.get_state())
                self.exp_io.tell("")
            elif self.suspend_mode == SUSPEND_QUIET:
                if acknowledge:
                    self.exp_io.tell("Ok")
                result |= RESULT_SUSPEND
        elif command == "RESUME" or command == "RESTORE":
            #self.exp_io.tell("Sorry, resume has not yet been implemented.")
            if self.suspend_mode == SUSPEND_INTERACTIVE:
                if argument == None:
                    self.exp_io.tell("Please follow this command with the code you were given")
                    self.exp_io.tell("when you suspended your game.")
                else:
                    # Use the pre-uppercase argument: resume codes are
                    # case-sensitive base64.
                    if not self.set_state(verbatim_argument):
                        self.exp_io.tell("Hmm, that resume code just doesn't seem to make sense! Sorry.")
                    else:
                        result |= (RESULT_DESCRIBE | RESULT_NO_CHECK)
            elif self.suspend_mode == SUSPEND_QUIET:
                if self.last_suspend != None:
                    if not self.set_state(self.last_suspend):
                        self.exp_io.tell("Hmm, the suspended game information doesn't look valid. Sorry.")
                    else:
                        result |= (RESULT_DESCRIBE | RESULT_NO_CHECK)
                else:
                    self.exp_io.tell("Hmm, there seems to be no suspended game information. Sorry.")
        elif custom != None:
            # A custom command matched but could not run here/now.
            if self.trs_compat:
                self.exp_io.tell("You can't do that here.")
            else:
                if not player_in_correct_room:
                    self.exp_io.tell("You can't do that here.")
                else:
                    self.exp_io.tell("You can't do that yet.")
        else:
            self.exp_io.tell("I don't understand.")
        if wants_to_walk:
            if self.rooms.has_key(goto_room):
                room = self.rooms[goto_room]
                self.player.current_room = room
                result |= RESULT_DESCRIBE
            else:
                self.exp_io.tell("You can't go that way.")
    return result
def get_state(self):
    """Serialize the whole game state into a printable resume code.

    Layout (fields joined with ';'): current room name; the player's
    items (','-joined); one '^'/'.' char per command recording whether
    its first action has been "done"; then one record per room
    (':'-joined: description-control flag, six direction save strings,
    room items).  A two-character checksum is prepended and the whole
    string is zlib-compressed and base64-encoded.  set_state() is the
    inverse.
    """
    # our current room
    buf = [self.player.current_room.name]
    # what we're carrying
    buf.append(string.join(self.player.items, ','))
    # and the command numbers having actions that have been "done"
    command_buf = []
    for command in self.commands:
        # A leading "^" on the first action marks a command as done.
        if len(command.actions) > 0 and len(command.actions[0]) > 0 and command.actions[0][0] == "^":
            command_buf.append("^")
        else:
            command_buf.append(".")
    buf.append(string.join(command_buf, ''))
    # now the room details that have changed
    # NOTE: sorted() makes the room order deterministic so set_state()
    # can walk the same sequence.
    for room_name, room in sorted(self.rooms.iteritems()):
        if room.desc_ctrl != None and len(room.desc_ctrl) > 0 and room.desc_ctrl[-1] == "+":
            room_data_buf = ["+"]
        else:
            room_data_buf = ["."]
        for dir in DIRECTIONS:
            room_data_buf.append(room.neighbor_save_string(dir))
        # (A commented-out optimization that compressed unchanged room
        # records used to live here.)
        # the items in the room
        room_data_buf.append(string.join(room.items, ','))
        buf.append(string.join(room_data_buf, ':'))
    buf_string = string.join(buf, ';')
    # Simple additive checksum over the payload, encoded as two
    # printable characters (6 bits each, offset by 0x21).
    checksum = 0
    for i in range(len(buf_string)):
        checksum += ord(buf_string[i])
    return base64.b64encode(zlib.compress(chr(((checksum >> 6) & 0x3f) + 0x21) + chr((checksum & 0x3f) + 0x21) + buf_string))
def set_state(self, s):
    """Restore game state from a resume code produced by get_state().

    Returns True on success.  On any validation failure (bad base64 /
    zlib data, checksum mismatch, wrong field counts, unknown room,
    too many carried items) the previous state is restored as far as
    possible and False is returned.
    """
    try:
        state_str = zlib.decompress(base64.b64decode(s))
    except:
        return False
    if len(state_str) < 2:
        return False
    # Recompute the additive checksum over the payload and compare it
    # with the two-character prefix written by get_state().
    checksum = 0
    for i in range(2, len(state_str)):
        checksum += ord(state_str[i])
    checksum_str = chr(((checksum >> 6) & 0x3f) + 0x21) + chr((checksum & 0x3f) + 0x21)
    if checksum_str != state_str[:2]:
        return False
    parts = state_str[2:].split(';')
    # parts[0] = room, parts[1] = inventory, parts[2] = command flags,
    # parts[3:] = one record per room.
    if len(self.rooms) != len(parts) - 3:
        return False
    if len(self.commands) != len(parts[2]):
        return False
    # Recover the current room.
    prev_room = self.player.current_room
    try:
        self.player.current_room = self.rooms[parts[0]]
    except KeyError:
        # If the room name is invalid, recover the previous location and
        # return error status.
        self.player.current_room = prev_room
        return False
    # Recover the player's items.
    if parts[1] == "":
        new_player_items = []
    else:
        new_player_items = parts[1].split(',')
    # If the player now has more than he can carry, which should never
    # happen, recover the previous location and return error status.
    if len(new_player_items) > self.player.item_limit:
        self.player.current_room = prev_room
        return False
    else:
        self.player.items = new_player_items
    # Recover the state of the actions.
    command_idx = 0
    for command in self.commands:
        if len(command.actions) > 0:
            # Add or strip the "^" done-marker so it matches the flag
            # recorded in the save code.
            if parts[2][command_idx] == '^' and (len(command.actions[0]) == 0 or command.actions[0][0] != '^'):
                command.actions[0] = "^" + command.actions[0]
            elif parts[2][command_idx] != '^' and (len(command.actions[0]) > 0 and command.actions[0][0] == '^'):
                command.actions[0] = command.actions[0][1:]
        command_idx += 1
    # Recover the room details.  sorted() matches get_state()'s order.
    room_idx = 0
    for room_name, room in sorted(self.rooms.iteritems()):
        room_code = parts[room_idx + 3].split(':')
        # (A commented-out sketch for padding short, old-format room
        # codes to 8 fields used to live here; it was never enabled.)
        # first the description control
        if room.desc_ctrl != None:
            if room_code[0] == '+' and (len(room.desc_ctrl) == 0 or room.desc_ctrl[-1] != "+"):
                room.desc_ctrl += "+"
            elif room_code[0] != "+" and (len(room.desc_ctrl) > 0 and room.desc_ctrl[-1] == '+'):
                room.desc_ctrl = room.desc_ctrl[:-1]
        # now the possible directions
        # Remove anything after the "^", since we just want the
        # "current room" (note that this is for backwards compatibility
        # with the old save format, which saved the new room in the
        # following format: "curr_room^orig_room".
        for i in range(1, 7):
            if len(room_code[i]) > 0 and room_code[i][0] != "^":
                pos = room_code[i].find("^")
                if pos != -1:
                    room_code[i] = room_code[i][:pos]
            dir = DIRECTIONS[i - 1]
            if room_code[i] == "":
                room.revert_neighbor(dir)
            elif room_code[i][0] == "^":
                room.block_way(dir)
            else:
                room.make_way(dir, room_code[i])
        # now the contents
        if room_code[7] == "":
            room.items = []
        else:
            room.items = room_code[7].split(',')
        room_idx += 1
    return True
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
This test basically just plays around with image.rollimg.
It has four examples
* image_reduce: this takes an Image having, say, an axis 't' and returns another
Image having reduced over 't'
* need_specific_axis_reduce: this takes an Image and a specific
axis name, like 't' and produces an Image reduced over 't'. raises an
exception if Image has no axis 't'
* image_call: this takes an Image having, say, an axis 't'
and does something along this axis -- like fits a regression model? and
outputs a new Image with the 't' axis replaced by 'new'
* image_modify_copy: this takes an Image and an axis specification,
such as 'x+LR', 'l', or 2, modifies a copy of the data by iterating over this
axis, and returns an Image with the same axes
Notes
-----
In these loaded Images, 't' is both an axis name and a world coordinate name so
it is not ambiguous to say 't' axis. It is slightly ambiguous to say 'x+LR' axis
if the axisnames are ['slice', 'frequency', 'phase'] but image.rollimg
identifies 'x+LR' == 'slice' == 0.
"""
from __future__ import absolute_import
import numpy as np
from ..image import (Image, rollimg, synchronized_order)
from ...reference.coordinate_map import (AffineTransform as AT, drop_io_dim,
AxisError)
from ...reference.coordinate_system import CoordinateSystem as CS
from ...reference.spaces import mni_csm
from ...image.image_spaces import xyz_affine
from nose.tools import (assert_raises, assert_equal)
from numpy.testing import assert_almost_equal, assert_array_equal
MNI3 = mni_csm(3)
MNI4 = mni_csm(4)
def image_reduce(img, reduce_op, axis='t'):
    """
    Take an Image, perform some reduce operation on it, over
    a specified axis, and return a new Image.

    For the sake of testing things out, we will assume that
    the operation reduces over the first axis only.

    Parameters
    ----------
    img : Image
    reduce_op : callable
        An operation that reduces over the first axis,
        such as lambda x: x.sum(0)
    axis : str or int
        Specification of axis of Image

    Returns
    -------
    newim : Image, missing axis
    """
    # Roll the requested axis to the front so `reduce_op` (which only
    # knows how to reduce over axis 0) can be applied directly.
    img = rollimg(img, axis)
    newdata = reduce_op(img.get_data())
    # drop_io_dim removes the matching input/output dimension from the
    # coordmap, so the returned Image is consistent with the reduced data.
    return Image(newdata, drop_io_dim(img.coordmap, axis))
def need_specific_axis_reduce(img, reduce_op):
    """
    Reduce `img` over the axis named 'specific' with `reduce_op`.

    For the sake of testing things out, we assume that the operation
    reduces over the first axis only.  Raises an error (via
    ``image_reduce``) when `img` has no axis named 'specific'.

    Parameters
    ----------
    img : Image
    reduce_op : callable
        An operation that reduces over the first axis,
        such as lambda x: x.sum(0)

    Returns
    -------
    newim : Image, missing axis
    """
    return image_reduce(img, reduce_op, axis='specific')
def image_call(img, function, inaxis='t', outaxis='new'):
    """
    Apply `function` along axis `inaxis` of `img`, returning a new Image
    in which that axis has been renamed to `outaxis`.

    For the sake of testing things out, we assume the operation can only
    work on the first axis of the array.

    Parameters
    ----------
    img : Image
    function : callable
        An operation that does something over the first axis,
        such as lambda x: x[::2]
    inaxis : str or int
        Specification of axis of Image
    outaxis : str
        Name of new axis in new Image

    Returns
    -------
    newim : Image
        with axis `inaxis` replaced with `outaxis`
    """
    rolled = rollimg(img, inaxis)
    # Canonical string name of the input axis (inaxis may have been an int).
    axis_name = rolled.axes.coord_names[0]
    transformed = function(rolled.get_data())
    renamed_cmap = rolled.coordmap.renamed_domain({axis_name: outaxis})
    out_img = Image(transformed, renamed_cmap)
    # Roll the (renamed) axis back to the position it had in `img`.
    original_position = img.axes.index(axis_name) + 1
    return rollimg(out_img, 0, original_position)
def image_modify(img, modify, axis='y+PA'):
    """
    Apply the in-place operation `modify` to a copy of `img`'s data,
    iterating over axis `axis`, and return a new Image with the same axes.

    For this operation, we are allowed to iterate over spatial axes.
    For the sake of testing things out, we assume `modify` can only
    operate by iterating over the first axis of an array.

    Parameters
    ----------
    img : Image
    modify : callable
        An operation that modifies an array. Something like::

            def f(x):
                x[:] = x.mean()

    axis : str or int
        Specification of axis of Image

    Returns
    -------
    newim : Image
        with a modified copy of img._data.
    """
    import copy
    rolled = rollimg(img, axis)
    new_data = rolled.get_data().copy()
    # `modify` mutates each slab along the first (rolled) axis in place.
    for slab in new_data:
        modify(slab)
    modified_img = Image(new_data, copy.copy(rolled.coordmap))
    # Now, we have to put the data back in the same order as img.
    return synchronized_order(modified_img, img)
def test_reduce():
    shape = (3, 5, 7, 9)
    data = np.random.standard_normal(shape)
    img = Image(data, AT(CS('ijkq'), MNI4, np.diag([3, 4, 5, 6, 1])))
    reduced = image_reduce(img, lambda x: x.sum(0), 'q')
    assert_array_equal(xyz_affine(img), xyz_affine(reduced))
    assert_equal(reduced.axes.coord_names, tuple('ijk'))
    assert_equal(reduced.shape, (3, 5, 7))
    assert_almost_equal(reduced.get_data(), data.sum(3))
    # An image whose spatial affine is not diagonal.
    non_diag = Image(data, AT(CS('ijkq'), MNI4, np.array(
        [[0, 1, 2, 0, 10],
         [3, 4, 5, 0, 11],
         [6, 7, 8, 0, 12],
         [0, 0, 0, 9, 13],
         [0, 0, 0, 0, 1]])))
    for name, world, num in zip('ijk', MNI3.coord_names, range(3)):
        for axis_id in (name, world, num):
            # Non-diagonal reduce raises an error
            assert_raises(AxisError, image_reduce, non_diag,
                          lambda x: x.sum(0), axis_id)
            # Diagonal reduces are OK
            image_reduce(img, lambda x: x.sum(0), axis_id)
def test_specific_reduce():
    shape = (3, 5, 7, 9)
    data = np.random.standard_normal(shape)
    img = Image(data, AT(CS('ijkq'), MNI4, np.diag([3, 4, 5, 6, 1])))
    # Without an axis called 'specific' the reduce must fail; we have to
    # rename the axis before need_specific_axis_reduce can succeed.
    assert_raises(AxisError, need_specific_axis_reduce, img, lambda x: x.sum(0))
    img = img.renamed_axes(q='specific')
    reduced = need_specific_axis_reduce(img, lambda x: x.sum(0))
    assert_array_equal(xyz_affine(img), xyz_affine(reduced))
    assert_equal(reduced.axes.coord_names, tuple('ijk'))
    assert_equal(reduced.shape, (3, 5, 7))
    assert_almost_equal(reduced.get_data(), data.sum(3))
def test_call():
    shape = (3, 5, 7, 12)
    data = np.random.standard_normal(shape)
    affine = np.eye(5)
    affine[:3, :3] = np.random.standard_normal((3, 3))
    affine[:4, 4] = np.random.standard_normal((4,))
    img = Image(data, AT(CS('ijkq'), MNI4, affine))
    # Subsample every other volume along 'q', renaming the axis to 'out'.
    out = image_call(img, lambda x: x[::2], 'q', 'out')
    assert_array_equal(xyz_affine(img), xyz_affine(out))
    assert_equal(out.axes.coord_names, tuple('ijk') + ('out',))
    assert_equal(out.shape, (3, 5, 7, 6))
    assert_almost_equal(out.get_data(), data[:, :, :, ::2])
def test_modify():
    shape = (3, 5, 7, 12)
    data = np.random.standard_normal(shape)
    affine = np.eye(5)
    affine[:3, :3] = np.random.standard_normal((3, 3))
    affine[:4, 4] = np.random.standard_normal((4,))
    img = Image(data, AT(CS('ijkq'), MNI4, affine))

    def keep_as_is(d):
        pass

    def set_to_mean(d):
        d[:] = d.mean()

    for name, world, num in zip('ijkq', MNI3.coord_names + ('q',), range(4)):
        for axis_id in (name, world, num):
            unchanged = image_modify(img, keep_as_is, axis_id)
            averaged = image_modify(img, set_to_mean, axis_id)
            # The no-op modify must leave data, affine and axes intact.
            assert_array_equal(unchanged.get_data(), img.get_data())
            assert_array_equal(xyz_affine(img), xyz_affine(unchanged))
            assert_equal(unchanged.axes, img.axes)
            assert_array_equal(xyz_affine(img), xyz_affine(averaged))
            assert_equal(averaged.axes, img.axes)
            # Make sure that set_to_mean works as expected: every slab
            # along the iterated axis becomes its own mean.
            src = np.rollaxis(img.get_data(), num)
            out = np.rollaxis(averaged.get_data(), num)
            for k in range(src.shape[0]):
                assert_almost_equal(out[k], src[k].mean())
| |
__author__ = 'Dennis'
from copy import copy
class Entity(object):
    """
    The basic component in the simulation.

    An Entity can perform actions and be acted on itself, and it can
    observe other Entities.
    An Entity is a self-contained unit and should not have any references
    directly (in its Physical State) to other Entities in a possible World.
    If an Entity has a reference at all, it is one that is in its Internal
    State, grounded in experience through its Senses.
    Actions are local to Entities: They change their internal (physical)
    state.
    The consequences of this, among with the consequences of the entity's
    physics, are used to have interactions between Entities.

    Attributes
    ----------
    name : string
        A name for identifying purposes (for example, in the log).
    log : Log
        Log to use to document changes in this Entity.
    attributes : {name: value}
        The attributes constituting the physical representation of the Entity.
    attribute_values : {name: []}
        List of possible values for every attribute.
    sensors : [Sensor]
    observations : {name: value}
    physics : function
        a function that changes the state using only the state's
        attributes
    emission : function
        Returns a list of signals to be emitted in this frame, based on the
        Entity's internal state.
    actions : {name: (function, [value])}
        All possible actions identified by their name, with the function that
        describes how its parameters influence the internal state,
        a list/generator of all possible values.
    default_action : {name: value}
        A default action that is considered to be equivalent to the absence
        of the action.
    events : {name: function(old, new)}
        Specifies for every attribute what events it triggers when it changes.
        The functions return an event.
        An event is a tuple of (name, {name: value}) of event name and its
        parameters name/value pairs.
    triggers : {name: function(self, ...)}
        callback functions that change the attributes when called
    agent : Agent
        Decision mechanism that turns sensed observations into motor signals.
    motor_signal_queue : [(name, value)]
        All action/parameter pairs that are queued to be executed.
        Both name and its parameter name/value pairs are provided.
    """
    def __init__(self, name, agent=None, visual=None):
        self.name = name
        self.log = None
        self.attributes = {}
        # Short alias: self.a refers to the SAME dict object as
        # self.attributes, so writes through either are visible in both.
        self.a = self.attributes
        self.attribute_values = {}
        self.sensors = []
        self.observations = {}
        # Default physics/emission are no-ops; install real ones with
        # set_physics()/set_emission().
        self.physics = lambda x: None
        self.emission = lambda x: []
        self.actions = {}
        self.default_action = {}
        self.events = {}
        self.triggers = {}
        self.agent = agent
        self.visual = visual
        self.motor_signal_queue = []
        self.signal_queue = []
        self.event_queue = []
    def start(self):
        """
        Called when the experiment starts.
        """
        if self.agent is not None:
            self.agent.init_internal(self)
    def try_change(self, attribute, value):
        """
        Checks to see if setting the specified attribute's value is different from the
        current value, sets the attribute and notifies.

        Parameters
        ----------
        attribute : string
        value : value

        Returns
        -------
        bool
            True if the attribute changes, False otherwise
        """
        # The emission function is obscure.
        # When attributes change, the modality these attributes are in should
        # determine whether events/signals are sent or not.
        if self.a[attribute] != value:
            old = self.a[attribute]
            self.a[attribute] = value
            # Call the event for this change.
            # NOTE: add_attribute() always registers an entry in
            # self.events, so the lookup below is safe for attributes
            # added through that API.
            event = None
            if self.events[attribute] is not None:
                event = self.events[attribute](old, value)
            self.log.do_log("event", {"name": self.name, "attribute": attribute, "old": old, "new": value})
            if event is not None:
                e, params = event
                self.event_queue.append((attribute, e, params))
            return True
        return False
    def set_log(self, log):
        """
        Parameters
        ----------
        log : Log
            Log to use.
        """
        self.log = log
        if self.agent is not None:
            self.agent.set_log(log)
    def add_observation(self, observation):
        # Merge a {name: value} observation dict into the pending set.
        self.observations.update(observation)
    def queue_motor_signals(self):
        """
        Queues actions to be executed by consulting associated Agent, if available.

        See Also
        --------
        easl.mechanisms.Agent.act : Functionality delegated to Agent.
        """
        if self.agent is None:
            self.motor_signal_queue = []
            return
        # pass all observations to mechanisms and have it convert to internal representation
        for observation in self.observations:
            self.log.do_log("observation",
                            {"entity": self.name, "observation": observation, "value": self.observations[observation]})
            self.agent.sense((observation, self.observations[observation]))
        self.observations = {}
        # Also add internal representation as observations
        for observation in self.attributes:
            self.log.do_log("observation",
                            {"entity": self.name, "observation": observation, "value": self.attributes[observation]})
            self.agent.sense((observation, self.attributes[observation]))
        # ask mechanisms to give actions
        self.motor_signal_queue = self.agent.act()
    def add_attribute(self, name, initial_value, values, event):
        """
        Parameters
        ----------
        name : string
            Name to identify the attribute by.
        initial_value : value
            Any value that the attribute is set to when the experiment begins.
        values : []
            List of possible values for the attribute.
        event : function(old, new) : (name, value)
            Function that is called when the attribute changes.
            The function receives the old and new values and should return an
            event, i.e. a name and value pair.
        """
        self.attributes[name] = initial_value
        self.attribute_values[name] = values
        self.events[name] = event
    def add_action(self, name, values, default, f):
        """
        Adds an action to the possible actions.

        Defining Actions:
        name, [{paramname: [values]}], function

        Parameters
        ----------
        name : string
            name the action will be identified/called by
        values : [values]
            Possible values for this action.
        default : value
            Default value to be used when the action is absent.
            Considered to be equivalent to doing no action.
        f : function
            callback that is called for an entity when the action is performed
        """
        self.actions[name] = (f, values)
        self.default_action[name] = default
    def add_sensor(self, sensor):
        # The sensor writes directly into this entity's observations dict.
        sensor.set_observations(self.observations)
        self.sensors.append(sensor)
    def add_trigger(self, name, trigger):
        """
        A Trigger changes the Entity's internal state if a match for a
        cause was found.
        """
        self.triggers[name] = trigger
    def set_physics(self, physics):
        self.physics = physics
    def set_agent(self, agent):
        self.agent = agent
    def set_emission(self, emission):
        self.emission = emission
    def execute_actions(self):
        """
        Calls all queued actions and clears the queue.
        """
        while len(self.motor_signal_queue) > 0:
            name, value = self.motor_signal_queue.pop(0)
            self.log.do_log("action", {"entity": self.name, "name": name, "value": value})
            parameters = {"self": self, "value": value}
            self.actions[name][0](**parameters)
    def emit_signals(self):
        # Ask the emission function which signals this frame produces and
        # queue them for later collection via get_queued_signals().
        emitting = self.emission(self)
        for signal in emitting:
            self.log.do_log("emission", {"entity": self.name, "name": signal.sig_type, "value": signal.value})
            self.signal_queue.append(signal)
    def get_queued_signals(self):
        """
        Pass all the queued signals so far and clear the queue.
        """
        signals = copy(self.signal_queue)
        self.signal_queue = []
        return signals
    def call_trigger(self, name, params):
        # Unknown trigger names are silently ignored.
        if name in self.triggers:
            self.log.do_log("trigger", {"name": name})
            params["self"] = self
            self.triggers[name](**params)
    def is_active(self):
        """
        If the entity performs any actions, i.e. has an associated mechanisms.
        """
        return self.agent is not None
    def measure(self):
        """
        Log all attribute values as one measurement record.
        """
        measurement = copy(self.attributes)
        measurement["entity"] = self.name
        self.log.do_log("measurement", measurement)
    def visualize(self):
        """
        Creates a Visualization from the attributes.

        :return: the Visualization, or None when no visual is attached.
        """
        if self.visual is not None:
            return self.visual.visualize(self)
        else:
            return None
    def visualize_agent(self):
        # Returns None implicitly when no agent is attached.
        if self.agent is not None:
            return self.agent.visualize()
| |
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The Properties module supplies a wide range of options that are
implemented as Jenkins job properties.
**Component**: properties
:Macro: property
:Entry Point: jenkins_jobs.properties
Example::
job:
name: test_job
properties:
- github:
url: https://github.com/openstack-infra/jenkins-job-builder/
"""
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
from jenkins_jobs.errors import JenkinsJobsException
def builds_chain_fingerprinter(parser, xml_parent, data):
    """yaml: builds-chain-fingerprinter
    Builds chain fingerprinter.
    Requires the Jenkins `Builds chain fingerprinter Plugin.
    <https://wiki.jenkins-ci.org/display/JENKINS/Builds+chain+fingerprinter>`_

    :arg bool per-builds-chain: enable builds hierarchy fingerprinting
        (default False)
    :arg bool per-job-chain: enable jobs hierarchy fingerprinting
        (default False)

    Example:

    .. literalinclude:: /../../tests/properties/fixtures/fingerprinter.yaml
    """
    plugin_tag = ('org.jenkinsci.plugins.'
                  'buildschainfingerprinter.'
                  'AutomaticFingerprintJobProperty')
    fingerprinter = XML.SubElement(xml_parent, plugin_tag)
    # Both options default to disabled; Jenkins expects 'true'/'false'.
    for option, element in (('per-builds-chain', 'isPerBuildsChainEnabled'),
                            ('per-job-chain', 'isPerJobsChainEnabled')):
        XML.SubElement(fingerprinter, element).text = str(
            data.get(option, False)).lower()
def ownership(parser, xml_parent, data):
    """yaml: ownership
    Plugin provides explicit ownership for jobs and slave nodes.
    Requires the Jenkins `Ownership Plugin.
    <https://wiki.jenkins-ci.org/display/JENKINS/Ownership+Plugin>`_

    :arg bool enabled: whether ownership enabled (default : true)
    :arg str owner: the owner of job
    :arg list co-owners: list of job co-owners

    Example::

      properties:
        - ownership:
            owner: abraverm
            co-owners:
              - lbednar
              - edolinin
    """
    plugin_tag = ('com.synopsys.arc.'
                  'jenkins.plugins.ownership.jobs.JobOwnerJobProperty')
    ownership_node = XML.SubElement(
        XML.SubElement(xml_parent, plugin_tag), 'ownership')
    enabled_flag = str(data.get('enabled', True)).lower()
    XML.SubElement(ownership_node, 'ownershipEnabled').text = enabled_flag
    XML.SubElement(ownership_node, 'primaryOwnerId').text = data.get('owner')
    co_owners = data.get('co-owners', [])
    if co_owners:
        ids_node = XML.SubElement(ownership_node, 'coownersIds')
        for person in co_owners:
            XML.SubElement(ids_node, 'string').text = person
def promoted_build(parser, xml_parent, data):
    """yaml: promoted-build
    Marks a build for promotion. A promotion process with an identical
    name must be created via the web interface in the job in order for the job
    promotion to persist. Promotion processes themselves cannot be configured
    by jenkins-jobs due to the separate storage of plugin configuration files.
    Requires the Jenkins `Promoted Builds Plugin.
    <https://wiki.jenkins-ci.org/display/JENKINS/Promoted+Builds+Plugin>`_

    :arg list names: the promoted build names

    Example::

      properties:
        - promoted-build:
            names:
              - "Release to QA"
              - "Jane Must Approve"
    """
    # NB: the double underscore in the tag is Jenkins' XStream escaping
    # of '_' in 'promoted_builds'.
    prop = XML.SubElement(xml_parent,
                          'hudson.plugins.promoted__builds.'
                          'JobPropertyImpl')
    process_names = data.get('names', [])
    if process_names:
        container = XML.SubElement(prop, 'activeProcessNames')
        for process_name in process_names:
            XML.SubElement(container, 'string').text = str(process_name)
def github(parser, xml_parent, data):
    """yaml: github
    Sets the GitHub URL for the project.

    :arg str url: the GitHub URL

    Example::

      properties:
        - github:
            url: https://github.com/openstack-infra/jenkins-job-builder/
    """
    project_property = XML.SubElement(
        xml_parent,
        'com.coravy.hudson.plugins.github.'
        'GithubProjectProperty')
    # 'url' is mandatory: a missing key raises KeyError, matching the
    # required nature of the option.
    XML.SubElement(project_property, 'projectUrl').text = data['url']
def least_load(parser, xml_parent, data):
    """yaml: least-load
    Enables the Least Load Plugin.
    Requires the Jenkins `Least Load Plugin.
    <https://wiki.jenkins-ci.org/display/JENKINS/Least+Load+Plugin>`_

    :arg bool disabled: whether or not leastload is disabled (default True)

    Example:

    .. literalinclude:: /../../tests/properties/fixtures/least-load002.yaml
    """
    disabled_flag = str(data.get('disabled', True)).lower()
    prop = XML.SubElement(xml_parent,
                          'org.bstick12.jenkinsci.plugins.leastload.'
                          'LeastLoadDisabledProperty')
    XML.SubElement(prop, 'leastLoadDisabled').text = disabled_flag
def throttle(parser, xml_parent, data):
    """yaml: throttle
    Throttles the number of builds for this job.
    Requires the Jenkins `Throttle Concurrent Builds Plugin.
    <https://wiki.jenkins-ci.org/display/JENKINS/
    Throttle+Concurrent+Builds+Plugin>`_

    :arg int max-per-node: max concurrent builds per node (default 0)
    :arg int max-total: max concurrent builds (default 0)
    :arg bool enabled: whether throttling is enabled (default True)
    :arg str option: throttle `project` or `category`
    :arg list categories: multiproject throttle categories

    Example::

      properties:
        - throttle:
            max-total: 4
            categories:
              - cat1
              - cat2
    """
    throttle_prop = XML.SubElement(xml_parent,
                                   'hudson.plugins.throttleconcurrents.'
                                   'ThrottleJobProperty')
    XML.SubElement(throttle_prop, 'maxConcurrentPerNode').text = str(
        data.get('max-per-node', '0'))
    XML.SubElement(throttle_prop, 'maxConcurrentTotal').text = str(
        data.get('max-total', '0'))
    # Jenkins expects literal 'true'/'false' strings.
    enabled_flag = 'true' if data.get('enabled', True) else 'false'
    XML.SubElement(throttle_prop, 'throttleEnabled').text = enabled_flag
    category_names = data.get('categories', [])
    if category_names:
        container = XML.SubElement(throttle_prop, 'categories')
        for category in category_names:
            XML.SubElement(container, 'string').text = str(category)
    XML.SubElement(throttle_prop, 'throttleOption').text = data.get('option')
    XML.SubElement(throttle_prop, 'configVersion').text = '1'
def inject(parser, xml_parent, data):
    """yaml: inject
    Allows you to inject environment variables into the build.
    Requires the Jenkins `Env Inject Plugin.
    <https://wiki.jenkins-ci.org/display/JENKINS/EnvInject+Plugin>`_

    :arg str properties-file: file to read with properties (optional)
    :arg str properties-content: key=value properties (optional)
    :arg str script-file: file with script to run (optional)
    :arg str script-content: script to run (optional)
    :arg str groovy-content: groovy script to run (optional)
    :arg bool load-from-master: load files from master (default false)
    :arg bool enabled: injection enabled (default true)
    :arg bool keep-system-variables: keep system variables (default true)
    :arg bool keep-build-variables: keep build variable (default true)

    Example::

      properties:
        - inject:
            properties-content: FOO=bar
    """
    inject = XML.SubElement(xml_parent,
                            'EnvInjectJobProperty')
    info = XML.SubElement(inject, 'info')
    # add_nonblank_xml_subelement only emits an element when the value is
    # non-empty, so absent options leave no trace in the XML.
    jenkins_jobs.modules.base.add_nonblank_xml_subelement(
        info, 'propertiesFilePath', data.get('properties-file'))
    jenkins_jobs.modules.base.add_nonblank_xml_subelement(
        info, 'propertiesContent', data.get('properties-content'))
    jenkins_jobs.modules.base.add_nonblank_xml_subelement(
        info, 'scriptFilePath', data.get('script-file'))
    jenkins_jobs.modules.base.add_nonblank_xml_subelement(
        info, 'scriptContent', data.get('script-content'))
    jenkins_jobs.modules.base.add_nonblank_xml_subelement(
        info, 'groovyScriptContent', data.get('groovy-content'))
    XML.SubElement(info, 'loadFilesFromMaster').text = str(
        data.get('load-from-master', False)).lower()
    XML.SubElement(inject, 'on').text = str(
        data.get('enabled', True)).lower()
    XML.SubElement(inject, 'keepJenkinsSystemVariables').text = str(
        data.get('keep-system-variables', True)).lower()
    XML.SubElement(inject, 'keepBuildVariables').text = str(
        data.get('keep-build-variables', True)).lower()
def authenticated_build(parser, xml_parent, data):
    """yaml: authenticated-build
    Specifies an authorization matrix where only authenticated users
    may trigger a build.

    DEPRECATED

    Example::

      properties:
        - authenticated-build
    """
    # TODO: generalize this
    if not data:
        return
    matrix = XML.SubElement(
        xml_parent, 'hudson.security.AuthorizationMatrixProperty')
    XML.SubElement(matrix, 'permission').text = (
        'hudson.model.Item.Build:authenticated')
def authorization(parser, xml_parent, data):
    """yaml: authorization
    Specifies an authorization matrix.

    The available rights are:
      job-delete, job-configure, job-read, job-extended-read, job-discover,
      job-build, job-workspace, job-cancel, run-delete, run-update, scm-tag

    Example::

      properties:
        - authorization:
            admin:
              - job-delete
              - job-configure
              - job-read
              - job-discover
              - job-build
              - job-workspace
              - job-cancel
              - run-delete
              - run-update
              - scm-tag
            anonymous:
              - job-discover
              - job-read
              - job-extended-read
    """
    # Map YAML right names to Jenkins permission identifiers.
    rights = {
        'job-delete': 'hudson.model.Item.Delete',
        'job-configure': 'hudson.model.Item.Configure',
        'job-read': 'hudson.model.Item.Read',
        'job-extended-read': 'hudson.model.Item.ExtendedRead',
        'job-discover': 'hudson.model.Item.Discover',
        'job-build': 'hudson.model.Item.Build',
        'job-workspace': 'hudson.model.Item.Workspace',
        'job-cancel': 'hudson.model.Item.Cancel',
        'run-delete': 'hudson.model.Run.Delete',
        'run-update': 'hudson.model.Run.Update',
        'scm-tag': 'hudson.scm.SCM.Tag',
    }
    if not data:
        return
    matrix = XML.SubElement(xml_parent,
                            'hudson.security.AuthorizationMatrixProperty')
    # One <permission> element per (user, right) pair, as "<right>:<user>".
    for username, granted in data.items():
        for right in granted:
            entry = XML.SubElement(matrix, 'permission')
            entry.text = '%s:%s' % (rights[right], username)
def extended_choice(parser, xml_parent, data):
    """yaml: extended-choice
    Creates an extended choice property where values can be read from a file
    Requires the Jenkins `Extended Choice Parameter Plugin.
    <https://wiki.jenkins-ci.org/display/JENKINS/
    Extended+Choice+Parameter+plugin>`_

    :arg string name: name of the property
    :arg string description: description of the property (optional, default '')
    :arg string property-file: location of property file to read from
        (optional, default '')
    :arg string property-key: key for the property-file (optional, default '')
    :arg bool quote-value: whether to put quotes around the property
        when passing to Jenkins (optional, default false)
    :arg string visible-items: number of items to show in the list
        (optional, default 5)
    :arg string type: type of select (optional, default single-select)
    :arg string value: comma separated list of values for the single select
        or multi-select box (optional, default '')
    :arg string default-value: used to set the initial selection of the
        single-select or multi-select box (optional, default '')
    :arg string default-property-file: location of property file when default
        value needs to come from a property file (optional, default '')
    :arg string default-property-key: key for the default property file
        (optional, default '')

    :raises JenkinsJobsException: when ``type`` is not one of the
        supported select types.

    Example::

      properties:
        - extended-choice:
            name: FOO
            description: A foo property
            property-file: /home/foo/property.prop
            property-key: key
            quote-value: true
            visible-items: 10
            type: multi-select
            value: foo,bar,select
            default-value: foo
            default-property-file: /home/property.prop
            default-property-key: fookey
    """
    definition = XML.SubElement(xml_parent,
                                'hudson.model.ParametersDefinitionProperty')
    definitions = XML.SubElement(definition, 'parameterDefinitions')
    extended = XML.SubElement(definitions, 'com.cwctravel.hudson.plugins.'
                              'extended__choice__parameter.'
                              'ExtendedChoiceParameterDefinition')
    XML.SubElement(extended, 'name').text = data['name']
    XML.SubElement(extended, 'description').text = data.get('description', '')
    XML.SubElement(extended, 'quoteValue').text = str(data.get('quote-value',
                                                               False)).lower()
    # BUG FIX: YAML parses ``visible-items: 10`` (as in the example above) as
    # an int, and a non-string Element.text breaks XML serialization in
    # ElementTree; coerce to str explicitly.
    XML.SubElement(extended, 'visibleItemCount').text = str(data.get(
        'visible-items', '5'))
    choice = data.get('type', 'single-select')
    choicedict = {'single-select': 'PT_SINGLE_SELECT',
                  'multi-select': 'PT_MULTI_SELECT',
                  'radio': 'PT_RADIO',
                  'checkbox': 'PT_CHECKBOX'}
    if choice not in choicedict:
        raise JenkinsJobsException("Type entered is not valid, must be one "
                                   "of: single-select, multi-select, radio, "
                                   "or checkbox")
    XML.SubElement(extended, 'type').text = choicedict[choice]
    XML.SubElement(extended, 'value').text = data.get('value', '')
    XML.SubElement(extended, 'propertyFile').text = data.get('property-file',
                                                             '')
    XML.SubElement(extended, 'propertyKey').text = data.get('property-key', '')
    XML.SubElement(extended, 'defaultValue').text = data.get('default-value',
                                                             '')
    XML.SubElement(extended, 'defaultPropertyFile').text = data.get(
        'default-property-file', '')
    XML.SubElement(extended, 'defaultPropertyKey').text = data.get(
        'default-property-key', '')
def priority_sorter(parser, xml_parent, data):
    """yaml: priority-sorter
    Allows simple ordering of builds, using a configurable job priority.
    Requires the Jenkins `Priority Sorter Plugin
    <https://wiki.jenkins-ci.org/display/JENKINS/Priority+Sorter+Plugin>`_.

    :arg int priority: Priority of the job. Higher value means higher
        priority, with 100 as the standard priority. (required)

    Example::

      properties:
        - priority-sorter:
            priority: 150
    """
    prop = XML.SubElement(
        xml_parent, 'hudson.queueSorter.PrioritySorterJobProperty')
    XML.SubElement(prop, 'priority').text = str(data['priority'])
def build_blocker(parser, xml_parent, data):
    """yaml: build-blocker
    This plugin keeps the actual job in the queue
    if at least one name of currently running jobs
    is matching with one of the given regular expressions.
    Requires the Jenkins `Build Blocker Plugin.
    <https://wiki.jenkins-ci.org/display/JENKINS/Build+Blocker+Plugin>`_

    :arg bool use-build-blocker: Enable or disable build blocker
        (optional) (default true)
    :arg list blocking-jobs: One regular expression per line
        to select blocking jobs by their names. (required)

    :raises JenkinsJobsException: when ``blocking-jobs`` is missing, null,
        or an empty list.

    Example::

      properties:
        - build-blocker:
            use-build-blocker: true
            blocking-jobs:
              - ".*-deploy"
              - "^maintenance.*"
    """
    blocker = XML.SubElement(xml_parent,
                             'hudson.plugins.'
                             'buildblocker.BuildBlockerProperty')
    if data is None or 'blocking-jobs' not in data:
        raise JenkinsJobsException('blocking-jobs field is missing')
    elif not data.get('blocking-jobs'):
        # BUG FIX: the previous check (``is None``) let an empty list slip
        # through despite the error message; reject any null/empty value.
        raise JenkinsJobsException('blocking-jobs list must not be empty')
    XML.SubElement(blocker, 'useBuildBlocker').text = str(
        data.get('use-build-blocker', True)).lower()
    # One regex per line, each terminated by a newline.
    XML.SubElement(blocker, 'blockingJobs').text = ''.join(
        value + '\n' for value in data['blocking-jobs'])
def copyartifact(parser, xml_parent, data):
    """yaml: copyartifact
    Specify a list of projects that have access to copy the artifacts of
    this project.
    Requires the Jenkins `Copy Artifact plugin.
    <https://wiki.jenkins-ci.org/display/JENKINS/Copy+Artifact+Plugin>`_

    :arg string projects: comma separated list of projects that can copy
        artifacts of this project. Wild card character '*' is available.

    Example:

    .. literalinclude:: \
    /../../tests/properties/fixtures/copyartifact.yaml
    """
    permission = XML.SubElement(xml_parent,
                                'hudson.plugins.'
                                'copyartifact.'
                                'CopyArtifactPermissionProperty',
                                plugin='copyartifact')
    projects = data.get('projects', None) if data else None
    if not projects:
        raise JenkinsJobsException("projects string must exist and "
                                   "not be empty")
    project_list = XML.SubElement(permission, 'projectNameList')
    XML.SubElement(project_list, 'string').text = projects
def batch_tasks(parser, xml_parent, data):
    """yaml: batch-tasks
    Batch tasks can be tasks for events like releases, integration, archiving,
    etc. In this way, anyone in the project team can execute them in a way
    that leaves a record.

    A batch task consists of a shell script and a name. When you execute
    a build, the shell script gets run on the workspace, just like a build.
    Batch tasks and builds "lock" the workspace, so when one of those
    activities is in progress, all the others will block in the queue.

    Requires the Jenkins `Batch Task Plugin.
    <https://wiki.jenkins-ci.org/display/JENKINS/Batch+Task+Plugin>`_

    :arg list batch-tasks: Batch tasks.

        :Task: * **name** (`str`) Task name.
               * **script** (`str`) Task script.

    Example:

    .. literalinclude:: /../../tests/properties/fixtures/batch-task.yaml
    """
    prop = XML.SubElement(xml_parent,
                          'hudson.plugins.batch__task.BatchTaskProperty')
    task_list = XML.SubElement(prop, 'tasks')
    # One <BatchTask> per entry, each carrying a name and a script.
    for entry in data:
        task_elem = XML.SubElement(task_list,
                                   'hudson.plugins.batch__task.BatchTask')
        XML.SubElement(task_elem, 'name').text = entry['name']
        XML.SubElement(task_elem, 'script').text = entry['script']
def heavy_job(parser, xml_parent, data):
    """yaml: heavy-job
    This plugin allows you to define "weight" on each job,
    making each job consume that many executors.
    Requires the Jenkins `Heavy Job Plugin.
    <https://wiki.jenkins-ci.org/display/JENKINS/Heavy+Job+Plugin>`_

    :arg int weight: Specify the total number of executors
        that this job should occupy (default 1)

    Example:

    .. literalinclude:: /../../tests/properties/fixtures/heavy-job.yaml
    """
    prop = XML.SubElement(xml_parent,
                          'hudson.plugins.'
                          'heavy__job.HeavyJobProperty')
    XML.SubElement(prop, 'weight').text = str(data.get('weight', 1))
def slave_utilization(parser, xml_parent, data):
    """yaml: slave-utilization
    This plugin allows you to specify the percentage of a slave's capacity a
    job wants to use.
    Requires the Jenkins `Slave Utilization Plugin.
    <https://wiki.jenkins-ci.org/display/JENKINS/Slave+Utilization+Plugin>`_

    :arg int slave-percentage: Specify the percentage of a slave's execution
        slots that this job should occupy (default: 0)
    :arg bool single-instance-per-slave: Control whether concurrent instances
        of this job will be permitted to run in parallel on a single slave
        (default: False)

    Example:

    .. literalinclude:: \
    /../../tests/properties/fixtures/slave-utilization1.yaml
    """
    prop = XML.SubElement(
        xml_parent, 'com.suryagaddipati.jenkins.SlaveUtilizationProperty')
    percent = int(data.get('slave-percentage', 0))
    # A non-zero percentage implies the job needs exclusive node access.
    exclusive = 'true' if percent else 'false'
    XML.SubElement(prop, 'needsExclusiveAccessToNode').text = exclusive
    XML.SubElement(prop, 'slaveUtilizationPercentage').text = str(percent)
    single = str(data.get('single-instance-per-slave', False)).lower()
    XML.SubElement(prop, 'singleInstancePerSlave').text = single
def delivery_pipeline(parser, xml_parent, data):
    """yaml: delivery-pipeline
    Requires the Jenkins `Delivery Pipeline Plugin.
    <https://wiki.jenkins-ci.org/display/JENKINS/Delivery+Pipeline+Plugin>`_

    :arg str stage: Name of the stage for this job (default: '')
    :arg str task: Name of the task for this job (default: '')

    Example:

    .. literalinclude:: \
    /../../tests/properties/fixtures/delivery-pipeline1.yaml
    """
    prop = XML.SubElement(
        xml_parent, 'se.diabol.jenkins.pipeline.PipelineProperty')
    XML.SubElement(prop, 'stageName').text = data.get('stage', '')
    XML.SubElement(prop, 'taskName').text = data.get('task', '')
def zeromq_event(parser, xml_parent, data):
    """yaml: zeromq-event
    This is a Jenkins plugin that will publish Jenkins Job run events
    (start, complete, finish) to a ZMQ PUB socket.
    Requires the Jenkins `ZMQ Event Publisher.
    <https://git.openstack.org/cgit/openstack-infra/zmq-event-publisher>`_

    Example:

    .. literalinclude:: \
    /../../tests/properties/fixtures/zeromq-event.yaml
    """
    prop = XML.SubElement(
        xml_parent,
        'org.jenkinsci.plugins.'
        'ZMQEventPublisher.HudsonNotificationProperty')
    # The property carries no configuration; it is simply enabled.
    XML.SubElement(prop, 'enabled').text = 'true'
class Properties(jenkins_jobs.modules.base.Base):
    """Job-properties module.

    Dispatches each entry under the job's ``properties`` list to its
    registered 'property' component, attaching the generated XML to the
    job's <properties> element (created on demand).
    """
    sequence = 20

    component_type = 'property'
    component_list_type = 'properties'

    def gen_xml(self, parser, xml_parent, data):
        container = xml_parent.find('properties')
        if container is None:
            container = XML.SubElement(xml_parent, 'properties')
        for entry in data.get('properties', []):
            self.registry.dispatch('property', parser, container, entry)
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Collective Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import kernels
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
class CollectiveOpTest(test.TestCase):
  """Tests for collective ops (all-reduce, broadcast, all-gather) on CPU.

  All tests run in a single process using multiple logical CPU devices
  (via ``device_count`` in the session config or logical device
  configuration in eager mode).
  """

  def _testCollectiveReduce(self, inputs, expected, set_graph_key,
                            communication_hint='auto', fp16=False,
                            instance_key=1, merge_op='Add', final_op='Div'):
    """Builds one all-reduce per device over `inputs` and checks `expected`.

    Args:
      inputs: list of per-device input values; its length fixes group_size.
      expected: value every device's reduction result is compared against.
      set_graph_key: whether to set `collective_graph_key` in run options.
      communication_hint: passed through to `collective_ops.all_reduce`.
      fp16: when True, inputs are float16 and a looser tolerance is used.
      instance_key/merge_op/final_op: forwarded to the collective op.
    """
    group_key = 1
    group_size = len(inputs)
    device_type = 'CPU'
    config = config_pb2.ConfigProto(device_count={device_type: group_size})
    devices = ['/{}:{}'.format(device_type, i) for i in range(group_size)]
    with self.session(config=config) as sess:
      colred = []
      for i in range(group_size):
        with ops.device(devices[i]):
          tensor = constant_op.constant(inputs[i], dtype=(
              dtypes.float16 if fp16 else dtypes.float32))
          colred.append(collective_ops.all_reduce(
              tensor, group_size, group_key, instance_key, merge_op, final_op,
              communication_hint=communication_hint))
      run_options = config_pb2.RunOptions()
      if set_graph_key:
        run_options.experimental.collective_graph_key = 1
      results = sess.run(colred, options=run_options)
    # fp16 accumulates more rounding error, so allow a looser tolerance.
    tolerance = 1e-3 if fp16 else 1e-5
    for i in range(group_size):
      logging.info('i {} result {} expected {}'.format(i, results[i], expected))
      self.assertAllClose(results[i], expected, rtol=tolerance, atol=tolerance)

  def _testMultipleConcurrentCollectiveReduce(self, t0, t1, expected):
    """Runs two all-reduce instances concurrently over two CPU devices."""
    group_key = 1
    group_size = 2
    num_instances = 2
    all_reduces = []
    config = config_pb2.ConfigProto(device_count={'CPU': group_size})
    # Force deterministic sequential execution of collectives.
    config.experimental.collective_deterministic_sequential_execution = True
    with self.session(config=config) as sess:
      for cpu in range(group_size):
        with ops.device('/CPU:%d' % cpu):
          in_tensor = constant_op.constant(t0 if cpu == 0 else t1)
          for instance in range(num_instances):
            all_reduces.append(collective_ops.all_reduce(
                in_tensor, group_size, group_key, instance, 'Add', 'Div'))
      results = sess.run(all_reduces)
    for i in range(group_size * num_instances):
      self.assertAllClose(results[i], expected, rtol=1e-5, atol=1e-5)

  @test_util.run_deprecated_v1
  def testCollectiveReduce(self):
    self._testCollectiveReduce(
        inputs=[[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
                [0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]],
        expected=[0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2],
        set_graph_key=True)

  @test_util.run_deprecated_v1
  def testCollectiveAutoGraphKey(self):
    # Same as testCollectiveReduce but relies on the automatic graph key.
    self._testCollectiveReduce(
        inputs=[[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
                [0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]],
        expected=[0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2],
        set_graph_key=False)

  @test_util.run_deprecated_v1
  def testFp16Reduce(self):
    self._testCollectiveReduce(
        inputs=[[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
                [0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]],
        expected=[0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2],
        set_graph_key=True,
        fp16=True)

  @test_util.run_deprecated_v1
  def testCollectiveMultipleConcurrentReduce(self):
    self._testMultipleConcurrentCollectiveReduce(
        [0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
        [0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3],
        [0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2])

  @test_util.run_deprecated_v1
  def testNcclHintFallbackToRingReduce(self):
    """Tests that setting `communication_hint=nccl` works on non-GPU builds."""
    if kernels.get_registered_kernels_for_op('NcclAllReduce'):
      self.skipTest('Run only on non-GPU environments')
    self._testCollectiveReduce(
        inputs=[[0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1],
                [0.3, 1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3]],
        expected=[0.2, 1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2],
        set_graph_key=False,
        communication_hint='nccl')

  def _testWhile(self, num_vars, num_iterations, key_base):
    """Runs `num_vars` all-reduces inside a while loop on two CPU devices.

    Each loop iteration all-reduces (sums) each variable across the two
    devices, so after `num_iterations` iterations each starting value
    (1 << i) has been doubled `num_iterations` times.
    """
    group_size = 2
    group_key = 1
    instances = [(key_base + i) for i in range(num_vars)]
    devices = ['CPU:{}'.format(i) for i in range(group_size)]
    config = config_pb2.ConfigProto(device_count={'CPU': group_size})
    rewrite_options = config.graph_options.rewrite_options
    rewrite_options.scoped_allocator_optimization = (
        rewriter_config_pb2.RewriterConfig.ON)
    del rewrite_options.scoped_allocator_opts.enable_op[:]
    rewrite_options.scoped_allocator_opts.enable_op.append('CollectiveReduce')
    with self.session(config=config) as sess:
      loop_vars = []
      for device in devices:
        with ops.device(device):
          loop_vars.append(
              [variables.VariableV1((1 << i) * 1.) for i in range(num_vars)])
      # This variable controls number of iterations.
      loop_vars.append(variables.VariableV1(0.))

      def loop_body(dev0_tensors, dev1_tensors, loop_tensor):
        return_ops = []
        for i in range(len(devices)):
          device = devices[i]
          device_tensors = dev0_tensors if i == 0 else dev1_tensors
          with ops.device(device):
            device_collectives = []
            for j in range(num_vars):
              # NOTE(ayushd): we need the `cast` here to ensure that the input
              # to `all_reduce` has an explicit device string. We don't use
              # `identity` because `cast` is more resilient to getting optimized
              # away by various optimization passes.
              input_tensor = math_ops.cast(device_tensors[j], dtypes.float16)
              collective_op = collective_ops.all_reduce(
                  input_tensor, group_size, group_key, instances[j],
                  'Add', 'Id')
              output_tensor = math_ops.cast(collective_op, dtypes.float32)
              device_collectives.append(output_tensor)
            return_ops.append(device_collectives)
        return_ops.append(math_ops.add(loop_tensor, 1.))
        return return_ops

      # Run until last variable exceeds number of iterations.
      loop_cond = lambda d0, d1, i: math_ops.less(i, num_iterations)
      sess.run(variables.global_variables_initializer())
      results = sess.run(control_flow_ops.while_loop(loop_cond, loop_body,
                                                     loop_vars))
      # Each reduced variable was doubled once per iteration.
      self.assertEqual(results[:-1], [
          [((1 << (num_iterations + v)) * 1.) for v in range(num_vars)]
          for _ in range(group_size)])

  @test_util.run_deprecated_v1
  def testSimpleWhile(self):
    self._testWhile(num_vars=1, num_iterations=4, key_base=20)

  @test_util.run_deprecated_v1
  def testWhileMultipleAllReduce(self):
    self._testWhile(num_vars=2, num_iterations=4, key_base=20)

  @test_util.run_deprecated_v1
  def testWhileWithScopedAllocator(self):
    group_size = 2
    group_key = 1
    instance_key0 = 1
    instance_key1 = 2
    config = config_pb2.ConfigProto(device_count={'CPU': group_size})
    rewrite_options = config.graph_options.rewrite_options
    rewrite_options.scoped_allocator_optimization = (
        rewriter_config_pb2.RewriterConfig.ON)
    del rewrite_options.scoped_allocator_opts.enable_op[:]
    rewrite_options.scoped_allocator_opts.enable_op.append('CollectiveReduce')
    with self.session(config=config) as sess:
      run_ops = []
      for i in range(group_size):
        with ops.device('CPU:%d' % i):
          constant = constant_op.constant(0.)
          cond = lambda i: math_ops.less(i, 10.)
          body = lambda i: math_ops.add(i, 1.)
          input0 = control_flow_ops.while_loop(cond, body, [constant])
          input1 = math_ops.add(constant, 5)
          colred0 = collective_ops.all_reduce(input0, group_size, group_key,
                                              instance_key0, 'Add', 'Id')
          colred1 = collective_ops.all_reduce(input1, group_size, group_key,
                                              instance_key1, 'Add', 'Id')
          run_ops.append(math_ops.add_n([colred0, colred1]))
      results = sess.run(run_ops)
      # Per device: while-loop yields 10, add yields 5; each summed across
      # 2 devices gives 20 + 10 = 30.
      self.assertEqual(results, [30., 30.])

  @test_util.run_deprecated_v1
  def testCollectiveReduceScalar(self):
    self._testCollectiveReduce(inputs=[0.1, 0.3], expected=0.2,
                               set_graph_key=True)

  @test_util.run_deprecated_v1
  def testCollectiveReduceMaximum(self):
    self._testCollectiveReduce(
        inputs=[[1., 20., 3., 40., 5.], [10., 2., 30., 4., 50.]],
        expected=[10., 20., 30., 40., 50.],
        set_graph_key=True,
        instance_key=30,
        merge_op='Max',
        final_op='Id')

  @test_util.run_deprecated_v1
  def testCollectiveReduceMinimum(self):
    self._testCollectiveReduce(
        inputs=[[1., 20., 3., 40., 5.], [10., 2., 30., 4., 50.]],
        expected=[1., 2., 3., 4., 5.],
        set_graph_key=True,
        instance_key=40,
        merge_op='Min',
        final_op='Id')

  def _testCollectiveBroadcast(self, t0):
    """Broadcasts `t0` from CPU:0 and checks both devices receive it."""
    group_key = 1
    instance_key = 1
    with self.session(
        config=config_pb2.ConfigProto(device_count={'CPU': 2})) as sess:
      with ops.device('/CPU:0'):
        in0 = constant_op.constant(t0)
        out0 = collective_ops.broadcast_send(in0, in0.shape, in0.dtype,
                                             2, group_key, instance_key)
      with ops.device('/CPU:1'):
        c1 = constant_op.constant(t0)
        out1 = collective_ops.broadcast_recv(c1.shape, c1.dtype,
                                             2, group_key, instance_key)
      run_options = config_pb2.RunOptions()
      run_options.experimental.collective_graph_key = 1
      results = sess.run([out0, out1], options=run_options)
    self.assertAllClose(results[0], t0, rtol=1e-5, atol=1e-5)
    self.assertAllClose(results[1], t0, rtol=1e-5, atol=1e-5)

  @test_util.run_deprecated_v1
  def testCollectiveBroadcast(self):
    self._testCollectiveBroadcast([0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1])

  def _testCollectiveGather(self, t0, t1, expected, set_graph_key):
    """All-gathers `t0`/`t1` across two CPU devices and checks `expected`."""
    group_key = 1
    instance_key = 1
    with self.session(
        config=config_pb2.ConfigProto(device_count={'CPU': 2})) as sess:
      with ops.device('/CPU:0'):
        in0 = constant_op.constant(t0)
        c0 = collective_ops.all_gather(in0, 2, group_key, instance_key)
      with ops.device('/CPU:1'):
        in1 = constant_op.constant(t1)
        c1 = collective_ops.all_gather(in1, 2, group_key, instance_key)
      run_options = config_pb2.RunOptions()
      if set_graph_key:
        run_options.experimental.collective_graph_key = 1
      results = sess.run([c0, c1], options=run_options)
    self.assertAllClose(results[0], expected, rtol=1e-5, atol=1e-5)
    self.assertAllClose(results[1], expected, rtol=1e-5, atol=1e-5)

  @test_util.run_deprecated_v1
  def testCollectiveGather(self):
    # Covers rank-1, rank-2 and rank-3 inputs; gather concatenates along
    # the first dimension.
    self._testCollectiveGather([0, 1, 2, 3, 4, 5, 6, 7],
                               [10, 11, 12, 13, 14, 15, 16, 17],
                               [0, 1, 2, 3, 4, 5, 6, 7,
                                10, 11, 12, 13, 14, 15, 16, 17],
                               True)
    self._testCollectiveGather([[0, 1, 2, 3], [4, 5, 6, 7]],
                               [[10, 11, 12, 13], [14, 15, 16, 17]],
                               [[0, 1, 2, 3], [4, 5, 6, 7],
                                [10, 11, 12, 13], [14, 15, 16, 17]],
                               True)
    self._testCollectiveGather([[[0, 1], [2, 3]], [[4, 5], [6, 7]]],
                               [[[10, 11], [12, 13]], [[14, 15], [16, 17]]],
                               [[[0, 1], [2, 3]], [[4, 5], [6, 7]],
                                [[10, 11], [12, 13]], [[14, 15], [16, 17]]],
                               True)

  @test_util.run_deprecated_v1
  def testCollectiveGatherShapeMismatch(self):
    # A second gather on the same instance with a different shape must fail.
    group_key = 1
    instance_key = 1
    t0 = [1, 2, 3, 4]
    t1 = [5, 6, 7, 8]
    t2 = [9, 10]
    with self.session(
        config=config_pb2.ConfigProto(device_count={'CPU': 2})) as sess:
      with ops.device('/CPU:0'):
        in0 = constant_op.constant(t0)
        c0 = collective_ops.all_gather(in0, 2, group_key, instance_key)
      with ops.device('/CPU:1'):
        in1 = constant_op.constant(t1)
        in2 = constant_op.constant(t2)
        c1 = collective_ops.all_gather(in1, 2, group_key, instance_key)
        c2 = collective_ops.all_gather(in2, 2, group_key, instance_key)
      run_options = config_pb2.RunOptions()
      run_options.experimental.collective_graph_key = 1
      sess.run([c0, c1], options=run_options)
      with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                   'Shape mismatch'):
        sess.run([c0, c2], options=run_options)

  @test_util.run_deprecated_v1
  def testCollectiveGatherShapeMismatchAcrossDevices(self):
    # Devices disagreeing on the input shape within one run must fail.
    group_key = 1
    instance_key = 1
    t0 = [1, 2, 3, 4]
    t1 = [5, 6]
    with self.session(
        config=config_pb2.ConfigProto(device_count={'CPU': 2})) as sess:
      with ops.device('/CPU:0'):
        in0 = constant_op.constant(t0)
        c0 = collective_ops.all_gather(in0, 2, group_key, instance_key)
      with ops.device('/CPU:1'):
        in1 = constant_op.constant(t1)
        c1 = collective_ops.all_gather(in1, 2, group_key, instance_key)
      run_options = config_pb2.RunOptions()
      run_options.experimental.collective_graph_key = 1
      with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                   'Shape mismatch'):
        sess.run([c0, c1], options=run_options)

  @test_util.run_deprecated_v1
  def testCollectiveGatherPolymorphicShape(self):
    # Gather through placeholders with unknown leading dimension; two runs
    # with different feed sizes must both succeed.
    t0 = [0, 1, 2, 3, 4, 5, 6, 7]
    t1 = [10, 11, 12, 13, 14, 15, 16, 17]
    group_size = 2
    group_key = 1
    instance_key = 123
    with self.session(
        config=config_pb2.ConfigProto(
            device_count={'CPU': group_size})) as sess:
      with ops.device('/CPU:0'):
        in0 = array_ops.placeholder(dtype=dtypes.int32, shape=[None])
        c0 = collective_ops.all_gather(in0, group_size, group_key, instance_key)
      with ops.device('/CPU:1'):
        in1 = array_ops.placeholder(dtype=dtypes.int32, shape=[None])
        c1 = collective_ops.all_gather(in1, group_size, group_key, instance_key)
      results = sess.run([c0, c1], feed_dict={in0: t0, in1: t1})
      expected_output = [0, 1, 2, 3, 4, 5, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17]
      self.assertAllClose(results[0], expected_output, rtol=1e-5, atol=1e-5)
      self.assertAllClose(results[1], expected_output, rtol=1e-5, atol=1e-5)
      results_ = sess.run([c0, c1], feed_dict={in0: t0[1:], in1: t1[1:]})
      expected_output_ = [1, 2, 3, 4, 5, 6, 7, 11, 12, 13, 14, 15, 16, 17]
      self.assertAllClose(results_[0], expected_output_, rtol=1e-5, atol=1e-5)
      self.assertAllClose(results_[1], expected_output_, rtol=1e-5, atol=1e-5)

  @test_util.run_v2_only
  def testCollectiveGroupSizeMismatch(self):
    # Two members of the same group declaring different group sizes is an
    # error surfaced at execution time.
    cpus = config.list_physical_devices('CPU')
    self.assertEqual(len(cpus), 1)
    config.set_logical_device_configuration(cpus[0], [
        context.LogicalDeviceConfiguration(),
        context.LogicalDeviceConfiguration()
    ])
    context.ensure_initialized()

    @def_function.function
    def run_all_reduce():
      group_key = 10
      instance_key = 20
      t0 = [1, 2, 3, 4]
      t1 = [5, 6, 7, 8]
      with ops.device('/CPU:0'):
        in0 = constant_op.constant(t0)
        c0 = collective_ops.all_reduce(
            in0, group_size=2, group_key=group_key, instance_key=instance_key,
            merge_op='Add', final_op='Id')
      with ops.device('/CPU:1'):
        in1 = constant_op.constant(t1)
        c1 = collective_ops.all_reduce(
            in1, group_size=3, group_key=group_key, instance_key=instance_key,
            merge_op='Add', final_op='Id')
      return c0, c1

    with self.assertRaisesRegexp(errors.InternalError,
                                 'but that group has size'):
      run_all_reduce()

  @test_util.run_deprecated_v1
  def testCollectiveTensorsHaveNoDeviceSpecified(self):
    group_size = 2
    group_key = 1
    instance_key = 1

    @def_function.function
    def fn(all_args):
      results = []
      # The inputs have no devices set. This is expected to be a trace-time
      # check only.
      self.assertEqual(all_args[0].device, '')
      self.assertEqual(all_args[1].device, '')
      with ops.device('/CPU:0'):
        results.append(
            collective_ops.all_reduce(all_args[0], group_size, group_key,
                                      instance_key, 'Add', 'Div'))
      with ops.device('/CPU:1'):
        results.append(
            collective_ops.all_reduce(all_args[1], group_size, group_key,
                                      instance_key, 'Add', 'Div'))
      return results

    with self.session(config=config_pb2.ConfigProto(
        device_count={'CPU': 2})) as sess:
      with ops.device('/CPU:0'):
        in0 = constant_op.constant(1)
      with ops.device('/CPU:1'):
        in1 = constant_op.constant(3)
      result_op = fn([in0, in1])
      run_options = config_pb2.RunOptions()
      run_options.experimental.collective_graph_key = 1
      result = sess.run(result_op, options=run_options)
      # (1 + 3) / 2 on both devices.
      self.assertAllClose(result, [2, 2])

  @test_util.run_v2_only
  def testCollectiveGroupSizeOne(self):
    # A group of size one reduces/gathers to the input itself.
    group_size = 1
    group_key = 100
    instance_key = 100
    in_value = [1, 2, 3, 4]
    in_tensor = constant_op.constant(in_value)
    reduced_tensor = collective_ops.all_reduce(
        in_tensor, group_size, group_key, instance_key, 'Add', 'Id')
    self.assertAllEqual(in_value, reduced_tensor.numpy())
    gathered_tensor = collective_ops.all_gather(
        in_tensor, group_size, group_key, instance_key)
    self.assertAllEqual(in_value, gathered_tensor.numpy())
# Standard TensorFlow test entry point: discover and run all tests above.
if __name__ == '__main__':
  test.main()
| |
from __init__ import *
def test_clas_lens(strgcnfgextnexec=None):
    """Generate a randomized batch of mock PCAT runs labelled 'none'/'lens'.

    :param strgcnfgextnexec: name of a single configuration extension to
        execute (forwarded to ``pcat.main.initarry``); presumably ``None``
        runs the full batch — TODO confirm against pcat.main.initarry.
    """
    pathimag = os.environ["PCAT_DATA_PATH"] + '/imag/'
    # Arguments shared by every run: mock-only, no plotting, no elements.
    dictargs = {}
    dictargs['elemtype'] = ['lens']
    dictargs['exprtype'] = 'hubb'
    # NOTE(review): duplicate assignment of 'elemtype' (same value) — harmless
    # but likely a leftover.
    dictargs['elemtype'] = ['lens']
    dictargs['maxmnumbelempop0reg0'] = 0
    dictargs['numbelempop0reg0'] = 0
    dictargs['makeplot'] = False
    dictargs['mockonly'] = True
    dictargs['verbtype'] = 0
    dictargsvari = {}
    numbiter = 1000
    numbtotl = 1000000
    indxtotl = arange(numbtotl)
    indxprev = []
    # Delete incomplete previous runs; collect seed indices already used.
    listrtag = fnmatch.filter(os.listdir(pathimag), '*pcat_clas_lens_*')
    for rtag in listrtag:
        boolgdatinit = pcat.util.chec_statfile(rtag, 'gdatinit')
        if not boolgdatinit:
            pcat.util.dele_rtag(rtag)
        else:
            # Seed index is the 8 digits after the 4-char class prefix.
            indxprev.append(int(rtag.split('pcat_clas_lens_')[1][4:12]))
    indxprev = array(indxprev)
    # Draw fresh, previously unused seed indices without replacement.
    indxiter = setdiff1d(indxtotl, indxprev)
    indxiter = choice(indxiter, size=numbiter, replace=False)
    for k in indxiter:
        # Coin-flip each run into the 'none' or 'lens' class.
        if rand() > 0.5:
            namecnfgextn = 'none%08d' % k
        else:
            namecnfgextn = 'lens%08d' % k
        dictargsvari[namecnfgextn] = {}
        if namecnfgextn.startswith('lens'):
            # Only lensed runs get a nonzero source flux.
            dictargsvari[namecnfgextn]['truefluxsourreg0'] = 1e-22
        dictargsvari[namecnfgextn]['seedtype'] = k
    # NOTE(review): ``listnamecnfgextn`` is not defined in this function or
    # visible module scope — this call would raise NameError as written;
    # presumably it should be the keys of ``dictargsvari``. TODO confirm.
    dictglob = pcat.main.initarry( \
                                  dictargsvari, \
                                  dictargs, \
                                  listnamecnfgextn, \
                                  strgcnfgextnexec=strgcnfgextnexec, \
                                 )
def writ_data():
    """Collect mock count maps from finished runs and write a labelled HDF5
    training file (``claslens.h5`` with datasets ``cntpdata``/``boollens``).

    NOTE(review): this function uses Python 2 print statements; run under
    Python 2 only.
    """
    # NOTE(review): ``tf`` is imported but not used in the visible body —
    # presumably a leftover; confirm before removing.
    import tensorflow as tf
    pathdata = os.environ["PCAT_DATA_PATH"] + '/data/outp/'
    # Run tags encode the label: *_none_* -> no lens, *_lens_* -> lens.
    listrtagnone = fnmatch.filter(os.listdir(pathdata), '20*pcat_clas_lens_none*')
    listrtaglens = fnmatch.filter(os.listdir(pathdata), '20*pcat_clas_lens_lens*')
    listrtag = listrtagnone + listrtaglens
    boollenstemp = []
    cntpdatatemp = []
    for k, rtag in enumerate(listrtag):
        print 'Processing %s...' % rtag
        boolgdatinit = pcat.util.chec_statfile(rtag, 'gdatinit')
        if not boolgdatinit:
            # Skip runs whose initial-state file is missing or incomplete.
            continue
        pathoutprtag = pcat.util.retr_pathoutprtag(rtag)
        path = pathoutprtag + 'gdatinit'
        gdat = pcat.util.readfile(path)
        # Keep only the first slice along the outer axes of the count map.
        cntpdatatemp.append(gdat.cntpdatareg0[0, :, 0])
        if rtag in listrtaglens:
            boollenstemp.append(True)
        else:
            boollenstemp.append(False)
    numbcnfg = len(boollenstemp)
    # Pack the per-run maps and labels into dense arrays.
    cntpdata = empty((numbcnfg, cntpdatatemp[0].size))
    boollens = zeros(numbcnfg, dtype=bool)
    indxcnfg = arange(numbcnfg)
    print 'numbcnfg'
    print numbcnfg
    for k in indxcnfg:
        boollens[k] = boollenstemp[k]
        cntpdata[k, :] = cntpdatatemp[k]
        print 'k'
        print k
        print 'cntpdata[k, :]'
        summgene(cntpdata[k, :])
        print 'boollens[k]'
        print boollens[k]
        print
    pathclaslens = os.environ["TDGU_DATA_PATH"] + '/clas_lens/data/'
    path = pathclaslens + 'claslens.h5'
    print 'Writing to %s...' % path
    filearry = h5py.File(path, 'w')
    filearry.create_dataset('cntpdata', data=cntpdata)
    filearry.create_dataset('boollens', data=boollens)
    filearry.close()
def retr_conv(inpt, size_in, size_out, name="conv"):
    """Convolutional layer: 5x5 conv + ReLU + 2x2 max-pool.

    Creates ``wght``/``bias`` variables under ``name_scope(name)``, records
    histogram summaries for the weights, biases and activations, and returns
    the pooled activation tensor.
    """
    import tensorflow as tf
    with tf.name_scope(name):
        kernel = tf.Variable(
            tf.truncated_normal([5, 5, size_in, size_out], stddev=0.1),
            name="wght" + name)
        offset = tf.Variable(tf.constant(0.1, shape=[size_out]),
                             name="bias" + name)
        linear = tf.nn.conv2d(inpt, kernel,
                              strides=[1, 1, 1, 1], padding="SAME")
        activated = tf.nn.relu(linear + offset)
        for tag, node in (("wght", kernel), ("bias", offset),
                          ("acti", activated)):
            tf.summary.histogram(tag + name, node)
        return tf.nn.max_pool(activated, ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1], padding="SAME")
def retr_fulc(inpt, size_in, size_out, name="fulc"):
    """Fully connected layer with ReLU activation.

    Creates ``wght``/``bias`` variables under ``name_scope(name)``, records
    histogram summaries for the weights, biases and activations, and returns
    relu(inpt @ wght + bias).
    """
    import tensorflow as tf
    with tf.name_scope(name):
        weight = tf.Variable(
            tf.truncated_normal([size_in, size_out], stddev=0.1),
            name="wght" + name)
        offset = tf.Variable(tf.constant(0.1, shape=[size_out]),
                             name="bias" + name)
        activated = tf.nn.relu(tf.matmul(inpt, weight) + offset)
        for tag, node in (("wght", weight), ("bias", offset),
                          ("acti", activated)):
            tf.summary.histogram(tag + name, node)
        return activated
def clas_lens_wrap(ratelern, boolconvdoub, boolfulcdoub, strghypr):
print 'Classifer initialized.'
import tensorflow as tf
boolexeclens = False
if boolexeclens:
pathclaslens = os.environ["TDGU_DATA_PATH"] + '/clas_lens/data/'
path = pathclaslens + 'claslens.h5'
print 'Reading %s...' % path
filearry = h5py.File(path, 'r')
cntpdata = filearry['cntpdata'][()]
boollens = filearry['boollens'][()]
filearry.close()
numbdata = cntpdata.shape[0]
else:
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
cntpdata = mnist[0]
boollens = mnist[1]
print 'Found %s images.' % numbdata
indxdata = arange(numbdata)
if boolexeclens:
print 'Randomizing the order of the dataset...'
indxrndm = choice(indxdata, size=numbdata, replace=False)
cntpdata = cntpdata[indxrndm, :]
boollens = boollens[indxrndm]
# make labels one-hot encoded
boollenstemp = copy(boollens)
boollens = zeros((numbdata, 2))
boollens[where(logical_not(boollenstemp))[0], 0] = 1.
boollens[where(boollenstemp)[0], 1] = 1.
numbdatatran = int(0.5 * numbdata) - int(0.5 * numbdata) % 10
sizebtch = numbdatatran / 10
print 'Using %d of the images as the training dataset.' % numbdatatran
print 'Will train in 10 batches with batch size %d' % sizebtch
numbside = int(sqrt(cntpdata.shape[1]))
numbdatatest = numbdata - numbdatatran
indxdatatest = arange(numbdatatest)
filelabl = open(pathclaslens + 'labl.tsv', 'w')
for k in indxdatatest:
if boollens[k, 0] == 1.:
filelabl.write('0\t')
if boollens[k, 1] == 1.:
filelabl.write('1\t')
filelabl.close()
numbimagsidesprt = int(ceil(sqrt(numbdatatest)))
numbsidesprt = numbside * numbimagsidesprt
cntpdatasprt = zeros((numbsidesprt, numbsidesprt))
for k in indxdatatest:
indxxaxi = k % numbimagsidesprt
indxyaxi = k // numbimagsidesprt
cntpdatasprt[indxyaxi*numbside:(indxyaxi+1)*numbside, indxxaxi*numbside:(indxxaxi+1)*numbside] = cntpdata[k, :].reshape((numbside, numbside))
cntpdatasprt /= amax(cntpdatasprt)
cntpdatasprt *= 255.
sp.misc.imsave(pathclaslens + 'sprt.png', cntpdatasprt)
sizeembd = 1024
tf.reset_default_graph()
sess = tf.Session()
tenscntpdata = tf.placeholder(tf.float32, shape=[sizebtch, 10000], name="cntpdata")
tenstruelabl = tf.placeholder(tf.float32, shape=[sizebtch, 2], name="labl")
tens = tf.reshape(tenscntpdata, [sizebtch, 100, 100, 1])
tf.summary.image('inpt', tens, 3)
if boolconvdoub:
tens = retr_conv(tens, 1, 32, "conv0000")
tf.summary.image('conv0000', tens[:, :, :, 0:1], 3)
tens = retr_conv(tens, 32, 64, "conv0001");
tf.summary.image('conv0001', tens[:, :, :, 0:1], 3)
else:
tens = retr_conv(tens, 1, 64, "conv")
tf.summary.image('conv', tens, 3)
tens = tf.nn.max_pool(tens, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
tf.summary.image('convpool', tens, 3)
tens = tf.reshape(tens, [sizebtch, 25 * 25 * 64])
if boolfulcdoub:
tens = retr_fulc(tens, 25 * 25 * 64, sizeembd, "fulc0000")
inptembd = tens
logits = retr_fulc(tens, sizeembd, 2, "fulc0001")
else:
inptembd = tens
logits = retr_fulc(tens, 25*25*64, 2, "fulc")
tens = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tenstruelabl), name="cent")
tf.summary.scalar("tenscent", tens)
funcopti = tf.train.AdamOptimizer(ratelern).minimize(tens)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(tenstruelabl, 1))
accu = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar("tensaccu", accu)
smry = tf.summary.merge_all()
embedding = tf.Variable(tf.zeros([sizebtch, sizeembd]), name="embd")
assignment = embedding.assign(inptembd)
objtsave = tf.train.Saver()
sess.run(tf.global_variables_initializer())
objtwrit = tf.summary.FileWriter(pathclaslens + strghypr)
objtwrit.add_graph(sess.graph)
cnfgtsbr = tf.contrib.tensorboard.plugins.projector.ProjectorConfig()
cnfgembd = cnfgtsbr.embeddings.add()
cnfgembd.tensor_name = embedding.name
cnfgembd.sprite.image_path = pathclaslens + 'sprt.png'
cnfgembd.metadata_path = pathclaslens + 'labl.tsv'
cnfgembd.sprite.single_image_dim.extend([100, 100])
tf.contrib.tensorboard.plugins.projector.visualize_embeddings(objtwrit, cnfgtsbr)
numbiter = numbdatatran / sizebtch
indxiter = arange(numbiter)
numbepoc = 5
indxepoc = arange(numbepoc)
cntr = 0
for k in indxepoc:
for i in indxiter:
indxcnfgbtch = arange(i*sizebtch, (i+1)*sizebtch) + numbdatatest
batch = [cntpdata[indxcnfgbtch, :], boollens[indxcnfgbtch, :]]
if cntr % 1 == 0:
temp, meta = sess.run([accu, smry], feed_dict={tenscntpdata: batch[0], tenstruelabl: batch[1]})
objtwrit.add_summary(meta, cntr)
if False and cntr % 1 == 0:
sess.run(assignment, feed_dict={tenscntpdata: cntpdata[:sizebtch, :], tenstruelabl: boollens[:sizebtch, :]})
objtsave.save(sess, os.path.join(pathclaslens, "model.ckpt"), cntr)
print 'cntr: ', cntr
sess.run(funcopti, feed_dict={tenscntpdata: batch[0], tenstruelabl: batch[1]})
cntr += 1
def test_line():
    """Fit a 1-d linear model (y = w*x + b) with gradient descent and log
    weight/bias/cost summaries to TensorBoard under <data>/line."""
    import tensorflow as tf
    numbsamp = 100
    wghttrue = 3.
    biastrue = 2.
    pathclaslens = os.environ["TDGU_DATA_PATH"] + '/clas_lens/data/'
    # Synthetic dataset: a noisy line over [-1, 1].
    feat = np.linspace(-1, 1, numbsamp)
    truelabl = wghttrue * feat + biastrue + np.random.randn(numbsamp)
    tensfeat = tf.placeholder("float")
    tenstruelabl = tf.placeholder("float")
    # Trainable slope and intercept, randomly initialized.
    tenswght = tf.Variable(np.random.randn(), name="wght")
    tensbias = tf.Variable(np.random.randn(), name="bias")
    tensmodllabl = tf.add(tf.multiply(tensfeat, tenswght), tensbias)
    tf.summary.scalar("wght", tenswght)
    tf.summary.histogram("wght", tenswght)
    tf.summary.scalar("bias", tensbias)
    # Sum-of-squares cost, minimized by plain gradient descent.
    tenscost = tf.reduce_sum(tf.square(tenstruelabl - tensmodllabl))
    tf.summary.scalar("tenscost", tenscost)
    operloss = tf.train.GradientDescentOptimizer(0.0001).minimize(tenscost)
    smry = tf.summary.merge_all()
    objtwrit = tf.summary.FileWriter(pathclaslens + 'line')
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        feed = {tensfeat: feat, tenstruelabl: truelabl}
        for i in range(1000):
            # One optimizer step, then record the merged summaries.
            sess.run(operloss, feed_dict=feed)
            objtwrit.add_summary(sess.run(smry, feed_dict=feed), i)
def test_conv():
    """Train a two-conv-layer MNIST classifier for 200 steps and log
    accuracy/cross-entropy summaries (and a projector config) to
    TensorBoard under <data>/conv."""
    import os.path
    import shutil
    import tensorflow as tf
    pathclaslens = os.environ["TDGU_DATA_PATH"] + '/clas_lens/data/'
    # Sprite and label files for the TensorBoard embedding projector;
    # expected to live in the current working directory.
    LABELS = os.path.join(os.getcwd(), "labels_1024.tsv")
    SPRITES = os.path.join(os.getcwd(), "sprite_1024.png")
    mnist = tf.contrib.learn.datasets.mnist.read_data_sets(train_dir=pathclaslens + "data", one_hot=True)
    ratelern = 0.001
    tf.reset_default_graph()
    sess = tf.Session()
    # Inputs: flattened 28x28 MNIST images and one-hot digit labels.
    x = tf.placeholder(tf.float32, shape=[None, 784], name="x")
    y = tf.placeholder(tf.float32, shape=[None, 10], name="truelabl")
    feat = tf.reshape(x, [-1, 28, 28, 1])
    tf.summary.image('input', feat, 3)
    # Two conv/pool blocks: 28 -> 14 -> 7 spatially.
    tens = retr_conv(feat, 1, 32, "conv0000")
    tens = retr_conv(tens, 32, 64, "conv0001")
    tens = tf.reshape(tens, [-1, 7 * 7 * 64])
    fc1 = retr_fulc(tens, 7 * 7 * 64, 1024, "fc1")
    # NOTE(review): retr_fulc already applies ReLU, so this second ReLU is
    # redundant (though harmless) -- confirm intent.
    relu = tf.nn.relu(fc1)
    embedding_input = relu
    tf.summary.histogram("fc1/relu", relu)
    sizeembd = 1024
    logits = retr_fulc(fc1, 1024, 10, "fc2")
    with tf.name_scope("cent"):
        cent = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y), name="cent")
        tf.summary.scalar("cent", cent)
    with tf.name_scope("train"):
        opertran = tf.train.AdamOptimizer(ratelern).minimize(cent)
    with tf.name_scope("accu"):
        accu = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1)), tf.float32))
        tf.summary.scalar("accu", accu)
    summ = tf.summary.merge_all()
    # Embedding variable for the projector (checkpointing below is disabled).
    embedding = tf.Variable(tf.zeros([1024, sizeembd]), name="test_embedding")
    assignment = embedding.assign(embedding_input)
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(pathclaslens + 'conv')
    writer.add_graph(sess.graph)
    cnfgtdbr = tf.contrib.tensorboard.plugins.projector.ProjectorConfig()
    cnfgembd = cnfgtdbr.embeddings.add()
    cnfgembd.tensor_name = embedding.name
    cnfgembd.sprite.image_path = SPRITES
    cnfgembd.metadata_path = LABELS
    cnfgembd.sprite.single_image_dim.extend([28, 28])
    tf.contrib.tensorboard.plugins.projector.visualize_embeddings(writer, cnfgtdbr)
    for i in range(200):
        batch = mnist.train.next_batch(100)
        # Every 5th step, evaluate and log accuracy on the current batch.
        if i % 5 == 0:
            tensaccu, s = sess.run([accu, summ], feed_dict={x: batch[0], y: batch[1]})
            writer.add_summary(s, i)
        #if i % 500 == 0:
        #    sess.run(assignment, feed_dict={x: mnist.test.images[:1024], y: mnist.test.labels[:1024]})
        #    saver.save(sess, os.path.join(pathclaslens, "model.ckpt"), i)
        sess.run(opertran, feed_dict={x: batch[0], y: batch[1]})
def test_mlor():
    """Multinomial logistic (softmax) regression on MNIST, trained with
    gradient descent; summaries are written to TensorBoard under
    <data>/mlor."""
    import tensorflow as tf
    tf.reset_default_graph()
    sizebthc = 100
    ratelern = 0.5
    numbepoc = 5
    pathclaslens = os.environ["TDGU_DATA_PATH"] + '/clas_lens/data/'
    from tensorflow.examples.tutorials.mnist import input_data
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
    with tf.name_scope('inpt'):
        # Flattened 28x28 images and one-hot digit labels.
        tensfeat = tf.placeholder(tf.float32, shape=[None, 784], name="feat")
        tenstruelabl = tf.placeholder(tf.float32, shape=[None, 10], name="truelabl")
    with tf.name_scope("wght"):
        variwght = tf.Variable(tf.zeros([784, 10]))
    with tf.name_scope("bias"):
        varibias = tf.Variable(tf.zeros([10]))
    with tf.name_scope("softmax"):
        tensmodllabl = tf.nn.softmax(tf.matmul(tensfeat, variwght) + varibias)
    with tf.name_scope('cent'):
        # Hand-rolled cross entropy over the softmax output.
        tenscent = tf.reduce_mean(-tf.reduce_sum(tenstruelabl * tf.log(tensmodllabl), reduction_indices=[1]))
    with tf.name_scope('tran'):
        opertran = tf.train.GradientDescentOptimizer(ratelern).minimize(tenscent)
    with tf.name_scope('accu'):
        tensaccu = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(tensmodllabl, 1), tf.argmax(tenstruelabl, 1)), tf.float32))
    tf.summary.scalar("cent", tenscent)
    tf.summary.scalar("accu", tensaccu)
    opersmry = tf.summary.merge_all()
    numbbtch = int(mnist.train.num_examples / sizebthc)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        objtwrit = tf.summary.FileWriter(pathclaslens + 'mlor')
        objtwrit.add_graph(sess.graph)
        for indxepoc in range(numbepoc):
            for indxbtch in range(numbbtch):
                featbtch, truelablbtch = mnist.train.next_batch(sizebthc)
                # One training step; log the merged summaries per step.
                _, strgsmry = sess.run([opertran, opersmry], feed_dict={tensfeat: featbtch, tenstruelabl: truelablbtch})
                objtwrit.add_summary(strgsmry, indxepoc * numbbtch + indxbtch)
def clas_lens():
for ratelern in [1e-4]:
for boolfulcdoub in [True]:
for boolconvdoub in [True]:
if boolconvdoub:
strgconv = 'numbconv0002'
else:
strgconv = 'numbconv0001'
if boolfulcdoub:
strgfulc = 'numbfulc0002'
else:
strgfulc = 'numbfulc0001'
strghypr = 'ratelern%04g%s%s' % (ratelern, strgconv, strgfulc)
print 'Starting run for %s...' % strghypr
clas_lens_wrap(ratelern, boolfulcdoub, boolconvdoub, strghypr)
globals().get(sys.argv[1])(*sys.argv[2:])
| |
#
# MODULE ADAPTED FROM DJANGO
#
# Original file in django.core.urlresolvers
#
#
#
import re
from inspect import isclass
from djpcms.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from djpcms.http import get_http
from djpcms.utils import force_str
_view_cache = {}
class Resolver404(Exception):
    """Raised when no URL pattern matches the requested path."""
    pass
def cachevalue(path, view, site, editsite, kwargs):
    '''Store a resolved view in the module-level ``_view_cache`` and return
the cached ``(site, view, kwargs)`` tuple.

    :parameter path: absolute path to view (excluding leading slash).
    :parameter view: instance of :class:`djpcms.views.baseview.djpcmsview`.
    :parameter site: instance of the application site containing the ``view``.
    :parameter editsite: a site instance or None. When given, this is an
        editing view: the view is wrapped for inline editing and cached
        against the editing site instead.
    :parameter kwargs: dictionary of view parameters.'''
    from djpcms.views.baseview import editview
    if editsite:
        # Editing mode: wrap the view and cache it against the editing site.
        view = editview(view, editsite.settings.CONTENT_INLINE_EDITING['preurl'])
        site = editsite
    entry = (site, view, kwargs)
    _view_cache[path] = entry
    return entry
class ResolverMixin(object):
    '''A lazy mixin class for resolving urls. The main function here is the
    ``resolve`` method, which maps a request path to a cached
    ``(site, view, kwargs)`` tuple.'''
    def load(self):
        # Lazily build the url patterns via the subclass hook ``_load``.
        if getattr(self,'_urls',None) is None:
            self._urls = self._load()
    def __get_isloaded(self):
        # True once ``load`` has produced the url patterns.
        return getattr(self,'_urls',None) is not None
    isloaded = property(__get_isloaded)
    def urls(self):
        # Return the url patterns, loading them on first access.
        self.load()
        return self._urls
    def _load(self):
        # Subclass hook: build and return the url patterns.
        pass
    def editsite(self):
        # Subclass hook: return the editing site, or a falsy value when this
        # resolver has no inline-editing counterpart.
        return False
    def clearcache(self):
        # Drop the lazily-built resolver, the url patterns, the module-level
        # view cache and (if present) the page cache, so the next ``resolve``
        # rebuilds everything from scratch.
        global _view_cache
        self.resolver = None
        self._urls = None
        _view_cache.clear()
        pagecache = getattr(self,'pagecache',None)
        if pagecache:
            pagecache.clear()
    def clear(self):
        # Public alias for ``clearcache``.
        self.clearcache()
    def clean_path(self, environ):
        '''
        Clean url and redirect if needed: collapse duplicate slashes and
        append a trailing slash. Returns the url string when unchanged, or
        a redirect response (query string preserved for GET) when modified.
        '''
        path = environ['PATH_INFO']
        url = path
        if url:
            modified = False
            if '//' in path:
                # Collapse runs of slashes into a single one.
                url = re.sub("/+" , "/", url)
                modified = True
            if not url.endswith('/'):
                modified = True
                url = '%s/' % url
            if modified:
                if not url.startswith('/'):
                    url = '/%s' % url
                qs = environ['QUERY_STRING']
                # NOTE(review): WSGI stores the verb under 'REQUEST_METHOD';
                # 'method' here looks framework-specific -- confirm the
                # environ really carries this key.
                if qs and environ['method'] == 'GET':
                    url = '{0}?{1}'.format(url,qs)
                return self.http.HttpResponseRedirect(url)
        return url
    def __get_http(self):
        # Resolve the configured http library handle on each access.
        return get_http(self.settings.HTTP_LIBRARY)
    http = property(__get_http, "Return the http library handle")
    def make_url(self, regex, view, kwargs=None, name=None):
        # Convenience factory for a single url pattern entry.
        return RegexURLPattern(regex, view, kwargs, name)
    def resolve(self, path, subpath = None, site = None, editsite = False):
        # Map ``path`` to a (site, view, kwargs) tuple, consulting the
        # module-level cache first, then flat pages, then the regex resolver,
        # recursing into nested ResolverMixin applications.
        global _view_cache
        subpath = subpath if subpath is not None else path
        cached = _view_cache.get(path,None)
        if not cached:
            if not getattr(self,'resolver',None):
                # Build the regex resolver lazily from the url patterns.
                urls = self.urls()
                self.resolver = RegexURLResolver(r'^', urls)
            if not site:
                # Flat pages are only consulted at the top-level call
                # (``site`` is set on recursive calls).
                view = self.resolve_flat(subpath)
                if view:
                    try:
                        site = self.get_site()
                    except:
                        # NOTE(review): bare except silently falls back to
                        # ``self``; narrow the exception type if possible.
                        site = self
                    return cachevalue(path, view, site, editsite, {})
            try:
                view, rurl, kwargs = self.resolver.resolve(subpath)
            except Resolver404 as e:
                raise self.http.Http404(str(e))
            if isinstance(view,ResolverMixin):
                # Nested application: delegate the single remaining path
                # component, switching to the editing site when applicable.
                if len(rurl) == 1:
                    edit = view.editsite()
                    if edit:
                        return view.resolve(path, rurl[0], None, edit)
                    else:
                        return view.resolve(path, rurl[0], site or view, editsite)
                else:
                    raise self.http.Http404
            else:
                return cachevalue(path, view, site, editsite, kwargs)
        else:
            return cached
    def resolve_flat(self, path):
        '''Resolve flat pages. Returns a pageview for a plain CMS page, or
        None (implicitly) when the page is missing or is backed by an
        application view.'''
        from djpcms.models import Page
        from djpcms.views.baseview import pageview
        try:
            page = Page.objects.sitepage(url = '/'+path)
        except:
            # NOTE(review): bare except also hides real database errors --
            # confirm this is intentional.
            return None
        if not page.application_view:
            return pageview(page)
class RegexURLPattern(object):
    """ORIGINAL CLASS FROM DJANGO www.djangoproject.com
    Adapted for djpcms

    Binds one regular expression to a view callback.  ``resolve`` returns
    ``(callback, args, kwargs)`` on a match and None otherwise.
    """
    def __init__(self, regex, callback,
                 default_args=None,
                 name=None):
        # ``regex`` is an uncompiled pattern string.  ``callback`` is either
        # a dotted-path string such as 'foo.views.news.stories.story_detail'
        # or a callable view object.
        self.regex = re.compile(regex, re.UNICODE)
        self.callback = callback
        self.default_args = default_args or {}
        self.name = name
    def __repr__(self):
        return '<%s %s %s>' % (self.__class__.__name__, self.name, self.regex.pattern)
    def resolve(self, path):
        match = self.regex.search(path)
        if match is None:
            # No match: callers treat a None return as "pattern did not fire".
            return None
        # Named groups win; positional groups are only used when there are
        # no named ones.  ``default_args`` are always merged into kwargs.
        kwargs = match.groupdict()
        args = () if kwargs else match.groups()
        kwargs.update(self.default_args)
        return self.callback, args, kwargs
class RegexURLResolver(object):
    """This class ``resolve`` method takes a URL (as
    a string) and returns a tuple in this format:
        (view_function, function_args, function_kwargs)
    ORIGINAL CLASS FROM DJANGO www.djangoproject.com
    Adapted for djpcms
    """
    def __init__(self, regex, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
        # regex is a string representing a regular expression.
        # urlconf_name is a string representing the module containing URLconfs,
        # or directly an iterable of patterns.
        self.regex = re.compile(regex, re.UNICODE)
        self.urlconf_name = urlconf_name
        # NOTE(review): ``basestring`` makes this module Python-2-only.
        if not isinstance(urlconf_name, basestring):
            # Patterns passed directly: no module import needed later.
            self._urlconf_module = self.urlconf_name
        self.callback = None
        self.default_kwargs = default_kwargs or {}
        self.namespace = namespace
        self.app_name = app_name
        self._reverse_dict = None
        self._namespace_dict = None
        self._app_dict = None
    def __repr__(self):
        return '<%s %s (%s:%s) %s>' % (self.__class__.__name__, self.urlconf_name, self.app_name, self.namespace, self.regex.pattern)
    def _get_app_dict(self):
        # NOTE(review): ``_populate`` is not defined in the visible portion of
        # this module -- confirm it exists (it does in upstream Django).
        if self._app_dict is None:
            self._populate()
        return self._app_dict
    app_dict = property(_get_app_dict)
    def resolve(self, path):
        # Strip this resolver's own prefix, then try each sub-pattern on the
        # remainder, collecting every attempted pattern for the 404 payload.
        tried = []
        match = self.regex.search(path)
        if match:
            new_path = path[match.end():]
            for pattern in self.url_patterns:
                try:
                    sub_match = pattern.resolve(new_path)
                except Resolver404, e:
                    # Record which nested patterns were attempted.
                    sub_tried = e.args[0].get('tried')
                    if sub_tried is not None:
                        tried.extend([(pattern.regex.pattern + ' ' + t) for t in sub_tried])
                    else:
                        tried.append(pattern.regex.pattern)
                else:
                    if sub_match:
                        # Merge this resolver's named groups, its defaults,
                        # and the sub-pattern's kwargs (sub-pattern wins).
                        sub_match_dict = dict([(force_str(k), v) for k, v in match.groupdict().items()])
                        sub_match_dict.update(self.default_kwargs)
                        for k, v in sub_match[2].iteritems():
                            sub_match_dict[force_str(k)] = v
                        return sub_match[0], sub_match[1], sub_match_dict
                    tried.append(pattern.regex.pattern)
            raise Resolver404({'tried': tried, 'path': new_path})
        raise Resolver404({'path' : path})
    def _get_urlconf_module(self):
        # Import the URLconf module lazily on first access.
        # NOTE(review): ``import_module`` is not imported in this module's
        # visible imports -- confirm where it comes from.
        try:
            return self._urlconf_module
        except AttributeError:
            self._urlconf_module = import_module(self.urlconf_name)
            return self._urlconf_module
    urlconf_module = property(_get_urlconf_module)
    def _get_url_patterns(self):
        # Accept either a module exposing ``urlpatterns`` or a bare iterable.
        patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module)
        try:
            iter(patterns)
        except TypeError:
            raise ImproperlyConfigured("The included urlconf %s doesn't have any patterns in it" % self.urlconf_name)
        return patterns
    url_patterns = property(_get_url_patterns)
    def _resolve_special(self, view_type):
        # Look up handler404/handler500 from the URLconf module.
        # NOTE(review): ``get_callable`` is not imported in this module's
        # visible imports -- confirm where it comes from.
        callback = getattr(self.urlconf_module, 'handler%s' % view_type)
        try:
            return get_callable(callback), {}
        except (ImportError, AttributeError), e:
            raise ViewDoesNotExist("Tried %s. Error was: %s" % (callback, str(e)))
    def resolve404(self):
        return self._resolve_special('404')
    def resolve500(self):
        return self._resolve_special('500')
| |
from __future__ import absolute_import
from django.utils.translation import ugettext as _
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.views.decorators.csrf import csrf_exempt
from django.http import QueryDict, HttpResponseNotAllowed, HttpRequest
from django.http.multipartparser import MultiPartParser
from zerver.models import UserProfile, get_client, get_user_profile_by_email
from zerver.lib.response import json_error, json_unauthorized
from django.shortcuts import resolve_url
from django.utils.decorators import available_attrs
from django.utils.timezone import now
from django.conf import settings
from zerver.lib.queue import queue_json_publish
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.utils import statsd
from zerver.exceptions import RateLimited
from zerver.lib.rate_limiter import incr_ratelimit, is_ratelimited, \
api_calls_left
from zerver.lib.request import REQ, has_request_variables, JsonableError, RequestVariableMissingError
from django.core.handlers import base
from functools import wraps
import base64
import logging
import cProfile
from io import BytesIO
from zerver.lib.mandrill_client import get_mandrill_client
from six.moves import zip, urllib
from six import text_type
from typing import Union, Any, Callable, Sequence, Dict, Optional, TypeVar
from zerver.lib.str_utils import force_bytes
# ZILENCER (enterprise deployment support) is optional; when disabled we
# substitute Mock objects so the names below still import cleanly.
if settings.ZILENCER_ENABLED:
    from zilencer.models import get_deployment_by_domain, Deployment
else:
    from mock import Mock
    get_deployment_by_domain = Mock()
    Deployment = Mock() # type: ignore # https://github.com/JukkaL/mypy/issues/1188
# Type variables used by the decorators below: FuncT for arbitrary
# callables, ViewFuncT for Django view functions returning HttpResponse.
FuncT = TypeVar('FuncT', bound=Callable[..., Any])
ViewFuncT = TypeVar('ViewFuncT', bound=Callable[..., HttpResponse])
def get_deployment_or_userprofile(role):
    # type: (text_type) -> Union[UserProfile, Deployment]
    """Look up the account identified by ``role``.

    An email-style role (contains '@') identifies a UserProfile; anything
    else is treated as a deployment domain.
    """
    if "@" in role:
        return get_user_profile_by_email(role)
    return get_deployment_by_domain(role)
class _RespondAsynchronously(object):
    """Marker type for the module-level ``RespondAsynchronously`` singleton."""
    pass
# Return RespondAsynchronously from an @asynchronous view if the
# response will be provided later by calling handler.zulip_finish(),
# or has already been provided this way. We use this for longpolling
# mode.
# Singleton sentinel; callers compare against it by identity.
RespondAsynchronously = _RespondAsynchronously()
def asynchronous(method):
    # type: (Callable[..., Union[HttpResponse, _RespondAsynchronously]]) -> Callable[..., Union[HttpResponse, _RespondAsynchronously]]
    """Mark a view as long-polling.

    The wrapped view receives the Tornado handler as a ``handler`` keyword
    argument so it can finish the response later (see
    ``RespondAsynchronously`` above).
    """
    @wraps(method)
    def _async_view(request, *args, **kwargs):
        # type: (HttpRequest, *Any, **Any) -> Union[HttpResponse, _RespondAsynchronously]
        return method(request, handler=request._tornado_handler, *args, **kwargs)
    # Propagate Django's csrf_exempt marker from the wrapped view.
    if getattr(method, 'csrf_exempt', False):
        _async_view.csrf_exempt = True  # type: ignore # https://github.com/JukkaL/mypy/issues/1170
    return _async_view
def update_user_activity(request, user_profile):
    # type: (HttpRequest, UserProfile) -> None
    """Enqueue a user_activity event describing this request.

    Presence pings are skipped: update_active_status already pushes to
    rabbitmq, and it seems redundant to log that here as well.
    """
    if request.META["PATH_INFO"] == '/json/users/me/presence':
        return
    # Prefer the explicit query tag when a decorator has set one.
    query = request._query if hasattr(request, '_query') else request.META['PATH_INFO']
    event = {
        'query': query,
        'user_profile_id': user_profile.id,
        'time': datetime_to_timestamp(now()),
        'client': request.client.name,
    }
    queue_json_publish("user_activity", event, lambda event: None)
# Based on django.views.decorators.http.require_http_methods
def require_post(func):
    # type: (ViewFuncT) -> ViewFuncT
    """Reject any request that is not a POST (or a Tornado SOCKET request
    emulating a POST), returning 405 Method Not Allowed."""
    @wraps(func)
    def wrapper(request, *args, **kwargs):
        # type: (HttpRequest, *Any, **Any) -> HttpResponse
        is_post = request.method == "POST"
        # SOCKET requests carry the real method in request.META.
        emulated_post = (request.method == "SOCKET"
                         and request.META['zulip.emulated_method'] == "POST")
        if not (is_post or emulated_post):
            if request.method == "SOCKET":
                err_method = "SOCKET/%s" % (request.META['zulip.emulated_method'],)
            else:
                err_method = request.method
            logging.warning('Method Not Allowed (%s): %s', err_method, request.path,
                            extra={'status_code': 405, 'request': request})
            return HttpResponseNotAllowed(["POST"])
        return func(request, *args, **kwargs)
    return wrapper  # type: ignore # https://github.com/python/mypy/issues/1927
def require_realm_admin(func):
    # type: (ViewFuncT) -> ViewFuncT
    """Allow the request through only when user_profile is a realm admin."""
    @wraps(func)
    def _checked(request, user_profile, *args, **kwargs):
        # type: (HttpRequest, UserProfile, *Any, **Any) -> HttpResponse
        if user_profile.is_realm_admin:
            return func(request, user_profile, *args, **kwargs)
        raise JsonableError(_("Must be a realm administrator"))
    return _checked  # type: ignore # https://github.com/python/mypy/issues/1927
from zerver.lib.user_agent import parse_user_agent
def get_client_name(request, is_json_view):
    # type: (HttpRequest, bool) -> text_type
    """Determine the client name for this request.

    A client specified in the request content has priority; otherwise the
    name is extracted from the User-Agent header.
    """
    # NOTE(review): request.REQUEST was removed in Django 1.9 -- confirm the
    # Django version in use still provides it.
    if 'client' in request.REQUEST:
        return request.REQUEST['client']
    if "HTTP_USER_AGENT" not in request.META:
        # In the future, we will require setting USER_AGENT, but for
        # now we just want to tag these requests so we can review them
        # in logs and figure out the extent of the problem
        return "website" if is_json_view else "Unspecified"
    user_agent = parse_user_agent(request.META["HTTP_USER_AGENT"])
    # We could check for a browser's name being "Mozilla", but
    # e.g. Opera and MobileSafari don't set that, and it seems
    # more robust to just key off whether it was a json view.
    if is_json_view and user_agent["name"] != "ZulipDesktop":
        # Avoid changing the client string for browsers. Once this is out
        # to prod, we can name the field to something like Browser for
        # consistency.
        return "website"
    return user_agent["name"]
def process_client(request, user_profile, is_json_view=False, client_name=None):
    # type: (HttpRequest, UserProfile, bool, Optional[text_type]) -> None
    """Attach a Client object to ``request.client`` and log user activity."""
    name = client_name if client_name is not None else get_client_name(request, is_json_view)
    # Transitional hack for early 2014. Eventually the ios clients
    # will all report ZulipiOS, and we can remove the next couple lines.
    if name == 'ios':
        name = 'ZulipiOS'
    request.client = get_client(name)
    update_user_activity(request, user_profile)
def validate_api_key(role, api_key, is_webhook=False):
    # type: (text_type, text_type, bool) -> Union[UserProfile, Deployment]
    """Authenticate ``role``/``api_key`` and return the matching account.

    Raises JsonableError for unknown roles, wrong keys, inactive accounts,
    webhook-only accounts used outside webhooks, and deactivated realms.
    """
    # Remove whitespace to protect users from trivial errors.
    role = role.strip()
    api_key = api_key.strip()
    try:
        profile = get_deployment_or_userprofile(role)
    except UserProfile.DoesNotExist:
        raise JsonableError(_("Invalid user: %s") % (role,))
    except Deployment.DoesNotExist:
        raise JsonableError(_("Invalid deployment: %s") % (role,))
    if api_key != profile.api_key:
        # A wrong-length key is almost always a copy/paste error; give a
        # more specific message for that case.
        if len(api_key) == 32:
            reason = _("Invalid API key for role '%s'")
        else:
            reason = _("Incorrect API key length (keys should be 32 "
                       "characters long) for role '%s'")
        raise JsonableError(reason % (role,))
    if not profile.is_active:
        raise JsonableError(_("Account not active"))
    if profile.is_incoming_webhook and not is_webhook:
        raise JsonableError(_("Account is not valid to post webhook messages"))
    try:
        realm_deactivated = profile.realm.deactivated
    except AttributeError:
        # Deployment objects don't have realms
        realm_deactivated = False
    if realm_deactivated:
        raise JsonableError(_("Realm for account has been deactivated"))
    return profile
# Use this for webhook views that don't get an email passed in.
def api_key_only_webhook_view(client_name):
    # type: (text_type) -> Callable[..., HttpResponse]
    # This function can't be typed perfectly because returning a generic function
    # isn't supported in mypy - https://github.com/python/mypy/issues/1551.
    """Decorator factory: authenticate a webhook view purely by api_key.

    The wrapped view is called as view(request, user_profile, client, ...)
    after csrf exemption, REQ-based api_key extraction, account validation
    and (optionally) rate limiting.
    """
    def _wrapped_view_func(view_func):
        # type: (Callable[..., HttpResponse]) -> Callable[..., HttpResponse]
        @csrf_exempt
        @has_request_variables
        @wraps(view_func)
        def _wrapped_func_arguments(request, api_key=REQ(),
                                    *args, **kwargs):
            # type: (HttpRequest, text_type, *Any, **Any) -> HttpResponse
            try:
                user_profile = UserProfile.objects.get(api_key=api_key)
            except UserProfile.DoesNotExist:
                raise JsonableError(_("Invalid API key"))
            # Mirror the checks in validate_api_key for direct key lookups.
            if not user_profile.is_active:
                raise JsonableError(_("Account not active"))
            if user_profile.realm.deactivated:
                raise JsonableError(_("Realm for account has been deactivated"))
            request.user = user_profile
            request._email = user_profile.email
            # Tag requests with a per-integration client, e.g. ZulipGithubWebhook.
            webhook_client_name = "Zulip{}Webhook".format(client_name)
            process_client(request, user_profile, client_name=webhook_client_name)
            if settings.RATE_LIMITING:
                # NOTE(review): rate_limit_user is not imported in the visible
                # portion of this module -- confirm it is in scope.
                rate_limit_user(request, user_profile, domain='all')
            return view_func(request, user_profile, request.client, *args, **kwargs)
        return _wrapped_func_arguments
    return _wrapped_view_func
# From Django 1.8, modified to leave off ?next=/
def redirect_to_login(next, login_url=None,
                      redirect_field_name=REDIRECT_FIELD_NAME):
    # type: (text_type, Optional[text_type], text_type) -> HttpResponseRedirect
    """
    Redirects the user to the login page, passing the given 'next' page
    """
    resolved_url = resolve_url(login_url or settings.LOGIN_URL)
    parts = list(urllib.parse.urlparse(resolved_url))
    # Don't add ?next=/, to keep our URLs clean
    if redirect_field_name and next != '/':
        query = QueryDict(parts[4], mutable=True)
        query[redirect_field_name] = next
        parts[4] = query.urlencode(safe='/')
    return HttpResponseRedirect(urllib.parse.urlunparse(parts))
# From Django 1.8
def user_passes_test(test_func, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME):
    # type: (Callable[[UserProfile], bool], Optional[text_type], text_type) -> Callable[[Callable[..., HttpResponse]], Callable[..., HttpResponse]]
    """
    Decorator for views that checks that the user passes the given test,
    redirecting to the log-in page if necessary. The test should be a callable
    that takes the user object and returns True if the user passes.
    """
    # NOTE(review): despite the type comment, test_func is called below with
    # the *request*, not request.user (see logged_in_and_active) -- confirm.
    def decorator(view_func):
        # type: (Callable[..., HttpResponse]) -> Callable[..., HttpResponse]
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            # type: (HttpRequest, *Any, **Any) -> HttpResponse
            if test_func(request):
                return view_func(request, *args, **kwargs)
            path = request.build_absolute_uri()
            resolved_login_url = resolve_url(login_url or settings.LOGIN_URL)
            # If the login url is the same scheme and net location then just
            # use the path as the "next" url.
            login_scheme, login_netloc = urllib.parse.urlparse(resolved_login_url)[:2]
            current_scheme, current_netloc = urllib.parse.urlparse(path)[:2]
            if ((not login_scheme or login_scheme == current_scheme) and
                (not login_netloc or login_netloc == current_netloc)):
                path = request.get_full_path()
            return redirect_to_login(
                path, resolved_login_url, redirect_field_name)
        return _wrapped_view
    return decorator
def logged_in_and_active(request):
    # type: (HttpRequest) -> bool
    """True iff the requester is authenticated, active, and belongs to an
    active (non-deactivated) realm."""
    user = request.user
    # Short-circuits left to right, so realm is only touched for active users.
    return bool(user.is_authenticated() and
                user.is_active and
                not user.realm.deactivated)
# Based on Django 1.8's @login_required
def zulip_login_required(function=None,
                         redirect_field_name=REDIRECT_FIELD_NAME,
                         login_url=settings.HOME_NOT_LOGGED_IN):
    # type: (Optional[Callable[..., HttpResponse]], text_type, text_type) -> Union[Callable[[Callable[..., HttpResponse]], Callable[..., HttpResponse]], Callable[..., HttpResponse]]
    """Require a logged-in, active user on an active realm.

    Usable both as a bare decorator (@zulip_login_required) and as a
    decorator factory (@zulip_login_required(...)).
    """
    decorator = user_passes_test(
        logged_in_and_active,
        login_url=login_url,
        redirect_field_name=redirect_field_name,
    )
    return decorator(function) if function else decorator
def zulip_internal(view_func):
    # type: (ViewFuncT) -> ViewFuncT
    """Restrict a view to logged-in users on the zulip.com realm;
    everyone else is redirected to the logged-out home page."""
    @zulip_login_required
    @wraps(view_func)
    def _wrapped(request, *args, **kwargs):
        # type: (HttpRequest, *Any, **Any) -> HttpResponse
        # Tag activity logging with the view's name.
        request._query = view_func.__name__
        if request.user.realm.domain != 'zulip.com':
            return HttpResponseRedirect(settings.HOME_NOT_LOGGED_IN)
        request._email = request.user.email
        process_client(request, request.user)
        return view_func(request, *args, **kwargs)
    return _wrapped  # type: ignore # https://github.com/python/mypy/issues/1927
# authenticated_api_view will add the authenticated user's
# user_profile to the view function's arguments list, since we have to
# look it up anyway. It is deprecated in favor on the REST API
# versions.
def authenticated_api_view(is_webhook=False):
    # type: (bool) -> Callable[[Callable[..., HttpResponse]], Callable[..., HttpResponse]]
    """Decorator factory: authenticate a legacy API view via POSTed
    email + api_key (with 'api-key' accepted as a legacy spelling)."""
    def _wrapped_view_func(view_func):
        # type: (Callable[..., HttpResponse]) -> Callable[..., HttpResponse]
        @csrf_exempt
        @require_post
        @has_request_variables
        @wraps(view_func)
        def _wrapped_func_arguments(request, email=REQ(), api_key=REQ(default=None),
                                    api_key_legacy=REQ('api-key', default=None),
                                    *args, **kwargs):
            # type: (HttpRequest, text_type, Optional[text_type], Optional[text_type], *Any, **Any) -> HttpResponse
            # Accept the key under either name, preferring the modern one.
            if not api_key and not api_key_legacy:
                raise RequestVariableMissingError("api_key")
            elif not api_key:
                api_key = api_key_legacy
            user_profile = validate_api_key(email, api_key, is_webhook)
            request.user = user_profile
            request._email = user_profile.email
            process_client(request, user_profile)
            # Apply rate limiting
            # NOTE(review): rate_limit is not imported in the visible portion
            # of this module -- confirm it is in scope.
            limited_func = rate_limit()(view_func)
            return limited_func(request, user_profile, *args, **kwargs)
        return _wrapped_func_arguments
    return _wrapped_view_func
# A more REST-y authentication decorator, using, in particular, HTTP Basic
# authentication.
def authenticated_rest_api_view(is_webhook=False):
    # type: (bool) -> Callable[[Callable[..., HttpResponse]], Callable[..., HttpResponse]]
    """Decorator factory: authenticate a REST API view via HTTP Basic auth.

    The Authorization header carries base64("role:api_key"); the resolved
    account (UserProfile or Deployment) is passed to the view as its second
    argument, after rate limiting.
    """
    def _wrapped_view_func(view_func):
        # type: (Callable[..., HttpResponse]) -> Callable[..., HttpResponse]
        @csrf_exempt
        @wraps(view_func)
        def _wrapped_func_arguments(request, *args, **kwargs):
            # type: (HttpRequest, *Any, **Any) -> HttpResponse
            # First try block attempts to get the credentials we need to do authentication
            try:
                # Grab the base64-encoded authentication string, decode it, and split it into
                # the email and API key
                auth_type, credentials = request.META['HTTP_AUTHORIZATION'].split()
                # case insensitive per RFC 1945
                if auth_type.lower() != "basic":
                    return json_error(_("Only Basic authentication is supported."))
                role, api_key = base64.b64decode(force_bytes(credentials)).decode('utf-8').split(":")
            except ValueError:
                # BUG FIX: this error response was computed but never
                # returned, so a malformed header fell through to the code
                # below with role/api_key unbound (UnboundLocalError).
                return json_error(_("Invalid authorization header for basic auth"))
            except KeyError:
                return json_unauthorized("Missing authorization header for basic auth")
            # Now we try to do authentication or die
            try:
                # Could be a UserProfile or a Deployment
                profile = validate_api_key(role, api_key, is_webhook)
            except JsonableError as e:
                return json_unauthorized(e.error)
            request.user = profile
            process_client(request, profile)
            if isinstance(profile, UserProfile):
                request._email = profile.email
            else:
                assert isinstance(profile, Deployment) # type: ignore # https://github.com/python/mypy/issues/1720#issuecomment-228596830
                request._email = "deployment:" + role
                profile.rate_limits = ""
            # Apply rate limiting
            # NOTE(review): rate_limit is not imported in the visible portion
            # of this module -- confirm it is in scope.
            return rate_limit()(view_func)(request, profile, *args, **kwargs)
        return _wrapped_func_arguments
    return _wrapped_view_func
def process_as_post(view_func):
    # type: (ViewFuncT) -> ViewFuncT
    """Populate ``request.POST`` for non-POST bodies (PATCH/PUT).

    Adapted from django/http/__init__.py: Django only fills request.POST
    for actual POST requests, so we parse the body ourselves here.  This
    should eventually be unnecessary (bug to be filed upstream).
    """
    @wraps(view_func)
    def _populate_post(request, *args, **kwargs):
        # type: (HttpRequest, *Any, **Any) -> HttpResponse
        # Only take action when Django left POST empty.
        if not request.POST:
            content_type = request.META.get('CONTENT_TYPE', '')
            if content_type.startswith('multipart'):
                # request._files is the private attribute backing the FILES
                # property, so assigning it effectively sets request.FILES
                # (FILES was still a read-only property in Django 1.5).
                parser = MultiPartParser(request.META, BytesIO(request.body),
                                         request.upload_handlers, request.encoding)
                request.POST, request._files = parser.parse()
            else:
                request.POST = QueryDict(request.body, encoding=request.encoding)
        return view_func(request, *args, **kwargs)
    return _populate_post  # type: ignore # https://github.com/python/mypy/issues/1927
def authenticate_log_and_execute_json(request, view_func, *args, **kwargs):
    # type: (HttpRequest, Callable[..., HttpResponse], *Any, **Any) -> HttpResponse
    """Shared guts of the authenticated_json_* decorators.

    Verifies the session user is logged in, active, in an active realm,
    and not a webhook-only bot, then calls
    ``view_func(request, user_profile, *args, **kwargs)``.
    """
    if not request.user.is_authenticated():
        return json_error(_("Not logged in"), status=401)
    profile = request.user
    if not profile.is_active:
        raise JsonableError(_("Account not active"))
    if profile.realm.deactivated:
        raise JsonableError(_("Realm for account has been deactivated"))
    if profile.is_incoming_webhook:
        raise JsonableError(_("Webhook bots can only access webhooks"))
    process_client(request, profile, True)
    request._email = profile.email
    return view_func(request, profile, *args, **kwargs)
# Checks if the request is a POST request and that the user is logged
# in. If not, return an error (the @login_required behavior of
# redirecting to a login page doesn't make sense for json views)
def authenticated_json_post_view(view_func):
    # type: (ViewFuncT) -> ViewFuncT
    """Require a POST from a logged-in user for a JSON view.

    Returns a JSON error rather than redirecting to a login page
    (the @login_required behavior makes no sense for JSON endpoints).
    """
    @require_post
    @has_request_variables
    @wraps(view_func)
    def _view(request, *args, **kwargs):
        # type: (HttpRequest, *Any, **Any) -> HttpResponse
        return authenticate_log_and_execute_json(request, view_func, *args, **kwargs)
    return _view  # type: ignore # https://github.com/python/mypy/issues/1927
def authenticated_json_view(view_func):
    # type: (ViewFuncT) -> ViewFuncT
    """Require a logged-in user for a JSON view (any HTTP method)."""
    @wraps(view_func)
    def _view(request, *args, **kwargs):
        # type: (HttpRequest, *Any, **Any) -> HttpResponse
        return authenticate_log_and_execute_json(request, view_func, *args, **kwargs)
    return _view  # type: ignore # https://github.com/python/mypy/issues/1927
def is_local_addr(addr):
    # type: (text_type) -> bool
    """Return True when *addr* is the IPv4 or IPv6 loopback address."""
    loopback_addresses = ('127.0.0.1', '::1')
    return any(addr == candidate for candidate in loopback_addresses)
# These views are used by the main Django server to notify the Tornado server
# of events. We protect them from the outside world by checking a shared
# secret, and also the originating IP (for now).
def authenticate_notify(request):
    # type: (HttpRequest) -> bool
    """Validate a Django-to-Tornado notification request.

    The request must originate from this machine and carry the shared
    secret in its POST data.
    """
    if not is_local_addr(request.META['REMOTE_ADDR']):
        return False
    return request.POST.get('secret') == settings.SHARED_SECRET
def client_is_exempt_from_rate_limiting(request):
    # type: (HttpRequest) -> bool
    """Return True for requests that should never be rate limited.

    Internal clients calling from our own servers are exempt, as are
    internal clients on dev instances with DEBUG_RATE_LIMITING set.
    """
    # bool() fix: the chained `and` previously could return None (when
    # request.client is unset) or a non-bool, despite the declared bool
    # return type.  Truthiness is unchanged, so callers are unaffected.
    return bool((request.client and request.client.name.lower() == 'internal')
                and (is_local_addr(request.META['REMOTE_ADDR']) or
                     settings.DEBUG_RATE_LIMITING))
def internal_notify_view(view_func):
    # type: (ViewFuncT) -> ViewFuncT
    """Protect a Django-to-Tornado notification endpoint.

    The request must pass authenticate_notify() (localhost + shared
    secret) and must have arrived through the Tornado server.
    """
    @csrf_exempt
    @require_post
    @wraps(view_func)
    def _check_and_call(request, *args, **kwargs):
        # type: (HttpRequest, *Any, **Any) -> HttpResponse
        if not authenticate_notify(request):
            return json_error(_('Access denied'), status=403)
        if not hasattr(request, '_tornado_handler'):
            # We somehow got called through the non-Tornado server; this
            # is an internal bug-finding assertion, not a security check.
            raise RuntimeError('notify view called with no Tornado handler')
        request._email = "internal"
        return view_func(request, *args, **kwargs)
    return _check_and_call
# Converter functions for use with has_request_variables
def to_non_negative_int(x):
    # type: (float) -> int
    """Convert *x* to int, rejecting negative values with ValueError."""
    converted = int(x)
    if converted < 0:
        raise ValueError("argument is negative")
    return converted
def flexible_boolean(boolean):
    # type: (text_type) -> bool
    """Returns True for any of "1", "true", or "True". Returns False otherwise."""
    # Direct membership test replaces the redundant
    # if-return-True-else-return-False construct.
    return boolean in ("1", "true", "True")
def statsd_increment(counter, val=1):
    # type: (text_type, int) -> Callable[[Callable[..., Any]], Callable[..., Any]]
    """Decorator factory: bump the statsd counter *counter* by *val*
    each time the decorated function completes successfully."""
    def decorate(func):
        # type: (Callable[..., Any]) -> Callable[..., Any]
        @wraps(func)
        def incrementing_call(*args, **kwargs):
            # type: (*Any, **Any) -> Any
            result = func(*args, **kwargs)
            # Only incremented on the success path, matching completion
            # (an exception in func skips the counter).
            statsd.incr(counter, val)
            return result
        return incrementing_call
    return decorate
def rate_limit_user(request, user, domain):
    # type: (HttpRequest, UserProfile, text_type) -> None
    """Raise RateLimited if *user* is over its limits in *domain*.

    Otherwise records this API call and annotates *request* with
    ``_ratelimit_*`` attributes describing the remaining budget.
    """
    # NOTE(review): the local name `time` shadows any module-level
    # `time` import for the rest of this function.
    ratelimited, time = is_ratelimited(user, domain)
    request._ratelimit_applied_limits = True
    request._ratelimit_secs_to_freedom = time
    request._ratelimit_over_limit = ratelimited
    # Abort this request if the user is over her rate limits
    if ratelimited:
        statsd.incr("ratelimiter.limited.%s.%s" % (type(user), user.id))
        raise RateLimited()
    # Count this call and expose the remaining quota on the request
    # (overwrites _ratelimit_secs_to_freedom set above).
    incr_ratelimit(user, domain)
    calls_remaining, time_reset = api_calls_left(user, domain)
    request._ratelimit_remaining = calls_remaining
    request._ratelimit_secs_to_freedom = time_reset
def rate_limit(domain='all'):
    # type: (text_type) -> Callable[[Callable[..., HttpResponse]], Callable[..., HttpResponse]]
    """Rate-limits a view. Takes an optional 'domain' param if you wish to
    rate limit different types of API calls independently.

    Returns a decorator.
    """
    def wrapper(func):
        # type: (Callable[..., HttpResponse]) -> Callable[..., HttpResponse]
        @wraps(func)
        def wrapped_func(request, *args, **kwargs):
            # type: (HttpRequest, *Any, **Any) -> HttpResponse
            # It is really tempting to not even wrap our original function
            # when settings.RATE_LIMITING is False, but it would make
            # for awkward unit testing in some situations.
            if not settings.RATE_LIMITING:
                return func(request, *args, **kwargs)
            if client_is_exempt_from_rate_limiting(request):
                return func(request, *args, **kwargs)
            try:
                user = request.user
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt.
                # TODO: This logic is not tested, and I'm not sure we are
                # doing the right thing here.
                user = None
            if not user:
                logging.error("Requested rate-limiting on %s but user is not authenticated!" %
                              func.__name__)
                return func(request, *args, **kwargs)
            # Rate-limiting data is stored in redis.  We also only support
            # rate-limiting authenticated views right now.
            # TODO(leo) - implement per-IP non-authed rate limiting
            rate_limit_user(request, user, domain)
            return func(request, *args, **kwargs)
        return wrapped_func
    return wrapper
def profiled(func):
    # type: (FuncT) -> FuncT
    """
    Dev-only decorator: profile each call of ``func`` with cProfile and
    dump the stats to ``<funcname>.profile`` in the working directory.

    Works best around a function you expect to be called once, e.g. a
    backend test case:

    # edit zerver/tests/test_external.py and place @profiled above the test case below
    ./tools/test-backend zerver.tests.test_external.RateLimitTests.test_ratelimit_decrease

    Then view the results:

    ./tools/show-profile-results.py test_ratelimit_decrease.profile
    """
    @wraps(func)
    def profile_and_call(*args, **kwargs):
        # type: (*Any, **Any) -> Any
        profiler = cProfile.Profile()
        result = profiler.runcall(func, *args, **kwargs)
        profiler.dump_stats(func.__name__ + ".profile")
        return result
    return profile_and_call  # type: ignore # https://github.com/python/mypy/issues/1927
def uses_mandrill(func):
    # type: (FuncT) -> FuncT
    """
    Inject the Mandrill mail client into the decorated function's
    ``mail_client`` keyword argument.
    """
    @wraps(func)
    def call_with_mail_client(*args, **kwargs):
        # type: (*Any, **Any) -> Any
        # Assign into kwargs (rather than passing mail_client=...) so a
        # caller-supplied value is silently overridden, as before.
        kwargs['mail_client'] = get_mandrill_client()
        return func(*args, **kwargs)
    return call_with_mail_client  # type: ignore # https://github.com/python/mypy/issues/1927
# ---- file-boundary artifact: the following module was concatenated here ----
from django.contrib.admin.forms import forms
from muddery.utils.localiztion_handler import localize_form_fields
from muddery.utils.attributes_info_handler import CHARACTER_ATTRIBUTES_INFO, EQUIPMENT_ATTRIBUTES_INFO, FOOD_ATTRIBUTES_INFO
from muddery.worlddata.data_sets import DATA_SETS
def get_all_pocketable_objects():
    """
    Get all objects that can be put in player's pockets.

    Returns a list of (key, "name (key)") choice tuples gathered from
    common objects, foods, skill books and equipments.
    """
    def as_choices(records):
        # Build the standard "(key, 'name (key)')" choice tuples.
        return [(rec.key, "%s (%s)" % (rec.name, rec.key)) for rec in records]

    choices = as_choices(DATA_SETS.common_objects.objects.all())
    choices += as_choices(DATA_SETS.foods.objects.all())
    choices += as_choices(DATA_SETS.skill_books.objects.all())
    choices += as_choices(DATA_SETS.equipments.objects.all())
    return choices
class GameSettingsForm(forms.ModelForm):
    """Form for the global game-settings record."""
    def __init__(self, *args, **kwargs):
        super(GameSettingsForm, self).__init__(*args, **kwargs)

        def label(rec):
            return "%s (%s)" % (rec.name, rec.key)

        # Rooms usable as home/start locations (optional, blank allowed).
        room_choices = [("", "---------")]
        room_choices.extend([(r.key, label(r)) for r in DATA_SETS.world_rooms.objects.all()])
        self.fields['default_home_key'] = forms.ChoiceField(choices=room_choices, required=False)
        self.fields['start_location_key'] = forms.ChoiceField(choices=room_choices, required=False)
        self.fields['default_player_home_key'] = forms.ChoiceField(choices=room_choices, required=False)

        # Playable character templates.
        player_choices = [("", "---------")]
        players = DATA_SETS.common_characters.objects.filter(typeclass="CLASS_PLAYER")
        player_choices.extend([(p.key, label(p)) for p in players])
        self.fields['default_player_character_key'] = forms.ChoiceField(choices=player_choices, required=False)

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.game_settings.model
        fields = '__all__'
        list_template = "common_list.html"
        form_template = "common_form.html"
class ClassCategoriesForm(forms.ModelForm):
    """Model form for class-category records; only localizes labels."""
    def __init__(self, *args, **kwargs):
        super(ClassCategoriesForm, self).__init__(*args, **kwargs)
        # Translate field labels/help texts for display.
        localize_form_fields(self)
    class Meta:
        # Web-client metadata: backing model plus admin templates.
        model = DATA_SETS.class_categories.model
        fields = '__all__'
        desc = 'Categories of classes.'
        list_template = "common_list.html"
        form_template = "common_form.html"
class TypeclassesForm(forms.ModelForm):
    """Form for typeclass records; 'category' is restricted to known categories."""
    def __init__(self, *args, **kwargs):
        super(TypeclassesForm, self).__init__(*args, **kwargs)
        categories = DATA_SETS.class_categories.objects.all()
        self.fields['category'] = forms.ChoiceField(
            choices=[(c.key, "%s (%s)" % (c.name, c.key)) for c in categories])
        localize_form_fields(self)
    class Meta:
        model = DATA_SETS.typeclasses.model
        fields = '__all__'
class EquipmentTypesForm(forms.ModelForm):
    """Model form for equipment-type records; only localizes labels."""
    def __init__(self, *args, **kwargs):
        super(EquipmentTypesForm, self).__init__(*args, **kwargs)
        # Translate field labels/help texts for display.
        localize_form_fields(self)
    class Meta:
        model = DATA_SETS.equipment_types.model
        fields = '__all__'
class EquipmentPositionsForm(forms.ModelForm):
    """Model form for equipment-position records; only localizes labels."""
    def __init__(self, *args, **kwargs):
        super(EquipmentPositionsForm, self).__init__(*args, **kwargs)
        # Translate field labels/help texts for display.
        localize_form_fields(self)
    class Meta:
        model = DATA_SETS.equipment_positions.model
        fields = '__all__'
class CharacterCareersForm(forms.ModelForm):
    """Model form for character-career records; only localizes labels."""
    def __init__(self, *args, **kwargs):
        super(CharacterCareersForm, self).__init__(*args, **kwargs)
        # Translate field labels/help texts for display.
        localize_form_fields(self)
    class Meta:
        model = DATA_SETS.character_careers.model
        fields = '__all__'
class QuestObjectiveTypesForm(forms.ModelForm):
    """Model form for quest-objective-type records; only localizes labels."""
    def __init__(self, *args, **kwargs):
        super(QuestObjectiveTypesForm, self).__init__(*args, **kwargs)
        # Translate field labels/help texts for display.
        localize_form_fields(self)
    class Meta:
        model = DATA_SETS.quest_objective_types.model
        fields = '__all__'
class EventTypesForm(forms.ModelForm):
    """Model form for event-type records; only localizes labels."""
    def __init__(self, *args, **kwargs):
        super(EventTypesForm, self).__init__(*args, **kwargs)
        # Translate field labels/help texts for display.
        localize_form_fields(self)
    class Meta:
        model = DATA_SETS.event_types.model
        fields = '__all__'
class EventTriggerTypes(forms.ModelForm):
    """Model form for event-trigger-type records; only localizes labels.

    NOTE(review): the class name lacks the "Form" suffix used by every
    sibling; renaming would break external references, so it is kept.
    """
    def __init__(self, *args, **kwargs):
        super(EventTriggerTypes, self).__init__(*args, **kwargs)
        # Translate field labels/help texts for display.
        localize_form_fields(self)
    class Meta:
        model = DATA_SETS.event_trigger_types.model
        fields = '__all__'
class QuestDependencyTypesForm(forms.ModelForm):
    """Model form for quest-dependency-type records; only localizes labels."""
    def __init__(self, *args, **kwargs):
        super(QuestDependencyTypesForm, self).__init__(*args, **kwargs)
        # Translate field labels/help texts for display.
        localize_form_fields(self)
    class Meta:
        model = DATA_SETS.quest_dependency_types.model
        fields = '__all__'
class WorldAreasForm(forms.ModelForm):
    """Form for world-area records."""
    def __init__(self, *args, **kwargs):
        super(WorldAreasForm, self).__init__(*args, **kwargs)

        def label(rec):
            return "%s (%s)" % (rec.name, rec.key)

        # Typeclasses in the area category (required).
        area_classes = DATA_SETS.typeclasses.objects.filter(category="CATE_AREA")
        self.fields['typeclass'] = forms.ChoiceField(
            choices=[(c.key, label(c)) for c in area_classes])

        # Optional background image.
        backgrounds = [("", "---------")]
        backgrounds.extend([(b.key, label(b)) for b in DATA_SETS.image_resources.objects.all()])
        self.fields['background'] = forms.ChoiceField(choices=backgrounds, required=False)

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.world_areas.model
        fields = '__all__'
class WorldRoomsForm(forms.ModelForm):
    """Form for world-room records."""
    def __init__(self, *args, **kwargs):
        super(WorldRoomsForm, self).__init__(*args, **kwargs)

        def label(rec):
            return "%s (%s)" % (rec.name, rec.key)

        room_classes = DATA_SETS.typeclasses.objects.filter(category="CATE_ROOM")
        self.fields['typeclass'] = forms.ChoiceField(
            choices=[(c.key, label(c)) for c in room_classes])

        # The containing area; blank first entry, field itself required.
        areas = [("", "---------")]
        areas.extend([(a.key, label(a)) for a in DATA_SETS.world_areas.objects.all()])
        self.fields['location'] = forms.ChoiceField(choices=areas)

        # Optional icon.
        icons = [("", "---------")]
        icons.extend([(i.key, label(i)) for i in DATA_SETS.icon_resources.objects.all()])
        self.fields['icon'] = forms.ChoiceField(choices=icons, required=False)

        # Optional background image.
        backgrounds = [("", "---------")]
        backgrounds.extend([(b.key, label(b)) for b in DATA_SETS.image_resources.objects.all()])
        self.fields['background'] = forms.ChoiceField(choices=backgrounds, required=False)

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.world_rooms.model
        fields = '__all__'
class WorldExitsForm(forms.ModelForm):
    """Form for world-exit records linking two rooms."""
    def __init__(self, *args, **kwargs):
        super(WorldExitsForm, self).__init__(*args, **kwargs)

        def label(rec):
            return "%s (%s)" % (rec.name, rec.key)

        exit_classes = DATA_SETS.typeclasses.objects.filter(category="CATE_EXIT")
        self.fields['typeclass'] = forms.ChoiceField(
            choices=[(c.key, label(c)) for c in exit_classes])

        # Both endpoints pick from the same room list.
        rooms = [(r.key, label(r)) for r in DATA_SETS.world_rooms.objects.all()]
        self.fields['location'] = forms.ChoiceField(choices=rooms)
        self.fields['destination'] = forms.ChoiceField(choices=rooms)

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.world_exits.model
        fields = '__all__'
class ExitLocksForm(forms.ModelForm):
    """Model form for exit-lock records; only localizes labels."""
    def __init__(self, *args, **kwargs):
        super(ExitLocksForm, self).__init__(*args, **kwargs)
        # Disabled: restrict 'key' to locked exits.
        #objects = models.world_exits.objects.filter(typeclass="CLASS_LOCKED_EXIT")
        #choices = [(obj.key, obj.name + " (" + obj.key + ")") for obj in objects]
        #self.fields['key'] = forms.ChoiceField(choices=choices)
        localize_form_fields(self)
    class Meta:
        model = DATA_SETS.exit_locks.model
        fields = '__all__'
class TwoWayExitsForm(forms.ModelForm):
    """Model form for two-way-exit records; only localizes labels."""
    def __init__(self, *args, **kwargs):
        super(TwoWayExitsForm, self).__init__(*args, **kwargs)
        # Disabled: restrict 'key' to locked exits.
        #objects = models.world_exits.objects.filter(typeclass="CLASS_LOCKED_EXIT")
        #choices = [(obj.key, obj.name + " (" + obj.key + ")") for obj in objects]
        #self.fields['key'] = forms.ChoiceField(choices=choices)
        localize_form_fields(self)
    class Meta:
        model = DATA_SETS.two_way_exits.model
        fields = '__all__'
class WorldObjectsForm(forms.ModelForm):
    """Form for fixed world objects and object creators."""
    def __init__(self, *args, **kwargs):
        super(WorldObjectsForm, self).__init__(*args, **kwargs)

        def label(rec):
            return "%s (%s)" % (rec.name, rec.key)

        # Exactly two typeclasses are allowed here.
        world_object = DATA_SETS.typeclasses.objects.get(key="CLASS_WORLD_OBJECT")
        creator = DATA_SETS.typeclasses.objects.get(key="CLASS_OBJECT_CREATOR")
        self.fields['typeclass'] = forms.ChoiceField(
            choices=[(world_object.key, label(world_object)),
                     (creator.key, label(creator))])

        rooms = [(r.key, label(r)) for r in DATA_SETS.world_rooms.objects.all()]
        self.fields['location'] = forms.ChoiceField(choices=rooms)

        # Optional icon.
        icons = [("", "---------")]
        icons.extend([(i.key, label(i)) for i in DATA_SETS.icon_resources.objects.all()])
        self.fields['icon'] = forms.ChoiceField(choices=icons, required=False)

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.world_objects.model
        fields = '__all__'
class WorldNPCsForm(forms.ModelForm):
    """Form for fixed-position NPC records."""
    def __init__(self, *args, **kwargs):
        super(WorldNPCsForm, self).__init__(*args, **kwargs)

        def label(rec):
            return "%s (%s)" % (rec.name, rec.key)

        # NPC's typeclass.
        char_classes = DATA_SETS.typeclasses.objects.filter(category="CATE_CHARACTER")
        self.fields['typeclass'] = forms.ChoiceField(
            choices=[(c.key, label(c)) for c in char_classes])

        # NPC's location.
        rooms = [(r.key, label(r)) for r in DATA_SETS.world_rooms.objects.all()]
        self.fields['location'] = forms.ChoiceField(choices=rooms)

        # NPC's model: identified by deduplicated bare keys, optional.
        model_choices = [("", "---------")]
        unique_keys = set(rec.key for rec in DATA_SETS.character_models.objects.all())
        model_choices.extend([(key, key) for key in unique_keys])
        self.fields['model'] = forms.ChoiceField(choices=model_choices, required=False)

        # NPC's icon, optional.
        icons = [("", "---------")]
        icons.extend([(i.key, label(i)) for i in DATA_SETS.icon_resources.objects.all()])
        self.fields['icon'] = forms.ChoiceField(choices=icons, required=False)

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.world_npcs.model
        fields = '__all__'
class ObjectCreatorsForm(forms.ModelForm):
    """Model form for object-creator records; only localizes labels."""
    def __init__(self, *args, **kwargs):
        super(ObjectCreatorsForm, self).__init__(*args, **kwargs)
        # Disabled: restrict 'key' to object-creator world objects.
        #objects = models.world_objects.objects.filter(typeclass="CLASS_OBJECT_CREATOR")
        #choices = [(obj.key, obj.name + " (" + obj.key + ")") for obj in objects]
        #self.fields['key'] = forms.ChoiceField(choices=choices)
        localize_form_fields(self)
    class Meta:
        model = DATA_SETS.object_creators.model
        fields = '__all__'
class CreatorLootListForm(forms.ModelForm):
    """Form for loot entries dropped by object creators."""
    def __init__(self, *args, **kwargs):
        super(CreatorLootListForm, self).__init__(*args, **kwargs)

        def label(rec):
            return "%s (%s)" % (rec.name, rec.key)

        # Providers must be object creators.
        creators = DATA_SETS.world_objects.objects.filter(typeclass="CLASS_OBJECT_CREATOR")
        self.fields['provider'] = forms.ChoiceField(
            choices=[(c.key, label(c)) for c in creators])

        # Any pocketable object can be dropped.
        self.fields['object'] = forms.ChoiceField(choices=get_all_pocketable_objects())

        # Optional quest dependency.
        quests = [("", "---------")]
        quests.extend([(q.key, label(q)) for q in DATA_SETS.quests.objects.all()])
        self.fields['quest'] = forms.ChoiceField(choices=quests, required=False)

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.creator_loot_list.model
        fields = '__all__'
class CharacterLootListForm(forms.ModelForm):
    """Form for loot entries dropped by NPCs or common characters."""
    def __init__(self, *args, **kwargs):
        super(CharacterLootListForm, self).__init__(*args, **kwargs)

        def label(rec):
            return "%s (%s)" % (rec.name, rec.key)

        # Providers can be world NPCs or common characters.
        providers = [(n.key, label(n)) for n in DATA_SETS.world_npcs.objects.all()]
        providers += [(c.key, label(c)) for c in DATA_SETS.common_characters.objects.all()]
        self.fields['provider'] = forms.ChoiceField(choices=providers)

        # Any pocketable object can be dropped.
        self.fields['object'] = forms.ChoiceField(choices=get_all_pocketable_objects())

        # Optional quest dependency.
        quests = [("", "---------")]
        quests.extend([(q.key, label(q)) for q in DATA_SETS.quests.objects.all()])
        self.fields['quest'] = forms.ChoiceField(choices=quests, required=False)

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.character_loot_list.model
        fields = '__all__'
class QuestRewardListForm(forms.ModelForm):
    """Form for reward entries granted by quests."""
    def __init__(self, *args, **kwargs):
        super(QuestRewardListForm, self).__init__(*args, **kwargs)

        def label(rec):
            return "%s (%s)" % (rec.name, rec.key)

        # Providers are quests.
        all_quests = DATA_SETS.quests.objects.all()
        self.fields['provider'] = forms.ChoiceField(
            choices=[(q.key, label(q)) for q in all_quests])

        # Any pocketable object can be rewarded.
        self.fields['object'] = forms.ChoiceField(choices=get_all_pocketable_objects())

        # Optional quest dependency.
        quest_choices = [("", "---------")]
        quest_choices.extend([(q.key, label(q)) for q in DATA_SETS.quests.objects.all()])
        self.fields['quest'] = forms.ChoiceField(choices=quest_choices, required=False)

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.quest_reward_list.model
        fields = '__all__'
class CommonObjectsForm(forms.ModelForm):
    """Form for common object records."""
    def __init__(self, *args, **kwargs):
        super(CommonObjectsForm, self).__init__(*args, **kwargs)

        def label(rec):
            return "%s (%s)" % (rec.name, rec.key)

        object_classes = DATA_SETS.typeclasses.objects.filter(category="CATE_OBJECT")
        self.fields['typeclass'] = forms.ChoiceField(
            choices=[(c.key, label(c)) for c in object_classes])

        # Optional icon.
        icons = [("", "---------")]
        icons.extend([(i.key, label(i)) for i in DATA_SETS.icon_resources.objects.all()])
        self.fields['icon'] = forms.ChoiceField(choices=icons, required=False)

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.common_objects.model
        fields = '__all__'
class FoodsForm(forms.ModelForm):
    """Form for food records; attribute columns are added dynamically."""
    def __init__(self, *args, **kwargs):
        super(FoodsForm, self).__init__(*args, **kwargs)

        def label(rec):
            return "%s (%s)" % (rec.name, rec.key)

        food_classes = DATA_SETS.typeclasses.objects.filter(key="CLASS_FOOD")
        self.fields['typeclass'] = forms.ChoiceField(
            choices=[(c.key, label(c)) for c in food_classes])

        # Optional icon.
        icons = [("", "---------")]
        icons.extend([(i.key, label(i)) for i in DATA_SETS.icon_resources.objects.all()])
        self.fields['icon'] = forms.ChoiceField(choices=icons, required=False)

        localize_form_fields(self)
        # Add the configurable food-attribute fields to this form.
        FOOD_ATTRIBUTES_INFO.set_form_fields(self)

    class Meta:
        model = DATA_SETS.foods.model
        fields = '__all__'
class SkillBooksForm(forms.ModelForm):
    """Form for skill-book records."""
    def __init__(self, *args, **kwargs):
        super(SkillBooksForm, self).__init__(*args, **kwargs)

        def label(rec):
            return "%s (%s)" % (rec.name, rec.key)

        book_classes = DATA_SETS.typeclasses.objects.filter(key="CLASS_SKILL_BOOK")
        self.fields['typeclass'] = forms.ChoiceField(
            choices=[(c.key, label(c)) for c in book_classes])

        # The skill taught by this book.
        skills = [(s.key, label(s)) for s in DATA_SETS.skills.objects.all()]
        self.fields['skill'] = forms.ChoiceField(choices=skills)

        # Optional icon.
        icons = [("", "---------")]
        icons.extend([(i.key, label(i)) for i in DATA_SETS.icon_resources.objects.all()])
        self.fields['icon'] = forms.ChoiceField(choices=icons, required=False)

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.skill_books.model
        fields = '__all__'
class CharacterAttributesForm(forms.ModelForm):
    """Model form for character attribute descriptors."""
    def __init__(self, *args, **kwargs):
        super(CharacterAttributesForm, self).__init__(*args, **kwargs)
        # The mapped model column may not change once created.
        # (Idiom fix: removed the stray C-style trailing semicolon.)
        self.fields['field'].disabled = True
        localize_form_fields(self)
    class Meta:
        model = DATA_SETS.character_attributes_info.model
        fields = '__all__'
class EquipmentAttributesForm(forms.ModelForm):
    """Model form for equipment attribute descriptors."""
    def __init__(self, *args, **kwargs):
        super(EquipmentAttributesForm, self).__init__(*args, **kwargs)
        # The mapped model column may not change once created.
        # (Idiom fix: removed the stray C-style trailing semicolon.)
        self.fields['field'].disabled = True
        localize_form_fields(self)
    class Meta:
        model = DATA_SETS.equipment_attributes_info.model
        fields = '__all__'
class FoodAttributesForm(forms.ModelForm):
    """Model form for food attribute descriptors."""
    def __init__(self, *args, **kwargs):
        super(FoodAttributesForm, self).__init__(*args, **kwargs)
        # The mapped model column may not change once created.
        # (Idiom fix: removed the stray C-style trailing semicolon.)
        self.fields['field'].disabled = True
        localize_form_fields(self)
    class Meta:
        model = DATA_SETS.food_attributes_info.model
        fields = '__all__'
class CharacterModelsForm(forms.ModelForm):
    """Model form for character models; attribute columns are added dynamically."""
    def __init__(self, *args, **kwargs):
        super(CharacterModelsForm, self).__init__(*args, **kwargs)
        localize_form_fields(self)
        # Add the configurable character-attribute fields to this form.
        CHARACTER_ATTRIBUTES_INFO.set_form_fields(self)
    class Meta:
        model = DATA_SETS.character_models.model
        fields = '__all__'
class CommonCharacterForm(forms.ModelForm):
    """Form for common (non-fixed) character records."""
    def __init__(self, *args, **kwargs):
        super(CommonCharacterForm, self).__init__(*args, **kwargs)

        def label(rec):
            return "%s (%s)" % (rec.name, rec.key)

        char_classes = DATA_SETS.typeclasses.objects.filter(category="CATE_CHARACTER")
        self.fields['typeclass'] = forms.ChoiceField(
            choices=[(c.key, label(c)) for c in char_classes])

        # Character models are identified by deduplicated bare keys, optional.
        model_choices = [("", "---------")]
        unique_keys = set(rec.key for rec in DATA_SETS.character_models.objects.all())
        model_choices.extend([(key, key) for key in unique_keys])
        self.fields['model'] = forms.ChoiceField(choices=model_choices, required=False)

        # Optional icon.
        icons = [("", "---------")]
        icons.extend([(i.key, label(i)) for i in DATA_SETS.icon_resources.objects.all()])
        self.fields['icon'] = forms.ChoiceField(choices=icons, required=False)

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.common_characters.model
        fields = '__all__'
class DefaultObjectsForm(forms.ModelForm):
    """Form for objects that characters carry by default."""
    def __init__(self, *args, **kwargs):
        super(DefaultObjectsForm, self).__init__(*args, **kwargs)

        # All character model keys, deduplicated.
        model_keys = set(rec.key for rec in DATA_SETS.character_models.objects.all())
        self.fields['character'] = forms.ChoiceField(
            choices=[(key, key) for key in model_keys])

        # Any pocketable object can be a default carry.
        self.fields['object'] = forms.ChoiceField(choices=get_all_pocketable_objects())

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.default_objects.model
        fields = '__all__'
class ShopsForm(forms.ModelForm):
    """Form for shop records."""
    def __init__(self, *args, **kwargs):
        super(ShopsForm, self).__init__(*args, **kwargs)

        def label(rec):
            return "%s (%s)" % (rec.name, rec.key)

        shop_classes = DATA_SETS.typeclasses.objects.filter(category="CATE_SHOP")
        self.fields['typeclass'] = forms.ChoiceField(
            choices=[(c.key, label(c)) for c in shop_classes])

        # Optional icon.
        icons = [("", "---------")]
        icons.extend([(i.key, label(i)) for i in DATA_SETS.icon_resources.objects.all()])
        self.fields['icon'] = forms.ChoiceField(choices=icons, required=False)

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.shops.model
        fields = '__all__'
class ShopGoodsForm(forms.ModelForm):
    """Form for the goods sold by a shop."""
    def __init__(self, *args, **kwargs):
        super(ShopGoodsForm, self).__init__(*args, **kwargs)

        def label(rec):
            return "%s (%s)" % (rec.name, rec.key)

        # The selling shop.
        shops = [(s.key, label(s)) for s in DATA_SETS.shops.objects.all()]
        self.fields['shop'] = forms.ChoiceField(choices=shops)

        # Any pocketable object can be sold.
        self.fields['object'] = forms.ChoiceField(choices=get_all_pocketable_objects())

        # Goods typeclasses.
        goods_classes = DATA_SETS.typeclasses.objects.filter(category="CATE_SHOP_GOODS")
        self.fields['typeclass'] = forms.ChoiceField(
            choices=[(c.key, label(c)) for c in goods_classes])

        # Price units are common objects.
        units = [(u.key, label(u)) for u in DATA_SETS.common_objects.objects.all()]
        self.fields['unit'] = forms.ChoiceField(choices=units)

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.shop_goods.model
        fields = '__all__'
class NPCShopsForm(forms.ModelForm):
    """Form linking NPCs to the shops they run."""
    def __init__(self, *args, **kwargs):
        super(NPCShopsForm, self).__init__(*args, **kwargs)

        def label(rec):
            return "%s (%s)" % (rec.name, rec.key)

        # All NPCs.
        npcs = [(n.key, label(n)) for n in DATA_SETS.world_npcs.objects.all()]
        self.fields['npc'] = forms.ChoiceField(choices=npcs)

        # All shops.
        shops = [(s.key, label(s)) for s in DATA_SETS.shops.objects.all()]
        self.fields['shop'] = forms.ChoiceField(choices=shops)

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.npc_shops.model
        fields = '__all__'
class SkillsForm(forms.ModelForm):
    """Form for skill records."""
    def __init__(self, *args, **kwargs):
        super(SkillsForm, self).__init__(*args, **kwargs)

        def label(rec):
            return "%s (%s)" % (rec.name, rec.key)

        skill_classes = DATA_SETS.typeclasses.objects.filter(category="CATE_SKILL")
        self.fields['typeclass'] = forms.ChoiceField(
            choices=[(c.key, label(c)) for c in skill_classes])

        # Optional icon.
        icons = [("", "---------")]
        icons.extend([(i.key, label(i)) for i in DATA_SETS.icon_resources.objects.all()])
        self.fields['icon'] = forms.ChoiceField(choices=icons, required=False)

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.skills.model
        fields = '__all__'
class DefaultSkillsForm(forms.ModelForm):
    """Form for skills that characters know by default."""
    def __init__(self, *args, **kwargs):
        super(DefaultSkillsForm, self).__init__(*args, **kwargs)

        # All character model keys, deduplicated.
        model_keys = set(rec.key for rec in DATA_SETS.character_models.objects.all())
        self.fields['character'] = forms.ChoiceField(
            choices=[(key, key) for key in model_keys])

        # The skill granted by default.
        skills = [(s.key, "%s (%s)" % (s.name, s.key))
                  for s in DATA_SETS.skills.objects.all()]
        self.fields['skill'] = forms.ChoiceField(choices=skills)

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.default_skills.model
        fields = '__all__'
class NPCDialoguesForm(forms.ModelForm):
    """Form linking NPCs to their dialogues."""
    def __init__(self, *args, **kwargs):
        super(NPCDialoguesForm, self).__init__(*args, **kwargs)

        def label(rec):
            return "%s (%s)" % (rec.name, rec.key)

        # All NPCs.
        npcs = [(n.key, label(n)) for n in DATA_SETS.world_npcs.objects.all()]
        self.fields['npc'] = forms.ChoiceField(choices=npcs)

        dialogues = [(d.key, label(d)) for d in DATA_SETS.dialogues.objects.all()]
        self.fields['dialogue'] = forms.ChoiceField(choices=dialogues)

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.npc_dialogues.model
        fields = '__all__'
class QuestsForm(forms.ModelForm):
    """Edit form for quest records."""

    def __init__(self, *args, **kwargs):
        super(QuestsForm, self).__init__(*args, **kwargs)

        # Only typeclasses in the quest category may be chosen.
        quest_classes = DATA_SETS.typeclasses.objects.filter(category="CATE_QUEST")
        self.fields['typeclass'] = forms.ChoiceField(
            choices=[(c.key, c.name + " (" + c.key + ")") for c in quest_classes])

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.quests.model
        fields = '__all__'
class QuestObjectivesForm(forms.ModelForm):
    """Edit form for a quest's objectives."""

    def __init__(self, *args, **kwargs):
        super(QuestObjectivesForm, self).__init__(*args, **kwargs)

        quests = DATA_SETS.quests.objects.all()
        self.fields['quest'] = forms.ChoiceField(
            choices=[(q.key, q.name + " (" + q.key + ")") for q in quests])

        objective_types = DATA_SETS.quest_objective_types.objects.all()
        self.fields['type'] = forms.ChoiceField(
            choices=[(t.key, t.name + " (" + t.key + ")") for t in objective_types])

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.quest_objectives.model
        fields = '__all__'
class QuestDependenciesForm(forms.ModelForm):
    """Edit form for dependencies between quests."""

    def __init__(self, *args, **kwargs):
        super(QuestDependenciesForm, self).__init__(*args, **kwargs)

        # Both ends of a dependency are quests, so share one choice list.
        quest_choices = [(q.key, q.name + " (" + q.key + ")")
                         for q in DATA_SETS.quests.objects.all()]
        self.fields['quest'] = forms.ChoiceField(choices=quest_choices)
        self.fields['dependency'] = forms.ChoiceField(choices=quest_choices)

        dependency_types = DATA_SETS.quest_dependency_types.objects.all()
        self.fields['type'] = forms.ChoiceField(
            choices=[(t.key, t.name + " (" + t.key + ")") for t in dependency_types])

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.quest_dependencies.model
        fields = '__all__'
class DialogueQuestDependenciesForm(forms.ModelForm):
    """Edit form for quest preconditions attached to dialogues."""

    def __init__(self, *args, **kwargs):
        super(DialogueQuestDependenciesForm, self).__init__(*args, **kwargs)

        dialogues = DATA_SETS.dialogues.objects.all()
        self.fields['dialogue'] = forms.ChoiceField(
            choices=[(d.key, d.name + " (" + d.key + ")") for d in dialogues])

        # The dependency target is a quest record.
        quests = DATA_SETS.quests.objects.all()
        self.fields['dependency'] = forms.ChoiceField(
            choices=[(q.key, q.name + " (" + q.key + ")") for q in quests])

        dependency_types = DATA_SETS.quest_dependency_types.objects.all()
        self.fields['type'] = forms.ChoiceField(
            choices=[(t.key, t.name + " (" + t.key + ")") for t in dependency_types])

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.dialogue_quest_dependencies.model
        fields = '__all__'
class EquipmentsForm(forms.ModelForm):
    """Edit form for equipment records, including attribute fields."""

    def __init__(self, *args, **kwargs):
        super(EquipmentsForm, self).__init__(*args, **kwargs)

        # Note: equipment is filtered by exact typeclass key, not category.
        equip_classes = DATA_SETS.typeclasses.objects.filter(key="CLASS_EQUIPMENT")
        self.fields['typeclass'] = forms.ChoiceField(
            choices=[(c.key, c.name + " (" + c.key + ")") for c in equip_classes])

        positions = DATA_SETS.equipment_positions.objects.all()
        self.fields['position'] = forms.ChoiceField(
            choices=[(p.key, p.name + " (" + p.key + ")") for p in positions])

        equip_types = DATA_SETS.equipment_types.objects.all()
        self.fields['type'] = forms.ChoiceField(
            choices=[(t.key, t.name + " (" + t.key + ")") for t in equip_types])

        # Icon is optional, so offer a blank first entry.
        icon_choices = [("", "---------")]
        icon_choices.extend(
            (i.key, i.name + " (" + i.key + ")")
            for i in DATA_SETS.icon_resources.objects.all())
        self.fields['icon'] = forms.ChoiceField(choices=icon_choices, required=False)

        localize_form_fields(self)
        # Add the configurable equipment attribute fields.
        EQUIPMENT_ATTRIBUTES_INFO.set_form_fields(self)

    class Meta:
        model = DATA_SETS.equipments.model
        fields = '__all__'
class CareerEquipmentsForm(forms.ModelForm):
    """Edit form mapping careers to the equipment types they can use."""

    def __init__(self, *args, **kwargs):
        super(CareerEquipmentsForm, self).__init__(*args, **kwargs)

        careers = DATA_SETS.character_careers.objects.all()
        self.fields['career'] = forms.ChoiceField(
            choices=[(c.key, c.name + " (" + c.key + ")") for c in careers])

        equip_types = DATA_SETS.equipment_types.objects.all()
        self.fields['equipment'] = forms.ChoiceField(
            choices=[(e.key, e.name + " (" + e.key + ")") for e in equip_types])

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.career_equipments.model
        fields = '__all__'
class EventDataForm(forms.ModelForm):
    """Edit form for event records."""

    def __init__(self, *args, **kwargs):
        super(EventDataForm, self).__init__(*args, **kwargs)

        event_types = DATA_SETS.event_types.objects.all()
        self.fields['type'] = forms.ChoiceField(
            choices=[(t.key, t.name + " (" + t.key + ")") for t in event_types])

        trigger_types = DATA_SETS.event_trigger_types.objects.all()
        self.fields['trigger_type'] = forms.ChoiceField(
            choices=[(t.key, t.name + " (" + t.key + ")") for t in trigger_types])

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.event_data.model
        fields = '__all__'
class EventAttacksForm(forms.ModelForm):
    """Edit form for attack events."""

    def __init__(self, *args, **kwargs):
        super(EventAttacksForm, self).__init__(*args, **kwargs)

        # Only events of the attack type may own an attack record.
        attack_events = DATA_SETS.event_data.objects.filter(type="EVENT_ATTACK")
        self.fields['key'] = forms.ChoiceField(
            choices=[(e.key, e.name + " (" + e.key + ")") for e in attack_events])

        mobs = DATA_SETS.common_characters.objects.all()
        self.fields['mob'] = forms.ChoiceField(
            choices=[(m.key, m.name + " (" + m.key + ")") for m in mobs])

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.event_attacks.model
        fields = '__all__'
class EventDialoguesForm(forms.ModelForm):
    """Edit form for dialogue events."""

    def __init__(self, *args, **kwargs):
        super(EventDialoguesForm, self).__init__(*args, **kwargs)

        # Only events of the dialogue type may own a dialogue record.
        dialogue_events = DATA_SETS.event_data.objects.filter(type="EVENT_DIALOGUE")
        self.fields['key'] = forms.ChoiceField(
            choices=[(e.key, e.name + " (" + e.key + ")") for e in dialogue_events])

        dialogues = DATA_SETS.dialogues.objects.all()
        self.fields['dialogue'] = forms.ChoiceField(
            choices=[(d.key, d.name + " (" + d.key + ")") for d in dialogues])

        # The NPC is optional, so offer a blank first entry.
        npc_choices = [("", "---------")]
        npc_choices.extend(
            (n.key, n.name + " (" + n.key + ")")
            for n in DATA_SETS.world_npcs.objects.all())
        self.fields['npc'] = forms.ChoiceField(choices=npc_choices, required=False)

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.event_dialogues.model
        fields = '__all__'
class DialoguesForm(forms.ModelForm):
    """Edit form for dialogue records; only localization is applied."""

    def __init__(self, *args, **kwargs):
        super(DialoguesForm, self).__init__(*args, **kwargs)
        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.dialogues.model
        fields = '__all__'
class DialogueRelationsForm(forms.ModelForm):
    """Edit form linking a dialogue to its successor dialogue."""

    def __init__(self, *args, **kwargs):
        super(DialogueRelationsForm, self).__init__(*args, **kwargs)

        # Both ends of the relation are dialogues, so share one choice list.
        dialogue_choices = [(d.key, d.name + " (" + d.key + ")")
                            for d in DATA_SETS.dialogues.objects.all()]
        self.fields['dialogue'] = forms.ChoiceField(choices=dialogue_choices)
        self.fields['next_dlg'] = forms.ChoiceField(choices=dialogue_choices)

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.dialogue_relations.model
        fields = '__all__'
class DialogueSentencesForm(forms.ModelForm):
    """Edit form for individual sentences inside a dialogue."""

    def __init__(self, *args, **kwargs):
        super(DialogueSentencesForm, self).__init__(*args, **kwargs)

        dialogues = DATA_SETS.dialogues.objects.all()
        self.fields['dialogue'] = forms.ChoiceField(
            choices=[(d.key, d.name + " (" + d.key + ")") for d in dialogues])

        # The sentence icon is optional, so offer a blank first entry.
        icon_choices = [("", "---------")]
        icon_choices.extend(
            (i.key, i.name + " (" + i.key + ")")
            for i in DATA_SETS.icon_resources.objects.all())
        self.fields['icon'] = forms.ChoiceField(choices=icon_choices, required=False)

        # A sentence may optionally provide or complete a quest; both fields
        # share the same optional quest choice list.
        quest_choices = [("", "---------")]
        quest_choices.extend(
            (q.key, q.name + " (" + q.key + ")")
            for q in DATA_SETS.quests.objects.all())
        self.fields['provide_quest'] = forms.ChoiceField(choices=quest_choices,
                                                         required=False)
        self.fields['complete_quest'] = forms.ChoiceField(choices=quest_choices,
                                                          required=False)

        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.dialogue_sentences.model
        fields = '__all__'
class LocalizedStringsForm(forms.ModelForm):
    """Edit form for localized string records; only localization is applied."""

    def __init__(self, *args, **kwargs):
        super(LocalizedStringsForm, self).__init__(*args, **kwargs)
        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.localized_strings.model
        fields = '__all__'
class ImageResourcesForm(forms.ModelForm):
    """Edit form for image resources; exposes only key, name and file."""

    def __init__(self, *args, **kwargs):
        super(ImageResourcesForm, self).__init__(*args, **kwargs)
        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.image_resources.model
        fields = ('key', 'name', 'resource',)
class IconResourcesForm(forms.ModelForm):
    """Edit form for icon resources; exposes only key, name and file."""

    def __init__(self, *args, **kwargs):
        super(IconResourcesForm, self).__init__(*args, **kwargs)
        localize_form_fields(self)

    class Meta:
        model = DATA_SETS.icon_resources.model
        fields = ('key', 'name', 'resource',)
| |
#!/usr/bin/env python
"""A keyword index of client machines.
An index of client machines, associating likely identifiers to client IDs.
"""
from grr.lib import aff4
from grr.lib import keyword_index
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.rdfvalues import client as rdf_client
# The system's primary client index.
MAIN_INDEX = rdfvalue.RDFURN("aff4:/client_index")
class ClientIndex(keyword_index.AFF4KeywordIndex):
  """An index of client machines.

  Maps searchable keywords (hostname, user, IP, MAC, labels, ...) to client
  ids, optionally restricted to a time window in which the keyword was seen.
  """

  # Keyword prefixes recognized by _AnalyzeKeywords to constrain the
  # search window instead of being looked up as index terms.
  START_TIME_PREFIX = "start_date:"
  START_TIME_PREFIX_LEN = len(START_TIME_PREFIX)
  END_TIME_PREFIX = "end_date:"
  END_TIME_PREFIX_LEN = len(END_TIME_PREFIX)

  # We accept and return client URNs, but store client ids,
  # e.g. "C.00aaeccbb45f33a3".
  def _ClientIdFromURN(self, urn):
    """Extract the stored client id (URN basename) from a client URN."""
    return urn.Basename()

  def _URNFromClientID(self, client_id):
    """Convert a stored client id back into a client URN."""
    return rdf_client.ClientURN(client_id)

  def _NormalizeKeyword(self, keyword):
    # Keywords are matched case-insensitively: store and search lowercase.
    return keyword.lower()

  def _AnalyzeKeywords(self, keywords):
    """Split raw keywords into search terms and time-range constraints.

    Returns:
      A tuple (start_time, end_time, filtered_keywords, unversioned_keywords)
      where filtered_keywords are the terms to look up, and
      unversioned_keywords are the "+"-prefixed terms that must be present in
      the latest client version (not merely some historical one).
    """
    # Default window: the last 180 days, up to the newest index entry.
    start_time = rdfvalue.RDFDatetime().Now() - rdfvalue.Duration("180d")
    end_time = rdfvalue.RDFDatetime(self.LAST_TIMESTAMP)
    filtered_keywords = []
    unversioned_keywords = []
    for k in keywords:
      if k.startswith(self.START_TIME_PREFIX):
        try:
          start_time = rdfvalue.RDFDatetime(k[self.START_TIME_PREFIX_LEN:])
        except ValueError:
          # Unparseable start date: silently keep the default window.
          pass
      elif k.startswith(self.END_TIME_PREFIX):
        try:
          time = rdfvalue.RDFDatetime()
          time.ParseFromHumanReadable(k[self.END_TIME_PREFIX_LEN:], eoy=True)
          end_time = time
        except (TypeError, ValueError):
          # Unparseable end date: silently keep the default window.
          pass
      elif k[0] == "+":
        # "+keyword": searched normally AND tracked as unversioned so callers
        # can later discard clients whose match is stale.
        kw = k[1:]
        filtered_keywords.append(kw)
        unversioned_keywords.append(kw)
      else:
        filtered_keywords.append(k)
    if not filtered_keywords:
      # "." is the universal keyword attached to every client (see
      # AnalyzeClient); an empty query therefore matches all clients.
      filtered_keywords.append(".")
    return start_time, end_time, filtered_keywords, unversioned_keywords

  def LookupClients(self, keywords):
    """Returns a list of client URNs associated with keywords.

    Args:
      keywords: The list of keywords to search by.

    Returns:
      A list of client URNs.

    Raises:
      ValueError: A string (single keyword) was passed instead of an iterable.
    """
    if isinstance(keywords, basestring):
      raise ValueError("Keywords should be an iterable, not a string (got %s)."
                       % keywords)
    start_time, end_time, filtered_keywords, unversioned_keywords = (
        self._AnalyzeKeywords(keywords)
    )
    # If there are any unversioned keywords in the query, add the universal
    # keyword so we are assured to have an accurate last update time for each
    # client.
    last_seen_map = None
    if unversioned_keywords:
      filtered_keywords.append(".")
      last_seen_map = {}
    # TODO(user): Make keyword index datetime aware so that
    # AsMicroSecondsFromEpoch is unecessary.
    raw_results = self.Lookup(map(self._NormalizeKeyword, filtered_keywords),
                              start_time=start_time.AsMicroSecondsFromEpoch(),
                              end_time=end_time.AsMicroSecondsFromEpoch(),
                              last_seen_map=last_seen_map)
    # Discard clients whose unversioned keyword was last seen earlier than
    # the client's latest index update (".") -- i.e. the match is stale.
    old_results = set()
    for keyword in unversioned_keywords:
      for result in raw_results:
        if last_seen_map[(keyword, result)] < last_seen_map[(".", result)]:
          old_results.add(result)
    raw_results -= old_results
    return map(self._URNFromClientID, raw_results)

  def ReadClientPostingLists(self, keywords):
    """Looks up all clients associated with any of the given keywords.

    Args:
      keywords: A list of keywords we are interested in.

    Returns:
      A dict mapping each keyword to a list of matching clients.
    """
    start_time, end_time, filtered_keywords, _ = self._AnalyzeKeywords(keywords)
    # TODO(user): Make keyword index datetime aware so that
    # AsMicroSecondsFromEpoch is unecessary.
    return self.ReadPostingLists(
        filtered_keywords, start_time=start_time.AsMicroSecondsFromEpoch(),
        end_time=end_time.AsMicroSecondsFromEpoch())

  def AnalyzeClient(self, client):
    """Finds the client_id and keywords for a client.

    Args:
      client: A VFSGRRClient record to find keywords for.

    Returns:
      A tuple (client_id, keywords) where client_id is the client identifier and
      keywords is a list of keywords related to client.
    """
    client_id = self._ClientIdFromURN(client.urn)
    # Start with both the client id itself, and a universal keyword, used to
    # find all clients.
    #
    # TODO(user): Remove the universal keyword once we have a better way
    # to do this, i.e., once we have a storage library which can list all
    # clients directly.
    keywords = [self._NormalizeKeyword(client_id), "."]

    # Append the keyword, with and without its "prefix:" form. Empty or
    # falsy values are skipped.
    def TryAppend(prefix, keyword):
      if keyword:
        keyword_string = self._NormalizeKeyword(utils.SmartStr(keyword))
        keywords.append(keyword_string)
        if prefix:
          keywords.append(prefix + ":" + keyword_string)

    # Also index every proper prefix of a delimited keyword (e.g. each
    # leading part of a dotted FQDN). Returns the number of segments.
    def TryAppendPrefixes(prefix, keyword, delimiter):
      TryAppend(prefix, keyword)
      segments = str(keyword).split(delimiter)
      for i in range(1, len(segments)):
        TryAppend(prefix, delimiter.join(segments[0:i]))
      return len(segments)

    def TryAppendIP(ip):
      TryAppend("ip", ip)
      # IPv4?
      if TryAppendPrefixes("ip", str(ip), ".") == 4:
        return
      # IPv6?
      TryAppendPrefixes("ip", str(ip), ":")

    def TryAppendMac(mac):
      TryAppend("mac", mac)
      if len(mac) == 12:
        # If looks like a mac address without ":" symbols, also add the keyword
        # with them.
        TryAppend("mac", ":".join([mac[i:i + 2] for i in range(0, 12, 2)]))

    s = client.Schema
    TryAppend("host", client.Get(s.HOSTNAME))
    TryAppendPrefixes("host", client.Get(s.HOSTNAME), "-")
    TryAppend("host", client.Get(s.FQDN))
    TryAppendPrefixes("host", client.Get(s.FQDN), ".")
    TryAppend("", client.Get(s.SYSTEM))
    TryAppend("", client.Get(s.UNAME))
    TryAppend("", client.Get(s.OS_RELEASE))
    TryAppend("", client.Get(s.OS_VERSION))
    TryAppend("", client.Get(s.KERNEL))
    TryAppend("", client.Get(s.ARCH))
    for user in client.Get(s.USER, []):
      TryAppend("user", user.username)
      TryAppend("", user.full_name)
      if user.full_name:
        for name in user.full_name.split():
          # full_name often includes nicknames and similar, wrapped in
          # punctuation, e.g. "Thomas 'TJ' Jones". We remove the most common
          # wrapping characters.
          TryAppend("", name.strip("\"'()"))
    for username in client.Get(s.USERNAMES, []):
      TryAppend("user", username)
    for interface in client.Get(s.LAST_INTERFACES, []):
      if interface.mac_address:
        TryAppendMac(interface.mac_address.human_readable_address)
      for ip in interface.GetIPAddresses():
        TryAppendIP(ip)
    # We should have all mac and ip addresses already, but some test data only
    # has it attached directly, so just in case we look there also.
    if client.Get(s.MAC_ADDRESS):
      for mac in str(client.Get(s.MAC_ADDRESS)).split("\n"):
        TryAppendMac(mac)
    for ip_list in client.Get(s.HOST_IPS, []):
      for ip in str(ip_list).split("\n"):
        TryAppendIP(ip)
    client_info = client.Get(s.CLIENT_INFO)
    if client_info:
      TryAppend("client", client_info.client_name)
      if client_info.labels:
        for label in client_info.labels:
          TryAppend("label", label)
    for label in client.GetLabelsNames():
      TryAppend("label", label)
    return (client_id, keywords)

  def AddClient(self, client, **kwargs):
    """Adds a client to the index.

    Args:
      client: A VFSGRRClient record to add or update.
      **kwargs: Additional arguments to pass to the datastore.
    """
    self.AddKeywordsForName(*self.AnalyzeClient(client), **kwargs)

  def RemoveClientLabels(self, client):
    """Removes all labels for a given client object.

    Args:
      client: A VFSGRRClient record.
    """
    keywords = []
    for label in client.GetLabelsNames():
      keyword = self._NormalizeKeyword(utils.SmartStr(label))
      # This might actually delete a keyword with the same name as the label (if
      # there is one). Usually the client labels will get added right after the
      # deletion of the old labels though so this can only be abused to destroy
      # historic index data, the search functionality will not be affected.
      keywords.append(keyword)
      keywords.append("label:%s" % keyword)
    self.RemoveKeywordsForName(self._ClientIdFromURN(client.urn), keywords)
def GetClientURNsForHostnames(hostnames, token=None):
  """Gets all client_ids for a given list of hostnames or FQDNS.

  Args:
    hostnames: A list of hostnames / FQDNs.
    token: An ACL token.

  Returns:
    A dict with a list of all known GRR client_ids for each hostname.
  """
  index = aff4.FACTORY.Create(
      MAIN_INDEX, aff4_type="ClientIndex", mode="rw", object_exists=True,
      token=token)

  # Normalize each hostname into its "host:" keyword form (unless the caller
  # already supplied one).
  keywords = set(h if h.startswith("host:") else "host:%s" % h
                 for h in hostnames)

  posting_lists = index.ReadClientPostingLists(keywords)
  # Strip the "host:" prefix again so callers get back plain hostnames.
  return dict((keyword[len("host:"):], hits)
              for keyword, hits in posting_lists.iteritems())
def BulkLabel(label, hostnames, token, client_index=None):
  """Assign a label to a group of clients based on hostname.

  Sets a label as an identifier to a group of clients. Removes the label from
  other clients.

  This can be used to automate labeling clients based on externally derived
  attributes, for example machines assigned to particular users, or machines
  fulfilling particular roles.

  Args:
    label: The label to apply.
    hostnames: The collection of hostnames that should have the label.
    token: The authentication token.
    client_index: An optional client index to use. If not provided, use the
      default client index.
  """
  if client_index is None:
    client_index = aff4.FACTORY.Create(
        MAIN_INDEX, aff4_type="ClientIndex", mode="rw", object_exists=True,
        token=token)
  # Normalize hostnames to lowercase for comparison against stored FQDNs.
  fqdns = set()
  for hostname in hostnames:
    fqdns.add(hostname.lower())
  # Find clients with this label.
  label_index = aff4.FACTORY.Open("aff4:/index/labels/clients", token=token)
  labelled_urns = label_index.FindUrnsByLabel(label)
  # If a labelled client fqdn isn't in the set of target fqdns remove the label.
  # Labelled clients with a target fqdn need no action and are removed from the
  # set of target fqdns.
  for client in aff4.FACTORY.MultiOpen(labelled_urns, token=token,
                                       aff4_type="VFSGRRClient", mode="rw"):
    fqdn = utils.SmartStr(client.Get("FQDN")).lower()
    if fqdn not in fqdns:
      # Order matters: drop the client's old index keywords, persist the
      # label removal, then re-index so the keyword index stays consistent.
      client_index.RemoveClientLabels(client)
      client.RemoveLabels(label, owner="GRR")
      client.Flush()
      client_index.AddClient(client)
    else:
      fqdns.discard(fqdn)
  # The residual set of fqdns needs labelling.
  # Get the latest URN for these clients and open them to add the label.
  urns = []
  for fqdn in fqdns:
    # "+host:" requires the hostname keyword on the latest client version.
    urns.extend(client_index.LookupClients(["+host:%s" % fqdn]))
  for client in aff4.FACTORY.MultiOpen(urns, token=token,
                                       aff4_type="VFSGRRClient", mode="rw"):
    client.AddLabels(label, owner="GRR")
    client.Flush()
    client_index.AddClient(client)
| |
"""Common settings and globals."""
from os.path import abspath, basename, dirname, join, normpath
from sys import path
## PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
# Absolute filesystem path to the top-level project folder:
SITE_ROOT = dirname(DJANGO_ROOT)
# Site name:
SITE_NAME = basename(DJANGO_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
## END PATH CONFIGURATION
## DEBUG CONFIGURATION
# See: http://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# See: http://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
## END DEBUG CONFIGURATION
## MANAGER CONFIGURATION
# See: http://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Your Name', 'your_email@example.com'),
)
# See: http://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
## END MANAGER CONFIGURATION
## DATABASE CONFIGURATION
# See: http://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
## END DATABASE CONFIGURATION
## GENERAL CONFIGURATION
# See: http://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'Africa/Nairobi'
# See: http://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: http://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: http://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: http://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: http://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
## END GENERAL CONFIGURATION
## MEDIA CONFIGURATION
# See: http://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = normpath(join(SITE_ROOT, 'media'))
# See: http://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
## END MEDIA CONFIGURATION
## STATIC FILE CONFIGURATION
# See: http://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = normpath(join(SITE_ROOT, 'assets'))
# See: http://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: http://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
normpath(join(SITE_ROOT, 'static')),
)
# See: http://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
## END STATIC FILE CONFIGURATION
## SECRET CONFIGURATION
# See: http://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = r"+rk7txvm*l^3n9c-=#9je1qzi0xvq!^#1f&we2u463%pa63hvm"
## END SECRET CONFIGURATION
## FIXTURE CONFIGURATION
# See: http://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
normpath(join(SITE_ROOT, 'fixtures')),
)
## END FIXTURE CONFIGURATION
## TEMPLATE CONFIGURATION
# See: http://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
)
# See: http://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: http://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
normpath(join(SITE_ROOT, 'templates')),
)
## END TEMPLATE CONFIGURATION
## MIDDLEWARE CONFIGURATION
# See: http://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.gzip.GZipMiddleware',
'core.middleware.MinifyHTMLMiddleware',
'django.middleware.http.ConditionalGetMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
)
## END MIDDLEWARE CONFIGURATION
## URL CONFIGURATION
# See: http://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.urls' % SITE_NAME
## END URL CONFIGURATION
## APP CONFIGURATION
DJANGO_APPS = (
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.flatpages',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sitemaps',
'django.contrib.sites',
'django.contrib.staticfiles',
# 'django.contrib.humanize',
)
THIRD_PARTY_APPS = (
'addthis',
'bootstrap-pagination',
'ckeditor',
'ganalytics',
'haystack',
'intensedebate',
'sorl.thumbnail',
'south',
'storages'
)
LOCAL_APPS = (
'articles',
'blog',
'core',
'menu',
'programs',
)
# See: http://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
## END APP CONFIGURATION
## LOGGING CONFIGURATION
# See: http://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
## END LOGGING CONFIGURATION
## WSGI CONFIGURATION
# See: http://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'wsgi.application'
## END WSGI CONFIGURATION
## CKEDITOR CONFIGURATION
# See: http://github.com/shaunsephton/django-ckeditor#required
CKEDITOR_UPLOAD_PATH = normpath(join(SITE_ROOT, 'ckeditor'))
# See: http://github.com/shaunsephton/django-ckeditor#optional
CKEDITOR_CONFIGS = {
'default': {
'toolbar_Full': [
['Source', '-', 'Save', 'NewPage', 'DocProps', 'Preview', 'Print'],
['Cut', 'Copy', 'Paste', 'PasteText', 'PasteFromWord', '-', 'Undo','Redo'],
['Find', 'Replace', '-', 'SelectAll', '-', 'SpellChecker', 'Scayt'],
['Bold', 'Italic', 'Underline', 'Strike', 'Subscript',
'Superscript', '-', 'RemoveFormat'],
['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-',
'Blockquote', 'CreateDiv', '-', 'JustifyLeft', 'JustifyCenter',
'JustifyRight', 'JustifyBlock', '-', 'BidiLtr', 'BidiRtl'],
['Link', 'Unlink', 'Anchor'],
['Table', 'HorizontalRule', 'SpecialChar'],
['Format'],
['Maximize', 'ShowBlocks', '-', 'About'],
],
'forcePasteAsPlainText': True,
'scayt_autoStartup': True,
'scayt_sLang': 'en_GB',
'startupOutlineBlocks': True,
},
}
## END CKEDITOR CONFIGURATION
## HAYSTACK CONFIGURATION
# See: http://django-haystack.readthedocs.org/en/latest/tutorial.html#configuration
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': normpath(join(SITE_ROOT, 'whoosh_index')),
},
}
## END HAYSTACK CONFIGURATION
## THUMBNAIL CONFIGURATION
# See: http://sorl-thumbnail.readthedocs.org/en/latest/reference/settings.html#thumbnail-progressive
THUMBNAIL_PROGRESSIVE = False
# See: http://sorl-thumbnail.readthedocs.org/en/latest/reference/settings.html#thumbnail-upscale
THUMBNAIL_UPSCALE = False
# See: http://sorl-thumbnail.readthedocs.org/en/latest/reference/settings.html#thumbnail-format
THUMBNAIL_FORMAT = 'PNG'
## THUMBNAIL CONFIGURATION
| |
"""
kombu.utils
===========
Internal utilities.
"""
from __future__ import absolute_import, print_function, unicode_literals
import importlib
import numbers
import random
import sys
from contextlib import contextmanager
from itertools import count, repeat
from functools import wraps
from time import sleep
from uuid import UUID, uuid4 as _uuid4, _uuid_generate_random
from kombu.five import items, reraise, string_t
from .encoding import default_encode, safe_repr as _safe_repr
try:
import ctypes
except:
ctypes = None # noqa
try:
from io import UnsupportedOperation
FILENO_ERRORS = (AttributeError, ValueError, UnsupportedOperation)
except ImportError: # pragma: no cover
# Py2
FILENO_ERRORS = (AttributeError, ValueError) # noqa
__all__ = ['EqualityDict', 'uuid', 'maybe_list',
'fxrange', 'fxrangemax', 'retry_over_time',
'emergency_dump_state', 'cached_property',
'reprkwargs', 'reprcall', 'nested', 'fileno', 'maybe_fileno']
def symbol_by_name(name, aliases=None, imp=None, package=None,
                   sep='.', default=None, **kwargs):
    """Get symbol by qualified name.

    The name should be the full dot-separated path to the class::

        modulename.ClassName

    Example::

        celery.concurrency.processes.TaskPool
                                     ^- class name

    or using ':' to separate module and symbol::

        celery.concurrency.processes:TaskPool

    If `aliases` is provided, a dict containing short name/long name
    mappings, the name is looked up in the aliases first.

    Examples:

        >>> symbol_by_name('celery.concurrency.processes.TaskPool')
        <class 'celery.concurrency.processes.TaskPool'>

        >>> symbol_by_name('default', {
        ...     'default': 'celery.concurrency.processes.TaskPool'})
        <class 'celery.concurrency.processes.TaskPool'>

        # Does not try to look up non-string names.
        >>> from celery.concurrency.processes import TaskPool
        >>> symbol_by_name(TaskPool) is TaskPool
        True

    """
    # The default used to be a shared mutable dict ({}); use the None
    # sentinel so no caller can accidentally mutate module-level state.
    if aliases is None:
        aliases = {}
    if imp is None:
        imp = importlib.import_module
    if not isinstance(name, string_t):
        return name  # already a class
    name = aliases.get(name) or name
    sep = ':' if ':' in name else sep
    module_name, _, cls_name = name.rpartition(sep)
    if not module_name:
        # Bare name: either resolve it inside *package*, or treat the whole
        # string as a module name.
        cls_name, module_name = None, package if package else cls_name
    try:
        try:
            module = imp(module_name, package=package, **kwargs)
        except ValueError as exc:
            # Re-raise with the original traceback but a friendlier message.
            reraise(ValueError,
                    ValueError("Couldn't import {0!r}: {1}".format(name, exc)),
                    sys.exc_info()[2])
        return getattr(module, cls_name) if cls_name else module
    except (ImportError, AttributeError):
        if default is None:
            raise
        return default
class HashedSeq(list):
    """A list whose hash is computed once at construction and then cached.

    Useful as a dictionary key built from several values, so ``hash()`` is
    not recomputed on every lookup.
    """
    __slots__ = 'hashvalue'

    def __init__(self, *seq):
        # Cache the hash of the underlying tuple, then copy its items in.
        self.hashvalue = hash(seq)
        self.extend(seq)

    def __hash__(self):
        return self.hashvalue
def eqhash(o):
    """Hash *o* via its ``__eqhash__`` method when defined, else ``hash()``."""
    try:
        hasher = o.__eqhash__
        return hasher()
    except AttributeError:
        return hash(o)


class EqualityDict(dict):
    """Dict keyed by :func:`eqhash` of the key instead of plain ``hash()``."""

    def __getitem__(self, key):
        digest = eqhash(key)
        if dict.__contains__(self, digest):
            return dict.__getitem__(self, digest)
        return self.__missing__(key)

    def __setitem__(self, key, value):
        return dict.__setitem__(self, eqhash(key), value)

    def __delitem__(self, key):
        return dict.__delitem__(self, eqhash(key))
def uuid4():
    """Return a new random :class:`uuid.UUID`.

    Uses libuuid directly (via ctypes) when available instead of
    :func:`uuid.uuid4`.
    """
    # Workaround for http://bugs.python.org/issue4607
    if ctypes and _uuid_generate_random:  # pragma: no cover
        buffer = ctypes.create_string_buffer(16)
        _uuid_generate_random(buffer)
        return UUID(bytes=buffer.raw)
    # Fall back to the stdlib implementation when libuuid is unavailable.
    return _uuid4()
def uuid():
    """Generate a unique id, having - hopefully - a very small chance of
    collision.

    For now this is provided by :func:`uuid.uuid4`.
    """
    value = uuid4()
    return str(value)

# Backwards-compatible alias.
gen_unique_id = uuid
def maybe_list(v):
    """Coerce *v* to a list-ish value: None -> [], iterables pass through
    unchanged, and any other scalar is wrapped as ``[v]``."""
    if v is None:
        return []
    return v if hasattr(v, '__iter__') else [v]
def fxrange(start=1.0, stop=None, step=1.0, repeatlast=False):
    """Yield floats from *start*, incremented by *step*.

    Runs forever when *stop* is falsy; otherwise stops after *stop* is
    passed, or keeps yielding the last value forever if *repeatlast* is set.
    """
    current = float(start)
    while True:
        if stop and current > stop:
            if not repeatlast:
                return
            # Keep re-yielding the final in-range value.
            yield current - step
        else:
            yield current
            current += step
def fxrangemax(start=1.0, stop=None, step=1.0, max=100.0):
    """Like :func:`fxrange`, but stop once the running sum of yielded-next
    values reaches *max*.

    (The ``max`` parameter name shadows the builtin but is kept for
    backwards compatibility.)
    """
    total = 0
    current = float(start)
    while total < max:
        yield current
        # Advance, clamping at *stop* when one is given.
        current = min(current + step, stop) if stop else current + step
        total += current
def retry_over_time(fun, catch, args=None, kwargs=None, errback=None,
                    max_retries=None, interval_start=2, interval_step=2,
                    interval_max=30, callback=None):
    """Retry the function over and over until max retries is exceeded.
    For each retry we sleep a for a while before we try again, this interval
    is increased for every retry until the max seconds is reached.
    :param fun: The function to try
    :param catch: Exceptions to catch, can be either tuple or a single
        exception class.
    :keyword args: Positional arguments passed on to the function.
    :keyword kwargs: Keyword arguments passed on to the function.
    :keyword errback: Callback for when an exception in ``catch`` is raised.
        The callback must take three arguments: ``exc``, ``interval_range`` and
        ``retries``, where ``exc`` is the exception instance, ``interval_range``
        is an iterator which return the time in seconds to sleep next, and
        ``retries`` is the number of previous retries.
    :keyword max_retries: Maximum number of retries before we give up.
        If this is not set, we will retry forever.
    :keyword interval_start: How long (in seconds) we start sleeping between
        retries.
    :keyword interval_step: By how much the interval is increased for each
        retry.
    :keyword interval_max: Maximum number of seconds to sleep between retries.
    """
    # None sentinels instead of mutable defaults ([]/{}) — the old defaults
    # were shared between calls, which is a classic Python pitfall.
    args = [] if args is None else args
    kwargs = {} if kwargs is None else kwargs
    # Backoff schedule; repeatlast keeps yielding interval_max forever.
    interval_range = fxrange(interval_start,
                             interval_max + interval_start,
                             interval_step, repeatlast=True)
    for retries in count():
        try:
            return fun(*args, **kwargs)
        except catch as exc:
            if max_retries and retries >= max_retries:
                raise
            if callback:
                callback()
            # errback may override the next sleep duration entirely.
            tts = float(errback(exc, interval_range, retries) if errback
                        else next(interval_range))
            if tts:
                # Sleep in 1s slices so `callback` stays responsive.
                for _ in range(int(tts)):
                    if callback:
                        callback()
                    sleep(1.0)
                # sleep remainder after int truncation above.
                sleep(abs(int(tts) - tts))
def emergency_dump_state(state, open_file=open, dump=None, stderr=None):
    """Dump *state* to a fresh temporary file as a last-resort diagnostic.

    :param state: any picklable object.
    :keyword open_file: callable used to open the dump file (injectable
        for testing); invoked as ``open_file(path, 'w')``.
    :keyword dump: serializer called as ``dump(state, fh, protocol=0)``;
        defaults to :func:`pickle.dump`.  If it raises, the state is
        written with :func:`pprint.pformat` instead.
    :keyword stderr: stream for the notification message
        (default :data:`sys.stderr`).

    Returns the path of the file written.
    """
    import os
    from pprint import pformat
    from tempfile import mkstemp
    stderr = sys.stderr if stderr is None else stderr
    if dump is None:
        import pickle
        dump = pickle.dump
    # mkstemp instead of the deprecated, race-prone mktemp: the file is
    # created atomically; we only need its path, so close the raw fd.
    fd, persist = mkstemp()
    os.close(fd)
    print('EMERGENCY DUMP STATE TO FILE -> {0} <-'.format(persist),  # noqa
          file=stderr)
    fh = open_file(persist, 'w')
    try:
        try:
            dump(state, fh, protocol=0)
        except Exception as exc:
            print(  # noqa
                'Cannot pickle state: {0!r}. Fallback to pformat.'.format(exc),
                file=stderr,
            )
            fh.write(default_encode(pformat(state)))
    finally:
        fh.flush()
        fh.close()
    return persist
class cached_property(object):
    """Property descriptor that caches the return value
    of the get function.

    The computed value is stored in the instance ``__dict__`` under the
    property's name; subsequent reads return the cached value without
    calling the getter again.  Optional setter/deleter hooks transform
    the value being stored / react to deletion.

    *Examples*

    .. code-block:: python

        @cached_property
        def connection(self):
            return Connection()

        @connection.setter  # Prepares stored value
        def connection(self, value):
            if value is None:
                raise TypeError('Connection must be a connection')
            return value

        @connection.deleter
        def connection(self, value):
            # Additional action to do at del(self.attr)
            if value is not None:
                print('Connection {0!r} deleted'.format(value))
    """
    def __init__(self, fget=None, fset=None, fdel=None, doc=None):
        # fget/fset/fdel are name-mangled to keep them private.
        self.__get = fget
        self.__set = fset
        self.__del = fdel
        self.__doc__ = doc or fget.__doc__
        self.__name__ = fget.__name__
        self.__module__ = fget.__module__
    def __get__(self, obj, type=None):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        try:
            # Cache hit: value previously stored in the instance dict.
            return obj.__dict__[self.__name__]
        except KeyError:
            # Cache miss: compute via the getter and memoize.
            value = obj.__dict__[self.__name__] = self.__get(obj)
            return value
    def __set__(self, obj, value):
        if obj is None:
            return self
        # The optional setter may transform the value before storing.
        if self.__set is not None:
            value = self.__set(obj, value)
        obj.__dict__[self.__name__] = value
    def __delete__(self, obj):
        if obj is None:
            return self
        try:
            # Drop the cached value (no-op when nothing was cached).
            value = obj.__dict__.pop(self.__name__)
        except KeyError:
            pass
        else:
            if self.__del is not None:
                self.__del(obj, value)
    def setter(self, fset):
        # Decorator API mirroring builtin property().
        return self.__class__(self.__get, fset, self.__del)
    def deleter(self, fdel):
        return self.__class__(self.__get, self.__set, fdel)
def reprkwargs(kwargs, sep=', ', fmt='{0}={1}'):
    """Render a mapping as ``key=value`` pairs joined by *sep*."""
    return sep.join(fmt.format(key, _safe_repr(value))
                    for key, value in items(kwargs))
def reprcall(name, args=(), kwargs={}, sep=', '):
    """Format a call signature ``name(arg1, arg2, kw=value)`` for display."""
    argstr = sep.join(map(_safe_repr, args or ()))
    kwargstr = reprkwargs(kwargs, sep)
    # A separator between the two groups only when both are non-empty.
    joiner = sep if (args and kwargs) else ''
    return '{0}({1}{2}{3})'.format(name, argstr, joiner, kwargstr)
@contextmanager
def nested(*managers):  # pragma: no cover
    # flake8: noqa
    """Combine multiple context managers into a single nested
    context manager.

    Reimplementation of the removed ``contextlib.nested``: enters every
    manager in order, yields the list of ``__enter__`` results, and on
    the way out calls each ``__exit__`` in reverse order, giving every
    manager the chance to observe (and suppress) the active exception.
    """
    exits = []
    vars = []
    exc = (None, None, None)
    try:
        try:
            for mgr in managers:
                exit = mgr.__exit__
                enter = mgr.__enter__
                vars.append(enter())
                exits.append(exit)
            yield vars
        except:
            # Capture the in-flight exception so the exit handlers below
            # receive it, exactly like a real nested `with` would.
            exc = sys.exc_info()
        finally:
            while exits:
                exit = exits.pop()
                try:
                    # A truthy __exit__ return suppresses the exception.
                    if exit(*exc):
                        exc = (None, None, None)
                except:
                    exc = sys.exc_info()
            if exc != (None, None, None):
                # Don't rely on sys.exc_info() still containing
                # the right information. Another exception may
                # have been raised and caught by an exit method
                reraise(exc[0], exc[1], exc[2])
    finally:
        del(exc)
def shufflecycle(it):
    """Endlessly yield the first element of a private copy of *it*,
    reshuffling the copy before every yield."""
    pool = list(it)  # don't modify callers list
    for _ in repeat(None):
        random.shuffle(pool)
        yield pool[0]
def entrypoints(namespace):
    """Iterate over ``(entrypoint, loaded_object)`` pairs for *namespace*.

    Returns an empty iterator when setuptools' ``pkg_resources`` is not
    installed.  Loading is lazy: ``ep.load()`` runs only as the returned
    generator is consumed.
    """
    try:
        from pkg_resources import iter_entry_points
    except ImportError:
        return iter([])
    return ((ep, ep.load()) for ep in iter_entry_points(namespace))
class ChannelPromise(object):
    """Lazily evaluate *contract* once, caching its result for later calls."""

    def __init__(self, contract):
        self.__contract__ = contract

    def __call__(self):
        try:
            return self.__value__
        except AttributeError:
            pass
        result = self.__value__ = self.__contract__()
        return result

    def __repr__(self):
        try:
            return repr(self.__value__)
        except AttributeError:
            return '<promise: 0x{0:x}>'.format(id(self.__contract__))
def escape_regex(p, white=''):
    """Escape *p* for use inside a regular expression.

    Alphanumerics and any characters listed in *white* pass through
    untouched; NUL becomes the octal escape ``\\000``; everything else
    is backslash-escaped.
    """
    out = []
    for ch in p:
        if ch.isalnum() or ch in white:
            out.append(ch)
        elif ch == '\000':
            out.append('\\000')
        else:
            out.append('\\' + ch)
    return ''.join(out)
def fileno(f):
    """Return *f* itself when it is already a descriptor number,
    otherwise delegate to ``f.fileno()``."""
    return f if isinstance(f, numbers.Integral) else f.fileno()
def maybe_fileno(f):
    """Get object fileno, or :const:`None` if not defined."""
    try:
        return fileno(f)
    except FILENO_ERRORS:
        return None
def coro(gen):
    """Decorator priming a generator function.

    The wrapper advances the new generator to its first ``yield`` so the
    returned object can immediately accept ``send()``.
    """
    @wraps(gen)
    def wind_up(*args, **kwargs):
        coroutine = gen(*args, **kwargs)
        next(coroutine)
        return coroutine
    return wind_up
| |
# -*- coding: utf-8 -*-
import numpy as np
# import scipy.ndimage
from scipy.stats import rv_continuous
from ..types import transformationType
def makeGaussian(x, y, x0, y0, sx, sy, rho, A):
    """Unnormalized 2D Gaussian with correlation *rho* and amplitude *A*."""
    dx = x - x0
    dy = y - y0
    quad = dx ** 2 / sx ** 2 - 2 * rho / (sx * sy) * dx * dy + dy ** 2 / sy ** 2
    # Peak value is exactly A (no 1/(2*pi*sx*sy*sqrt(1-rho^2)) normalization).
    return A * np.exp(-quad / (2 * (1 - rho ** 2)))
def makeGaussianAngle(x, y, x0, y0, sx, sy, angle, A):
    """Unnormalized 2D Gaussian with principal axes rotated by *angle*."""
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    sin_2a = np.sin(2 * angle)
    var_x = sx ** 2
    var_y = sy ** 2
    # Standard rotated-Gaussian quadratic-form coefficients.
    a = cos_a ** 2 / (2 * var_x) + sin_a ** 2 / (2 * var_y)
    b = -sin_2a / (4 * var_x) + sin_2a / (4 * var_y)
    c = sin_a ** 2 / (2 * var_x) + cos_a ** 2 / (2 * var_y)
    dx = x - x0
    dy = y - y0
    return A * np.exp(-(a * dx ** 2 - 2 * b * dx * dy + c * dy ** 2))
def gettransformedimage(x, y, data, angle=False):
    """Sum of 2D Gaussian peaks evaluated on coordinate arrays (x, y).

    Each entry of *data* is ``(x0, y0, sx, sy, rho_or_angle, A)``; with
    ``angle=True`` the fifth element is interpreted as a rotation angle.
    """
    peak = makeGaussianAngle if angle else makeGaussian
    total = np.zeros(x.shape, dtype=float)
    for x0, y0, sx, sy, rho, A in data:
        total += peak(x, y, x0, y0, sx, sy, rho, A)
    return total
# def getlargeimage(npeaks,nsigma,shape,subshape):
# n = np.round(npeaks*min(shape)/min(subshape)).astype(np.int)
# image = np.zeros(shape,dtype=np.float32)
# np.random.seed(1)
# x = shape[0]*np.random.random(n**2)
# y = shape[1]*np.random.random(n**2)
# image[x.astype(np.int), y.astype(np.int)] = 1
# image = scipy.ndimage.gaussian_filter(image, sigma=min(subshape)/(npeaks*nsigma))
# image /= np.max(image)
def makeGaussian1D(x, x0, sx, A):
    """Unnormalized 1D Gaussian centred at *x0* with peak value *A*."""
    dx = x - x0
    return A * np.exp(-dx ** 2 / (2.0 * sx ** 2))
def gettransformedvector(x, data, angle=False):
    """Sum of 1D Gaussian peaks ``(x0, sx, A)`` evaluated on *x*.

    The *angle* argument is accepted for signature parity with
    :func:`gettransformedimage` but is unused in 1D.
    """
    total = np.zeros(x.shape, dtype=float)
    for x0, sx, A in data:
        total += makeGaussian1D(x, x0, sx, A)
    return total
def translation(dx, dy):
    """Homogeneous 3x3 coordinate matrix and its inverse for a shift (dx, dy)."""
    forward = np.identity(3)
    backward = np.identity(3)
    forward[:2, 2] = (dx, dy)
    backward[:2, 2] = (-dx, -dy)
    return forward, backward
def rigid(a, tx, ty):
    """Rotation (cosine *a*) plus translation, and its inverse.

    *a* must lie in [0, 1]; the sine is derived as sqrt(1 - a^2).
    """
    if a < 0 or a > 1:
        raise ValueError("A rigid transformation is expected.")
    b = np.sqrt(1 - a * a)
    forward = np.identity(3)
    backward = np.identity(3)
    forward[0, :] = [a, -b, tx]
    forward[1, :] = [b, a, ty]
    # Inverse: transpose of the rotation applied to the negated shift.
    backward[0, :] = [a, b, -a * tx - b * ty]
    backward[1, :] = [-b, a, b * tx - a * ty]
    return forward, backward
def similarity(a, b, tx, ty):
    """Similarity transform (scaled rotation [a, -b; b, a] + translation)
    and its inverse as homogeneous 3x3 matrices.

    Fix: ``np.float`` was removed in NumPy 1.24+; use the builtin float.
    """
    Mcoord = np.identity(3)
    Mcof = np.identity(3)
    Mcoord[0, 0:3] = [a, -b, tx]
    Mcoord[1, 0:3] = [b, a, ty]
    s = float(a * a + b * b)  # squared scale factor
    Mcof[0, 0:3] = [a / s, b / s, -(a * tx + b * ty) / s]
    Mcof[1, 0:3] = [-b / s, a / s, (b * tx - a * ty) / s]
    return Mcoord, Mcof
def affine(a, b, c, d, tx, ty):
    """Affine transform [[a, b], [c, d]] + translation and its inverse
    as homogeneous 3x3 matrices.

    Fix: ``np.float`` was removed in NumPy 1.24+; use the builtin float.
    """
    Mcoord = np.identity(3)
    Mcof = np.identity(3)
    Mcoord[0, 0:3] = [a, b, tx]
    Mcoord[1, 0:3] = [c, d, ty]
    det = float(a * d - b * c)
    # Closed-form 2x2 inverse applied to the linear part and the shift.
    Mcof[0, 0:3] = [d / det, -b / det, (b * ty - d * tx) / det]
    Mcof[1, 0:3] = [-c / det, a / det, (c * tx - a * ty) / det]
    return Mcoord, Mcof
def projective(a, b, c, d, tx, ty, px, py):
    """Projective (homography) transform and its inverse.

    The inverse is built from the adjugate divided by the determinant.
    Fix: ``np.float`` was removed in NumPy 1.24+; use the builtin float.
    """
    Mcoord = np.identity(3)
    Mcof = np.identity(3)
    Mcoord[0, 0:3] = [a, b, tx]
    Mcoord[1, 0:3] = [c, d, ty]
    Mcoord[2, 0:3] = [px, py, 1]
    det = float(
        a * d - a * py * ty - b * c + b * px * ty + c * py * tx - d * px * tx
    )
    # Adjugate (transposed cofactor matrix) rows.
    Mcof[0, 0:3] = [d - py * ty, py * tx - b, b * ty - d * tx]
    Mcof[1, 0:3] = [px * ty - c, a - px * tx, c * tx - a * ty]
    Mcof[2, 0:3] = [c * py - d * px, b * px - a * py, a * d - b * c]
    Mcof /= det
    return Mcoord, Mcof
def transformation(t, n, subpixel=True):
    """Build the ``(Mcoord, Mcof)`` matrix pair for transformation type *t*.

    Parameters are scaled so that the accumulated rotation over *n*
    images is 40 degrees; *subpixel* selects fractional vs (mostly)
    integral translation offsets.

    Raises:
        NotImplementedError: for unknown transformation types.
        (Error-message grammar fixed: "transformation to has" -> "transformation has".)
    """
    if t == transformationType.rigid:
        # total rotation is 40 degrees
        a = np.around(np.cos(40.0 / 180.0 * np.pi / (n - 1)), 3)
        # a = np.sqrt(3)/2 # between -1 and 1
    elif t == transformationType.similarity:
        a = np.around(np.cos(40.0 / 180.0 * np.pi / (n - 1)), 3)
        b = np.around(np.sin(40.0 / 180.0 * np.pi / (n - 1)), 3)
        a *= 1.1
        b *= 1.1
    else:
        a = np.around(np.cos(40.0 / 180.0 * np.pi / (n - 1)), 3)
        b = np.around(np.sin(40.0 / 180.0 * np.pi / (n - 1)), 3)
        a *= 1.1
        b *= 1.2
        c = -b * 0.9
        d = a * 0.9
    if subpixel:
        tx = 1.5
        ty = -1.7
    else:
        tx = 1.0
        ty = -4.0  # TODO; -2
    # Small perspective terms used only by the projective case.
    px = 0.001
    py = -0.001
    if t == transformationType.translation:
        return translation(tx, ty)
    elif t == transformationType.rigid:
        return rigid(a, tx, ty)
    elif t == transformationType.similarity:
        return similarity(a, b, tx, ty)
    elif t == transformationType.affine:
        return affine(a, b, c, d, tx, ty)
    elif t == transformationType.projective:
        return projective(a, b, c, d, tx, ty, px, py)
    else:
        raise NotImplementedError("This transformation has not been implemented.")
class rvgen(rv_continuous):
    """Continuous distribution with a standard-normal pdf.

    Fix: removed an unreachable second ``return`` statement that followed
    the first return in ``_pdf`` (dead code).
    """
    def _pdf(self, x):
        # Standard normal density; height 1/sqrt(2*pi) at x = 0.
        H = 1 / np.sqrt(2.0 * np.pi)
        return H * np.exp(-(x ** 2) / 2.0)
def random(a, b, n):
    """Return *n* uniform samples from the half-open interval [a, b)."""
    return a + (b - a) * np.random.random(n)
def genpos1d(a, b, npeaks):
    """Random 1D peak positions in [a, b), excluding a central band.

    Positions within 10% of the span from the interval centre are
    discarded; returns the surviving positions and their count.
    """
    x = random(a, b, npeaks)
    exclusion = (b - a) * 0.1
    centre = (a + b) / 2.0
    keep = abs(x - centre) > exclusion
    return x[keep], sum(keep)
def genpos2d(a, b, c, d, npeaks):
    """Random 2D peak positions in [a, b) x [c, d), excluding the centre.

    Peaks closer than 10% of the larger span to the domain centre are
    discarded; returns ``(x, y, count)`` for the survivors.
    """
    x = random(a, b, npeaks)
    y = random(c, d, npeaks)
    exclusion = max(b - a, d - c) * 0.1
    xc = (a + b) / 2.0
    yc = (c + d) / 2.0
    dist = ((x - xc) ** 2 + (y - yc) ** 2) ** 0.5
    keep = dist > exclusion
    return x[keep], y[keep], sum(keep)
def data(
    transfotype,
    ndim1=61,  # TODO 61
    ndim2=57,
    nimages=4,
    nstacks=2,
    ndim=100,
    vector=False,
    transposed=False,
    realistic=True,
    subpixel=True,
    plot=False,
    inverse=False,
):
    """Generate synthetic Gaussian-peak image (or vector) stacks related
    by a known geometric transformation.

    Fix: ``np.product`` was removed in NumPy 2.0; replaced by ``np.prod``
    (two occurrences).  All other logic is untouched.

    Returns:
        3-tuple: list of image stacks, change-of-coordinate matrix between the subsequent images, stack dimensions
    """
    if vector and transfotype != transformationType.translation:
        raise ValueError("Vectors can only be shifted.")
    # Transformation between images
    Mcoord, Mcof = transformation(transfotype, nimages, subpixel=subpixel)
    stackdim = 0
    if vector:
        # Shape of a subimage
        if transposed:
            dimi = 0
            subshape = (ndim, 1)
        else:
            dimi = 1
            subshape = (1, ndim)
        Mcoord[dimi, 2] = 0
        Mcof[dimi, 2] = 0
        subxmin = -subshape[dimi] // 2
        subxmax = subshape[dimi] // 2
        if transposed:
            subshape = (subxmax - subxmin + 1, 1)
        else:
            subshape = (1, subxmax - subxmin + 1)
        # Determine shape of large image (transform corners)
        d = Mcof[1 - dimi, 2] * (nimages - 1)
        xmin = int(min(subxmin, subxmin + d))
        xmax = int(max(subxmax, subxmax + d))
        # Gaussians in large image
        npix = np.prod(subshape)
        npixperpeak = 2.0
        npeaks = int(npix / npixperpeak)
        sx = npixperpeak * 1.5
        margin = int(10 * sx)
        xmin -= margin
        xmax += margin
        shape = subshape
        if transposed:
            shape = (xmax - xmin + 1, 1)
        else:
            shape = (1, xmax - xmin + 1)
        np.random.seed(1)
        if realistic:
            x0, npeaks = genpos1d(xmin, xmax, npeaks)
        else:
            x0, npeaks = genpos1d(subxmin, subxmax, npeaks)
            # NOTE(review): (x0 > 5) | (x0 < 5) only excludes positions
            # exactly equal to 5 — looks like it was meant to be `!= 5`.
            ind = (x0 > 5) | (x0 < 5)
            x0 = x0[ind]
            npeaks = sum(ind)
        sx = random(sx * 0.8, sx * 1.2, npeaks)
        A = random(1, 5, npeaks)
        data = tuple(zip(x0, sx, A))
        # Plot full images
        if plot:
            xv = np.arange(xmin, xmax + 1)
            img = gettransformedvector(xv, data).reshape(shape)
            import matplotlib.pyplot as plt
            plt.figure(2)
            plt.plot(xv.flat, img.flat)
            plt.show()
        # Stack of transformed subimages
        if realistic:
            s = subshape
            xv = np.arange(subxmin, subxmax + 1)
        else:
            s = shape
            xv = np.arange(xmin, xmax + 1)
        ret = np.empty((nimages,) + s, dtype=np.float32)
        xy = np.copy(xv)
        ret[0] = gettransformedvector(xy, data).reshape(s)
        for i in range(1, nimages):
            xy = xy + Mcof[1 - dimi, 2]
            ret[i] = gettransformedvector(xy, data).reshape(s)
    else:
        # Shape of a subimage
        subshape = (ndim1, ndim2)
        subxmin = -subshape[1] // 2
        subymin = -subshape[0] // 2
        subxmax = subshape[1] // 2
        subymax = subshape[0] // 2
        # NOTE(review): the two values above are immediately overwritten;
        # the effective sub-image spans [submin, dim-1].
        subxmax = subshape[1] - 1
        subymax = subshape[0] - 1
        subshape = (subymax - subymin + 1, subxmax - subxmin + 1)
        # Determine shape of large image (transform corners)
        xy = np.empty((3, 4))
        xy[0, :] = [subxmin, subxmax, subxmin, subxmax]
        xy[1, :] = [subymin, subymin, subymax, subymax]
        xy[2, :] = [1, 1, 1, 1]
        myminmax = np.append(np.min(xy, axis=1), np.max(xy, axis=1))
        for i in range(1, nimages):
            xy = np.dot(Mcoord, xy)
            xy[0, :] /= xy[2, :]
            xy[1, :] /= xy[2, :]
            myminmax[0:3] = np.minimum(myminmax[0:3], np.min(xy, axis=1))
            myminmax[3:] = np.maximum(myminmax[3:], np.max(xy, axis=1))
        xmin = int(myminmax[0])
        ymin = int(myminmax[1])
        xmax = int(myminmax[3])
        ymax = int(myminmax[4])
        # Gaussians in large image
        npix = np.prod(subshape)
        npixperpeak = 10.0
        npeaks = int(npix / npixperpeak)
        sxy = np.sqrt(npixperpeak)
        margin = int(10 * sxy)
        xmin -= margin
        ymin -= margin
        xmax += margin
        ymax += margin
        shape = (ymax - ymin + 1, xmax - xmin + 1)
        np.random.seed(1)
        if realistic:
            x0, y0, npeaks = genpos2d(xmin, xmax, ymin, ymax, npeaks)
        else:
            x0, y0, npeaks = genpos2d(subxmin, subxmax, subymin, subymax, npeaks)
        sx = random(sxy * 0.8, sxy * 1.2, npeaks)
        sy = random(sxy * 0.8, sxy * 1.2, npeaks)
        rho = random(0, 0.2, npeaks)
        A = random(1, 5, npeaks)
        data = tuple(zip(x0, y0, sx, sy, rho, A))
        # Plot full image
        if plot:
            xv, yv = np.meshgrid(np.arange(xmin, xmax + 1), np.arange(ymin, ymax + 1))
            xv = xv.reshape((1, shape[0] * shape[1]))
            yv = yv.reshape((1, shape[0] * shape[1]))
            xy = np.vstack((xv, yv, np.ones_like(xv)))
            img = gettransformedimage(xv, yv, data).reshape(shape)
            import matplotlib.pyplot as plt
            plt.figure(2)
            plt.subplot(111)
            plt.imshow(img, origin="lower", interpolation="nearest")
            plt.show()
        # Stack of transformed subimages
        if realistic:
            s = subshape
            xv, yv = np.meshgrid(
                np.arange(subxmin, subxmax + 1), np.arange(subymin, subymax + 1)
            )
            ox, oy = subxmin, subymin
        else:
            s = shape
            xv, yv = np.meshgrid(np.arange(xmin, xmax + 1), np.arange(ymin, ymax + 1))
            ox, oy = xmin, ymin
        ret = np.empty((nimages,) + s, dtype=np.float32)
        xv = xv.reshape((1, s[0] * s[1]))
        yv = yv.reshape((1, s[0] * s[1]))
        xy = np.vstack((xv, yv, np.ones_like(xv)))
        ret[0] = gettransformedimage(xv, yv, data).reshape(s)
        for i in range(1, nimages):
            xy = np.dot(Mcof, xy)  # coordinates from new frame to old frame
            xy[0, :] /= xy[2, :]
            xy[1, :] /= xy[2, :]
            ret[i] = gettransformedimage(xy[0, :], xy[1, :], data).reshape(s)
        # Relative change-of-frame in subimage pixel frame
        C = np.identity(3, dtype=Mcof.dtype)
        Cinv = np.identity(3, dtype=Mcof.dtype)
        C[0:2, 2] = [ox, oy]  # image pixel frame to subimage pixel frame
        Cinv[0:2, 2] = -C[0:2, 2]
        Mcof = np.dot(np.dot(Cinv, Mcof), C)
        Mcoord = np.dot(np.dot(Cinv, Mcoord), C)
    # Mcoord: change-of-frame matrix of the back-transformation
    if inverse:
        ret = ret.max() - ret
    return [ret.copy() for _ in range(nstacks)], Mcoord, stackdim
| |
from __future__ import print_function, division, absolute_import
from time import sleep, time
import pytest
from toolz import concat, valmap, partial
from dask import compute, get
from dask.bytes.local import read_bytes, open_files, getsize
from dask.bytes.core import open_text_files
from dask.compatibility import FileNotFoundError
from dask.utils import filetexts
from dask.bytes import compression
# Force synchronous scheduling so every test computes eagerly and
# deterministically with the single-threaded get.
compute = partial(compute, get=get)
# Fixture data: two hidden JSON-lines files materialized by ``filetexts``.
files = {'.test.accounts.1.json': (b'{"amount": 100, "name": "Alice"}\n'
                                   b'{"amount": 200, "name": "Bob"}\n'
                                   b'{"amount": 300, "name": "Charlie"}\n'
                                   b'{"amount": 400, "name": "Dennis"}\n'),
         '.test.accounts.2.json': (b'{"amount": 500, "name": "Alice"}\n'
                                   b'{"amount": 600, "name": "Bob"}\n'
                                   b'{"amount": 700, "name": "Charlie"}\n'
                                   b'{"amount": 800, "name": "Dennis"}\n')}
def test_read_bytes():
    """Sample is raw bytes from the first file; values are nested lazy
    blocks that compute back to the full file contents."""
    with filetexts(files, mode='b'):
        sample, values = read_bytes('.test.accounts.*')
        first_contents = files[sorted(files)[0]]
        assert isinstance(sample, bytes)
        assert sample[:5] == first_contents[:5]
        assert isinstance(values, (list, tuple))
        assert isinstance(values[0], (list, tuple))
        assert hasattr(values[0][0], 'dask')
        assert sum(len(v) for v in values) >= len(files)
        results = compute(*concat(values))
        assert set(results) == set(files.values())
def test_read_bytes_blocksize_none():
    """blocksize=None produces exactly one block per file."""
    with filetexts(files, mode='b'):
        _, values = read_bytes('.test.accounts.*', blocksize=None)
        assert sum(len(v) for v in values) == len(files)
def test_read_bytes_block():
    """Each file splits into ceil(len/bs) blocks whose bytes concatenate
    back to the original lines."""
    with filetexts(files, mode='b'):
        for blocksize in [5, 15, 45, 1500]:
            _, blocks = read_bytes('.test.account*', blocksize=blocksize)
            expected = [(len(v) // blocksize + 1) for v in files.values()]
            assert [len(b) for b in blocks] == expected
            pieces = compute(*concat(blocks))
            assert sum(len(p) for p in pieces) == sum(len(v) for v in files.values())
            ourlines = b"".join(pieces).split(b'\n')
            testlines = b"".join(files.values()).split(b'\n')
            assert set(ourlines) == set(testlines)
def test_read_bytes_delimited():
    """Splitting on a delimiter keeps records intact across block sizes."""
    with filetexts(files, mode='b'):
        for bs in [5, 15, 45, 1500]:
            _, values = read_bytes('.test.accounts*',
                                   blocksize=bs, delimiter=b'\n')
            _, values2 = read_bytes('.test.accounts*',
                                    blocksize=bs, delimiter=b'foo')
            # Different delimiters must produce differently-keyed tasks.
            assert ([a.key for a in concat(values)] !=
                    [b.key for b in concat(values2)])
            results = compute(*concat(values))
            res = [r for r in results if r]
            # Every non-empty block ends on the delimiter...
            assert all(r.endswith(b'\n') for r in res)
            ourlines = b''.join(res).split(b'\n')
            testlines = b"".join(files[k] for k in sorted(files)).split(b'\n')
            # ...and reassembly preserves line order across files.
            assert ourlines == testlines

            # delimiter not at the end
            d = b'}'
            _, values = read_bytes('.test.accounts*', blocksize=bs, delimiter=d)
            results = compute(*concat(values))
            res = [r for r in results if r]
            # All should end in } except EOF
            assert sum(r.endswith(b'}') for r in res) == len(res) - 2
            ours = b"".join(res)
            test = b"".join(files[v] for v in sorted(files))
            assert ours == test
# Compression parametrization: every format gets a whole-file read
# (blocksize=None); only seekable formats also get fixed-size blocks.
from dask.bytes.compression import compress, files as cfiles, seekable_files
fmt_bs = [(fmt, None) for fmt in cfiles] + [(fmt, 10) for fmt in seekable_files]
@pytest.mark.parametrize('fmt,blocksize', fmt_bs)
def test_compression(fmt, blocksize):
    """Compressed files round-trip through read_bytes."""
    compressed = valmap(compression.compress[fmt], files)
    with filetexts(compressed, mode='b'):
        sample, values = read_bytes('.test.accounts.*.json',
                                    blocksize=blocksize, delimiter=b'\n',
                                    compression=fmt)
        assert sample[:5] == files[sorted(files)[0]][:5]
        payload = b''.join(compute(*concat(values)))
        assert payload == b''.join(files[k] for k in sorted(files))
def test_registered_read_bytes():
    """The core-registered read_bytes dispatches to the local backend."""
    from dask.bytes.core import read_bytes
    with filetexts(files, mode='b'):
        _, values = read_bytes('.test.accounts.*')
        assert set(compute(*concat(values))) == set(files.values())
def test_registered_open_files():
    """The core-registered open_files yields one handle per file, in order."""
    from dask.bytes.core import open_files
    with filetexts(files, mode='b'):
        handles = open_files('.test.accounts.*')
        assert len(handles) == len(files)
        contents = compute(*[handle.read() for handle in handles])
        assert list(contents) == [files[k] for k in sorted(files)]
@pytest.mark.parametrize('encoding', ['utf-8', 'ascii'])
def test_registered_open_text_files(encoding):
    """Text handles decode file contents with the requested encoding."""
    from dask.bytes.core import open_text_files
    with filetexts(files, mode='b'):
        handles = open_text_files('.test.accounts.*', encoding=encoding)
        assert len(handles) == len(files)
        contents = compute(*[handle.read() for handle in handles])
        expected = [files[k].decode(encoding) for k in sorted(files)]
        assert list(contents) == expected
def test_open_files():
    """Local open_files yields readable handles in sorted-file order."""
    with filetexts(files, mode='b'):
        handles = open_files('.test.accounts.*')
        assert len(handles) == len(files)
        contents = compute(*[handle.read() for handle in handles])
        assert list(contents) == [files[k] for k in sorted(files)]
@pytest.mark.parametrize('fmt', list(cfiles))
def test_compression_binary(fmt):
    """open_files transparently decompresses binary contents."""
    from dask.bytes.core import open_files
    compressed = valmap(compression.compress[fmt], files)
    with filetexts(compressed, mode='b'):
        handles = open_files('.test.accounts.*', compression=fmt)
        contents = compute(*[handle.read() for handle in handles])
        assert list(contents) == [files[k] for k in sorted(files)]
@pytest.mark.parametrize('fmt', list(cfiles))
def test_compression_text(fmt):
    """open_text_files decompresses and decodes in one step."""
    compressed = valmap(compression.compress[fmt], files)
    with filetexts(compressed, mode='b'):
        handles = open_text_files('.test.accounts.*', compression=fmt)
        contents = compute(*[handle.read() for handle in handles])
        assert list(contents) == [files[k].decode() for k in sorted(files)]
@pytest.mark.parametrize('fmt', list(seekable_files))
def test_getsize(fmt):
    """getsize reports the *uncompressed* byte count of a seekable file."""
    payload = compression.compress[fmt](b'1234567890')
    with filetexts({'.tmp.getsize': payload}, mode='b'):
        assert getsize('.tmp.getsize', fmt) == 10
def test_bad_compression():
    """An unknown compression name raises ValueError in every opener."""
    from dask.bytes.core import read_bytes, open_files, open_text_files
    with filetexts(files, mode='b'):
        for opener in (read_bytes, open_files, open_text_files):
            with pytest.raises(ValueError):
                opener('.test.accounts.*', compression='not-found')
def test_not_found():
    """A missing path raises FileNotFoundError mentioning the path."""
    missing = 'not-a-file'
    with pytest.raises(FileNotFoundError) as excinfo:
        read_bytes(missing)
    assert missing in str(excinfo)
@pytest.mark.slow
def test_names():
    """Task keys are stable across identical reads, but change once the
    underlying files are modified."""
    with filetexts(files, mode='b'):
        _, first = read_bytes('.test.accounts.*')
        _, second = read_bytes('.test.accounts.*')
        first = list(concat(first))
        second = list(concat(second))
        assert [t._key for t in first] == [t._key for t in second]
        sleep(1)
        for name in files:
            with open(name, 'ab') as fh:
                fh.write(b'x')
        _, third = read_bytes('.test.accounts.*')
        third = list(concat(third))
        assert [t._key for t in first] != [t._key for t in third]
@pytest.mark.parametrize('open_files', [open_files, open_text_files])
def test_modification_time_open_files(open_files):
    """Handle keys change when file contents (hence mtimes) change."""
    with filetexts(files, mode='b'):
        first = open_files('.test.accounts.*')
        second = open_files('.test.accounts.*')
        assert [fh._key for fh in first] == [fh._key for fh in second]
    sleep(1)
    double = lambda x: x + x
    with filetexts(valmap(double, files), mode='b'):
        third = open_files('.test.accounts.*')
        assert [fh._key for fh in first] != [fh._key for fh in third]
| |
def direct_f(size, p_, cos_coef, sin_coef, l_max_dir, diff=False):
    """Direct spherical-harmonic synthesis onto a (size+1, size//2+1) grid.

    For every latitude index j the (cos, sin) coefficient sums are folded
    over degree l for each order m, then an FFT over m produces the
    longitudes; with ``diff=True`` the real/imaginary FFT parts combine
    with opposite signs (derivative-type transform).

    Fix: Python 3 compatibility — ``xrange`` -> ``range`` and integer
    floor division ``size // 2`` (the old ``size / 2`` becomes a float
    under Python 3 and breaks ``zeros``/``range``).
    """
    from numpy import zeros, real, imag
    from numpy.fft import fft
    f = zeros((size + 1, size // 2 + 1))
    func1 = 0.0
    func2 = 0.0
    fa = zeros(size)
    fb = zeros(size)
    if diff:
        for j in range(1, size // 2):
            for m in range(0, l_max_dir + 1):
                for l in range(m, l_max_dir + 1):
                    func1 += cos_coef[m][l] * p_[j][m][l]
                    func2 += sin_coef[m][l] * p_[j][m][l]
                fa[m] = func1
                fb[m] = func2
                func1 = 0.0
                func2 = 0.0
            t = - imag(fft(fa)) + real(fft(fb))
            f[0:size, j] = t[:]
            f[size][j] = f[0][j]  # wrap the longitude seam
    else:
        for j in range(1, size // 2):
            for m in range(0, l_max_dir + 1):
                for l in range(m, l_max_dir + 1):
                    func1 += cos_coef[m][l] * p_[j][m][l]
                    func2 += sin_coef[m][l] * p_[j][m][l]
                fa[m] = func1
                fb[m] = func2
                func1 = 0.0
                func2 = 0.0
            t = real(fft(fa)) + imag(fft(fb))
            f[0:size, j] = t[:]
            f[size][j] = f[0][j]
    return f
def inverse_f(size, f, p_, l_max_inv):
    """Inverse transform: recover (cos, sin) coefficient tables from grid *f*.

    Fixes:
      * Python 3 compatibility (``xrange`` -> ``range``, ``size // 2``).
      * The original indexed ``back_fa[j][my_m]``/``back_fb[j][my_m]``,
        which double-indexes the 1-D ``ifft`` result and always raises;
        corrected to ``back_fa[my_m]`` to match the working sibling
        ``inverse_f_int``.
    """
    from numpy import zeros, real, imag
    from numpy.fft import ifft
    from math import pi, sin
    f = f.T
    back_cos_coef = zeros((l_max_inv + 1, l_max_inv + 1))
    back_sin_coef = zeros((l_max_inv + 1, l_max_inv + 1))
    # Quadrature normalization: sum of sin(theta) over the latitude ring.
    norm = 0.0
    for j in range(1, size // 2):
        norm += sin(2.0 * pi * j / size)
    for my_m in range(0, l_max_inv + 1):
        for my_l in range(my_m, l_max_inv + 1):
            sum_a = 0.0
            sum_b = 0.0
            for j in range(1, size // 2):
                theta = 2.0 * pi * j / size
                back_f = ifft(f[j][0:size])
                back_fa = real(back_f)
                back_fb = imag(back_f)
                sum_a += p_[j][my_m][my_l] * back_fa[my_m] * sin(theta) / norm * 4 * pi
                sum_b -= p_[j][my_m][my_l] * back_fb[my_m] * sin(theta) / norm * 4 * pi
            back_cos_coef[my_m][my_l] = sum_a
            back_sin_coef[my_m][my_l] = sum_b
    return back_cos_coef, back_sin_coef
def coef_1(in_l, in_m):
    """Legendre recurrence ratio sqrt((l-m)(2l+1) / ((l+m)(2l-1))).

    Returns 0.0 for l == 0, where the ratio is undefined.
    """
    from math import sqrt
    if in_l == 0:
        return 0.0
    return sqrt((in_l - in_m) * (2.0 * in_l + 1.0)
                / ((in_l + in_m) * (2.0 * in_l - 1.0)))
def legendre_discrete(j, n, l_max):
    """Normalized associated Legendre table ``P[m][l]`` at theta = 2*pi*j/n.

    Built by the standard recurrences: the diagonal, the first
    off-diagonal, then the three-term recursion upward in l; m > 0 rows
    receive the sqrt(2) real-harmonic normalization.

    Fix: Python 3 compatibility (``xrange`` -> ``range``).
    """
    from math import pi, sqrt, sin, cos
    from numpy import zeros
    theta = 2.0 * pi * j / n
    p_ = zeros((l_max + 1, l_max + 1))
    p_[0][0] = 1.0 / sqrt(4.0 * pi)
    # Diagonal recurrence: P[m+1][m+1] from P[m][m].
    for m in range(0, l_max):
        p_[m + 1][m + 1] = - p_[m][m] * sin(theta) * sqrt(2.0 * m + 3.0) / sqrt(2.0 * m + 2.0)
    # First off-diagonal: P[m][m+1].
    for m in range(0, l_max):
        p_[m][m + 1] = p_[m][m] * cos(theta) * sqrt(2.0 * m + 3.0)
    # Three-term recursion upward in l.
    for m in range(0, l_max - 1):
        for l in range(m + 2, l_max + 1):
            p_[m][l] = ((2.0 * l - 1.0) * sqrt((l - m) * (2.0 * l + 1.0)) / sqrt((l + m) * (2.0 * l - 1.0)) *
                        p_[m][l - 1] * cos(theta) - (l + m - 1.0) * sqrt((l - m) * (l - 1.0 - m) *
                                                                         (2.0 * l + 1.0)) / sqrt(
                (l + m) * (l - 1.0 + m) * (2.0 * l - 3.0)) * p_[m][l - 2]) / \
                       (l - m)
    # sqrt(2) normalization for all m > 0 orders.
    for m in range(1, l_max + 1):
        for l in range(m, l_max + 1):
            p_[m][l] *= sqrt(2.0)
    return p_
def legendre_x_discrete(j, n, l_max):
    """First x-derivative factors ``m * P[m][l] / sin(theta)`` on the grid.

    Fix: Python 3 compatibility (``xrange`` -> ``range``).
    """
    from math import pi, sin
    from numpy import zeros
    theta = 2.0 * pi * j / n
    f_x = zeros((l_max + 1, l_max + 1))
    p_ = legendre_discrete(j, n, l_max)
    for l in range(2, l_max + 1):
        for m in range(0, l + 1):
            f_x[m][l] = m * p_[m][l] / sin(theta)
    return f_x
def legendre_y_discrete(j, n, l_max):
    """First y-derivative factors of the Legendre table on the grid.

    Fix: Python 3 compatibility (``xrange`` -> ``range``).
    """
    from math import pi, sin, cos
    from numpy import zeros
    theta = 2.0 * pi * j / n
    f_y = zeros((l_max + 1, l_max + 1))
    p_ = legendre_discrete(j, n, l_max)
    for l in range(2, l_max + 1):
        for m in range(0, l + 1):
            f_y[m][l] = l * cos(theta) / sin(theta) * p_[m][l] - \
                        1.0 / sin(theta) * (l + m) * coef_1(l, m) * p_[m][l - 1]
    return f_y
def legendre_xx_discrete(j, n, l_max):
    """Second x-derivative factors of the Legendre table on the grid.

    Fix: Python 3 compatibility (``xrange`` -> ``range``).
    """
    from math import pi, sin, cos
    from numpy import zeros
    theta = 2.0 * pi * j / n
    f_xx_1 = zeros((l_max + 1, l_max + 1))
    f_xx_2 = zeros((l_max + 1, l_max + 1))
    p_ = legendre_discrete(j, n, l_max)
    for l in range(2, l_max + 1):
        for m in range(0, l + 1):
            f_xx_1[m][l] = - m * m * p_[m][l] / (sin(theta) * sin(theta))
    for l in range(2, l_max + 1):
        for m in range(0, l + 1):
            f_xx_2[m][l] = (l * cos(theta) / sin(theta) * p_[m][l] - 1.0 / sin(theta) * (l + m) * coef_1(l, m) *
                            p_[m][l - 1]) * cos(theta) / sin(theta)
    return f_xx_1 + f_xx_2
def legendre_yy_discrete(j, n, l_max):
    """Second y-derivative factors of the Legendre table on the grid.

    Fix: Python 3 compatibility (``xrange`` -> ``range``).
    """
    from math import pi, sin, cos
    from numpy import zeros
    theta = 2.0 * pi * j / n
    f_yy = zeros((l_max + 1, l_max + 1))
    p_ = legendre_discrete(j, n, l_max)
    for l in range(2, l_max + 1):
        for m in range(0, l + 1):
            f_yy[m][l] = 0.5 / sin(theta) * ((1.0 / sin(theta)) * (l * l * cos(2.0 * theta) -
                         (l + 2.0) * l + 2.0 * m * m) * p_[m][l] + 2.0 * (l + m) * cos(theta) /
                         sin(theta) * coef_1(l, m) * p_[m][l - 1])
    return f_yy
def legendre_xy_discrete(j, n, l_max):
    """Mixed xy-derivative factors of the Legendre table on the grid.

    Fix: Python 3 compatibility (``xrange`` -> ``range``).
    """
    from math import pi, sin, cos
    from numpy import zeros
    theta = 2.0 * pi * j / n
    f_xy = zeros((l_max + 1, l_max + 1))
    p_ = legendre_discrete(j, n, l_max)
    for l in range(2, l_max + 1):
        for m in range(0, l + 1):
            f_xy[m][l] = m / sin(theta) * ((1.0 / sin(theta)) * (l + m) * p_[m][l - 1] * coef_1(l, m) -
                         (l - 1.0) * cos(theta) / sin(theta) * p_[m][l])
    return f_xy
def legendre(theta, l_max):
    """Normalized associated Legendre table ``P[m][l]`` at an arbitrary
    colatitude *theta* (continuous counterpart of :func:`legendre_discrete`).

    Fix: Python 3 compatibility (``xrange`` -> ``range``).
    """
    from math import pi, sqrt, sin, cos
    from numpy import zeros
    p_ = zeros((l_max + 1, l_max + 1))
    p_[0][0] = 1.0 / sqrt(4.0 * pi)
    # Diagonal recurrence: P[m+1][m+1] from P[m][m].
    for m in range(0, l_max):
        p_[m + 1][m + 1] = - p_[m][m] * sin(theta) * sqrt(2.0 * m + 3.0) / sqrt(2.0 * m + 2.0)
    # First off-diagonal: P[m][m+1].
    for m in range(0, l_max):
        p_[m][m + 1] = p_[m][m] * cos(theta) * sqrt(2.0 * m + 3.0)
    # Three-term recursion upward in l.
    for m in range(0, l_max - 1):
        for l in range(m + 2, l_max + 1):
            p_[m][l] = ((2.0 * l - 1.0) * sqrt((l - m) * (2.0 * l + 1.0)) / sqrt((l + m) * (2.0 * l - 1.0)) *
                        p_[m][l - 1] * cos(theta) - (l + m - 1.0) * sqrt((l - m) * (l - 1.0 - m) *
                                                                         (2.0 * l + 1.0)) / sqrt(
                (l + m) * (l - 1.0 + m) * (2.0 * l - 3.0)) * p_[m][l - 2]) / \
                       (l - m)
    # sqrt(2) normalization for all m > 0 orders.
    for m in range(1, l_max + 1):
        for l in range(m, l_max + 1):
            p_[m][l] *= sqrt(2.0)
    return p_
def legendre_x(theta, l_max):
    """First x-derivative factors ``m * P[m][l] / sin(theta)``.

    Fix: Python 3 compatibility (``xrange`` -> ``range``).
    """
    from math import sin
    from numpy import zeros
    f_x = zeros((l_max + 1, l_max + 1))
    p_ = legendre(theta, l_max)
    for l in range(2, l_max + 1):
        for m in range(0, l + 1):
            f_x[m][l] = m * p_[m][l] / sin(theta)
    return f_x
def legendre_y(theta, l_max):
    """First y-derivative factors of the Legendre table.

    Fix: Python 3 compatibility (``xrange`` -> ``range``).
    """
    from math import sin, cos
    from numpy import zeros
    f_y = zeros((l_max + 1, l_max + 1))
    p_ = legendre(theta, l_max)
    for l in range(2, l_max + 1):
        for m in range(0, l + 1):
            f_y[m][l] = l * cos(theta) / sin(theta) * p_[m][l] - \
                        1.0 / sin(theta) * (l + m) * coef_1(l, m) * p_[m][l - 1]
    return f_y
def legendre_xx(theta, l_max):
    """Second x-derivative factors of the Legendre table.

    Fix: Python 3 compatibility (``xrange`` -> ``range``).
    """
    from math import sin, cos
    from numpy import zeros
    f_xx_1 = zeros((l_max + 1, l_max + 1))
    f_xx_2 = zeros((l_max + 1, l_max + 1))
    p_ = legendre(theta, l_max)
    for l in range(2, l_max + 1):
        for m in range(0, l + 1):
            f_xx_1[m][l] = - m * m * p_[m][l] / (sin(theta) * sin(theta))
    for l in range(2, l_max + 1):
        for m in range(0, l + 1):
            f_xx_2[m][l] = (l * cos(theta) / sin(theta) * p_[m][l] - 1.0 / sin(theta) * (l + m) * coef_1(l, m) *
                            p_[m][l - 1]) * cos(theta) / sin(theta)
    return f_xx_1 + f_xx_2
def legendre_yy(theta, l_max):
    """Second y-derivative factors of the Legendre table.

    Fix: Python 3 compatibility (``xrange`` -> ``range``).
    """
    from math import sin, cos
    from numpy import zeros
    f_yy = zeros((l_max + 1, l_max + 1))
    p_ = legendre(theta, l_max)
    for l in range(2, l_max + 1):
        for m in range(0, l + 1):
            f_yy[m][l] = 0.5 / sin(theta) * ((1.0 / sin(theta)) * (l * l * cos(2.0 * theta) -
                         (l + 2.0) * l + 2.0 * m * m) * p_[m][l] + 2.0 * (l + m) * cos(theta) /
                         sin(theta) * coef_1(l, m) * p_[m][l - 1])
    return f_yy
def legendre_xy(theta, l_max):
    """Mixed xy-derivative factors of the Legendre table.

    Fix: Python 3 compatibility (``xrange`` -> ``range``).
    """
    from math import sin, cos
    from numpy import zeros
    f_xy = zeros((l_max + 1, l_max + 1))
    p_ = legendre(theta, l_max)
    for l in range(2, l_max + 1):
        for m in range(0, l + 1):
            f_xy[m][l] = m / sin(theta) * ((1.0 / sin(theta)) * (l + m) * p_[m][l - 1] * coef_1(l, m) -
                         (l - 1.0) * cos(theta) / sin(theta) * p_[m][l])
    return f_xy
def spin_descrete(j, n, l_max, sign=+2):
    """Spin +/-2 combination of second-derivative tables on the grid.

    Implicitly returns ``None`` for any *sign* other than +2 or -2
    (original behavior preserved).
    """
    diff_part = legendre_xx_discrete(j, n, l_max) - legendre_yy_discrete(j, n, l_max)
    cross_part = -legendre_xy_discrete(j, n, l_max)
    if sign == +2:
        return diff_part + cross_part
    if sign == -2:
        return diff_part - cross_part
def spin(theta, l_max, sign=+2):
    """Spin +/-2 combination of second-derivative tables at *theta*.

    Implicitly returns ``None`` for any *sign* other than +2 or -2
    (original behavior preserved).
    """
    diff_part = legendre_xx(theta, l_max) - legendre_yy(theta, l_max)
    cross_part = -legendre_xy(theta, l_max)
    if sign == +2:
        return diff_part + cross_part
    if sign == -2:
        return diff_part - cross_part
def direct_f_int(size, cos_coef, sin_coef, l_max_dir, sign=0, diff=False):
    """Direct synthesis computing the Legendre (derivative) tables on the
    fly per latitude, selected by *sign* (0, 1, 2, 11, 22, 12/21).

    Fixes: Python 3 compatibility (``xrange`` -> ``range``, ``size // 2``,
    ``print`` function) and the duplicated sign-dispatch/FFT code is
    factored into one loop with a private selector helper.
    """
    from numpy import zeros, real, imag
    from numpy.fft import fft

    def _basis(j):
        # Select the (derivative of the) Legendre table requested by *sign*.
        if sign == 0:
            return legendre_discrete(j, size, l_max_dir)
        elif sign == 1:
            return legendre_x_discrete(j, size, l_max_dir)
        elif sign == 2:
            return legendre_y_discrete(j, size, l_max_dir)
        elif sign == 11:
            return legendre_xx_discrete(j, size, l_max_dir)
        elif sign == 22:
            return legendre_yy_discrete(j, size, l_max_dir)
        elif sign == 12 or sign == 21:
            return legendre_xy_discrete(j, size, l_max_dir)
        # Original behavior: warn and fail later on an invalid sign.
        print('Not valid')

    f = zeros((size + 1, size // 2 + 1))
    fa = zeros(size)
    fb = zeros(size)
    for j in range(1, size // 2):
        p_ = _basis(j)
        func1 = 0.0
        func2 = 0.0
        for m in range(0, l_max_dir + 1):
            for l in range(m, l_max_dir + 1):
                func1 += cos_coef[m][l] * p_[m][l]
                func2 += sin_coef[m][l] * p_[m][l]
            fa[m] = func1
            fb[m] = func2
            func1 = 0.0
            func2 = 0.0
        if diff:
            t = - imag(fft(fa)) + real(fft(fb))
        else:
            t = real(fft(fa)) + imag(fft(fb))
        f[0:size, j] = t[:]
        f[size][j] = f[0][j]  # wrap the longitude seam
    return f
def inverse_f_int(size, f, l_max_inv):
    """Inverse transform: recover (cos, sin) coefficient tables from a grid.

    Each latitude ring of the (transposed) grid ``f`` is inverse-FFT'd to
    per-order values, which are projected onto the discrete Legendre table
    and accumulated with a sin(theta) quadrature weight normalised over the
    interior rings.

    Returns:
        (sum_a, sum_b): cosine and sine coefficient arrays of shape
        ``(l_max_inv + 1, l_max_inv + 1)``.

    Fix vs. the original: the ``ifft`` of each ring was recomputed inside
    the O(l_max^2) inner loops even though it depends only on ``j`` — it is
    hoisted out, along with the scalar quadrature weight.
    """
    from numpy import zeros, real, imag
    from numpy.fft import ifft
    from math import pi, sin
    grid = f.T  # keep the parameter unmutated; rows become latitude rings
    half = size // 2
    # Normalisation: sum of sin(theta) over interior latitude rings.
    norm = 0.0
    for j in range(1, half):
        norm += sin(2.0 * pi * j / size)
    sum_a = zeros((l_max_inv + 1, l_max_inv + 1))
    sum_b = zeros((l_max_inv + 1, l_max_inv + 1))
    for j in range(1, half):
        theta = 2.0 * pi * j / size
        p_ = legendre_discrete(j, size, l_max_inv)
        # Hoisted: depends only on j, not on (m, l).
        back_f = ifft(grid[j][0:size])
        back_fa = real(back_f)
        back_fb = imag(back_f)
        weight = sin(theta) / norm * 4.0 * pi
        for my_m in range(0, l_max_inv + 1):
            for my_l in range(my_m, l_max_inv + 1):
                sum_a[my_m][my_l] += p_[my_m][my_l] * back_fa[my_m] * weight
                sum_b[my_m][my_l] -= p_[my_m][my_l] * back_fb[my_m] * weight
    return sum_a, sum_b
def direct_point_int(phi, theta, cos_coef, sin_coef, l_max_dir, sign=0, diff=False):
    """Evaluate the spherical-harmonic expansion at a single point.

    ``(phi, theta)`` are shifted into the convention used by the
    ``legendre_*`` tables (phi -= pi, theta += pi/2), the coefficients are
    contracted over degree for each order m, and the longitude series is
    summed directly.  ``diff`` swaps the sin/cos roles in the final sum.

    Fixes vs. the original:
      * invalid ``sign`` now raises ValueError instead of printing
        'Not valid' (Python-2 print statement) and then crashing with
        NameError on the undefined ``p_``;
      * the two duplicated branches — identical except for the final
        longitude combination — are merged.
    """
    from numpy import cos, sin, pi, zeros
    # Shift into the tables' angular convention.
    phi -= pi
    theta += pi / 2
    # Pick the Legendre table for the requested derivative order.
    if sign == 0:
        p_ = legendre(theta, l_max_dir)
    elif sign == 1:
        p_ = legendre_x(theta, l_max_dir)
    elif sign == 2:
        p_ = legendre_y(theta, l_max_dir)
    elif sign == 11:
        p_ = legendre_xx(theta, l_max_dir)
    elif sign == 22:
        p_ = legendre_yy(theta, l_max_dir)
    elif sign == 12:
        p_ = legendre_xy(theta, l_max_dir)
    else:
        raise ValueError('Not valid sign: %r' % (sign,))
    # Contract coefficients over degree l for every order m.
    fa = zeros(l_max_dir + 1)
    fb = zeros(l_max_dir + 1)
    sum_a = 0.0
    sum_b = 0.0
    for m in range(0, l_max_dir + 1):
        for l in range(m, l_max_dir + 1):
            sum_a += p_[m][l] * cos_coef[m][l]
            sum_b += p_[m][l] * sin_coef[m][l]
        fa[m] = sum_a
        fb[m] = sum_b
        sum_a = 0.0
        sum_b = 0.0
    # Longitude sum; diff mode swaps the sin/cos combination.
    f = 0.0
    for m in range(0, l_max_dir + 1):
        if diff:
            f = f + fa[m] * sin(phi * m) + fb[m] * cos(phi * m)
        else:
            f = f + fa[m] * cos(phi * m) - fb[m] * sin(phi * m)
    return f
def inverse_f_spin_int(size, f, l_max_inv, sign=+2):
    """Inverse transform against the spin-weighted table.

    Same quadrature structure as ``inverse_f_int`` but projects onto the
    spin table from ``spin_descrete`` (name kept as defined elsewhere in
    this module) and returns the real/imaginary coefficient combinations
    ((sum_a + sum_b)/2, (sum_a - sum_b)/2).

    Fix vs. the original: the per-ring ``ifft`` was recomputed inside the
    O(l_max^2) inner loops even though it depends only on ``j`` — it is
    hoisted out, along with the scalar quadrature weight.
    """
    from numpy import zeros, real, imag
    from numpy.fft import ifft
    from math import pi, sin
    grid = f.T  # keep the parameter unmutated; rows become latitude rings
    half = size // 2
    # Normalisation: sum of sin(theta) over interior latitude rings.
    norm = 0.0
    for j in range(1, half):
        norm += sin(2.0 * pi * j / size)
    sum_a = zeros((l_max_inv + 1, l_max_inv + 1))
    sum_b = zeros((l_max_inv + 1, l_max_inv + 1))
    for j in range(1, half):
        theta = 2.0 * pi * j / size
        p_ = spin_descrete(j, size, l_max_inv, sign)
        # Hoisted: depends only on j, not on (m, l).
        back_f = ifft(grid[j][0:size])
        back_fa = real(back_f)
        back_fb = imag(back_f)
        weight = sin(theta) / norm * 4.0 * pi
        for my_m in range(0, l_max_inv + 1):
            for my_l in range(my_m, l_max_inv + 1):
                sum_a[my_m][my_l] += p_[my_m][my_l] * back_fa[my_m] * weight
                sum_b[my_m][my_l] -= p_[my_m][my_l] * back_fb[my_m] * weight
    a_r = (sum_a + sum_b)/2.0
    a_i = (sum_a - sum_b)/2.0
    return a_r, a_i
| |
import fnmatch
import os
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import models
from django.core.exceptions import ObjectDoesNotExist
from django.template.defaultfilters import slugify
from django.template.loader import render_to_string
from projects import constants
from projects.exceptions import ProjectImportError
from projects.templatetags.projects_tags import sort_version_aware
from projects.utils import diff, dmp, safe_write
from projects.utils import highest_version as _highest
from taggit.managers import TaggableManager
from tastyapi.slum import api
from vcs_support.base import VCSProject
from vcs_support.backends import backend_cls
from vcs_support.utils import Lock
# Base domain used when composing per-project subdomain names; overridable
# via Django settings, defaulting to the public readthedocs.org domain.
SITE_DOMAIN = getattr(settings, 'SITE_DOMAIN', 'readthedocs.org')
class ProjectManager(models.Manager):
    """Manager adding a queryset helper for non-skipped ("live") projects."""

    def live(self, *args, **kwargs):
        """Return projects with skip=False, narrowed by any extra filters."""
        # Chain the caller's lookups directly onto the skip=False queryset.
        return self.filter(skip=False).filter(*args, **kwargs)
class ProjectRelationship(models.Model):
    """Through-model linking a parent project to a child subproject."""
    parent = models.ForeignKey('Project', related_name='subprojects')
    child = models.ForeignKey('Project', related_name='superprojects')

    def __unicode__(self):
        return "%s -> %s" % (self.parent, self.child)

    #HACK
    def get_absolute_url(self):
        # Hard-codes the readthedocs.org subdomain URL scheme for the
        # child's docs under the parent's subdomain.
        return "http://%s.readthedocs.org/projects/%s/en/latest/" % (self.parent.slug, self.child.slug)
class Project(models.Model):
    """A documentation project.

    Holds the VCS/repo configuration and display metadata, plus helpers
    for locating checkouts, virtualenvs and built artifacts (html, pdf,
    epub, man pages, zip) both on disk (``*_path``) and as media URLs
    (``*_url``).
    """
    #Auto fields
    pub_date = models.DateTimeField(auto_now_add=True)
    modified_date = models.DateTimeField(auto_now=True)
    #Generally from conf.py
    users = models.ManyToManyField(User, related_name='projects')
    name = models.CharField(max_length=255)
    slug = models.SlugField(max_length=255, unique=True)
    description = models.TextField(blank=True,
        help_text='The reStructuredText description of the project')
    repo = models.CharField(max_length=150, blank=True,
        help_text='Checkout URL for your code (hg, git, etc.). Ex. https://github.com/ericholscher/django-kong.git')
    repo_type = models.CharField(max_length=10, choices=constants.REPO_CHOICES, default='git')
    project_url = models.URLField(blank=True, help_text='The project\'s homepage', verify_exists=False)
    version = models.CharField(max_length=100, blank=True,
        help_text='Project version these docs apply to, i.e. 1.0a')
    copyright = models.CharField(max_length=255, blank=True,
        help_text='Project copyright information')
    theme = models.CharField(max_length=20,
        choices=constants.DEFAULT_THEME_CHOICES, default=constants.THEME_DEFAULT,
        help_text='<a href="http://sphinx.pocoo.org/theming.html#builtin-themes" target="_blank">Examples</a>')
    suffix = models.CharField(max_length=10, editable=False, default='.rst')
    default_version = models.CharField(max_length=255, default='latest', help_text='The version of your project that / redirects to')
    # In default_branch, None means the backend should choose the appropriate branch. Eg 'master' for git
    default_branch = models.CharField(max_length=255, default=None, null=True,
        blank=True, help_text='What branch "latest" points to. Leave empty to use the default value for your VCS (eg. trunk or master).')
    requirements_file = models.CharField(max_length=255, default=None, null=True, blank=True, help_text='Requires Virtualenv. A pip requirements file needed to build your documentation. Path from the root of your project.')
    documentation_type = models.CharField(max_length=20,
        choices=constants.DOCUMENTATION_CHOICES, default='sphinx',
        help_text='Type of documentation you are building.')
    analytics_code = models.CharField(max_length=50, null=True, blank=True, help_text="Google Analytics Tracking Code. This may slow down your page loads.")
    #Other model data.
    path = models.CharField(help_text="The directory where conf.py lives",
        max_length=255, editable=False)
    featured = models.BooleanField()
    skip = models.BooleanField()
    use_virtualenv = models.BooleanField(
        help_text="Install your project inside a virtualenv using "
        "setup.py install")
    django_packages_url = models.CharField(max_length=255, blank=True)
    crate_url = models.CharField(max_length=255, blank=True)
    #Subprojects
    related_projects = models.ManyToManyField('self', blank=True, null=True, symmetrical=False, through=ProjectRelationship)
    tags = TaggableManager(blank=True)
    objects = ProjectManager()

    class Meta:
        ordering = ('slug',)

    def __unicode__(self):
        return self.name

    @property
    def subdomain(self):
        # NOTE(review): this produces '<domain>.<slug>', e.g.
        # 'readthedocs.org.myproject' — the slug and domain look swapped
        # compared to get_absolute_url on ProjectRelationship; confirm
        # against the URL routing before relying on it.
        return "%s.%s" % (SITE_DOMAIN, self.slug) #.replace('_', '-')

    def save(self, *args, **kwargs):
        """Derive the slug from the name on first save, then persist."""
        #if hasattr(self, 'pk'):
            #previous_obj = self.__class__.objects.get(pk=self.pk)
            #if previous_obj.repo != self.repo:
                #Needed to not have an import loop on Project
                #from projects import tasks
                #This needs to run on the build machine.
                #tasks.remove_dir.delay(os.path.join(self.doc_path, 'checkouts'))
        if not self.slug:
            self.slug = slugify(self.name)
            if self.slug == '':
                # slugify can return '' (e.g. a name of only symbols);
                # refuse to save a project without a usable slug.
                raise Exception("Model must have slug")
        super(Project, self).save(*args, **kwargs)

    def get_absolute_url(self):
        return reverse('projects_detail', args=[self.slug])

    def get_docs_url(self, version_slug=None):
        """URL for the served docs; defaults to the project's default version."""
        version = version_slug or self.get_default_version()
        return reverse('docs_detail', kwargs={
            'project_slug': self.slug,
            'lang_slug': 'en',
            'version_slug': version,
            'filename': '',
        })

    def get_builds_url(self):
        return reverse('builds_project_list', kwargs={
            'project_slug': self.slug,
        })

    # Artifact helpers: *_url builds a MEDIA_URL link, *_path the matching
    # MEDIA_ROOT filesystem path, always <kind>/<slug>/<version>/<slug>.<ext>.
    def get_pdf_url(self, version_slug='latest'):
        path = os.path.join(settings.MEDIA_URL,
                            'pdf',
                            self.slug,
                            version_slug,
                            '%s.pdf' % self.slug)
        return path

    def get_pdf_path(self, version_slug='latest'):
        path = os.path.join(settings.MEDIA_ROOT,
                            'pdf',
                            self.slug,
                            version_slug,
                            '%s.pdf' % self.slug)
        return path

    def get_epub_url(self, version_slug='latest'):
        path = os.path.join(settings.MEDIA_URL,
                            'epub',
                            self.slug,
                            version_slug,
                            '%s.epub' % self.slug)
        return path

    def get_epub_path(self, version_slug='latest'):
        path = os.path.join(settings.MEDIA_ROOT,
                            'epub',
                            self.slug,
                            version_slug,
                            '%s.epub' % self.slug)
        return path

    def get_manpage_url(self, version_slug='latest'):
        path = os.path.join(settings.MEDIA_URL,
                            'man',
                            self.slug,
                            version_slug,
                            '%s.1' % self.slug)
        return path

    def get_manpage_path(self, version_slug='latest'):
        path = os.path.join(settings.MEDIA_ROOT,
                            'man',
                            self.slug,
                            version_slug,
                            '%s.1' % self.slug)
        return path

    def get_htmlzip_url(self, version_slug='latest'):
        path = os.path.join(settings.MEDIA_URL,
                            'htmlzip',
                            self.slug,
                            version_slug,
                            '%s.zip' % self.slug)
        return path

    def get_htmlzip_path(self, version_slug='latest'):
        path = os.path.join(settings.MEDIA_ROOT,
                            'htmlzip',
                            self.slug,
                            version_slug,
                            '%s.zip' % self.slug)
        return path

    #Doc PATH:
    #MEDIA_ROOT/slug/checkouts/version/<repo>
    @property
    def doc_path(self):
        # Root directory for everything this project stores on disk.
        return os.path.join(settings.DOCROOT, self.slug)

    def checkout_path(self, version='latest'):
        # Where the VCS checkout for a given version lives.
        return os.path.join(self.doc_path, 'checkouts', version)

    def venv_path(self, version='latest'):
        # Per-version virtualenv directory.
        return os.path.join(self.doc_path, 'envs', version)

    def venv_bin(self, version='latest', bin='python'):
        # Path to an executable inside the per-version virtualenv.
        return os.path.join(self.venv_path(version), 'bin', bin)

    def full_doc_path(self, version='latest'):
        """
        The path to the documentation root in the project.
        """
        doc_base = self.checkout_path(version)
        # Prefer a conventional docs directory; fall back to the repo root.
        for possible_path in ['docs', 'doc', 'Doc']:
            if os.path.exists(os.path.join(doc_base, '%s' % possible_path)):
                return os.path.join(doc_base, '%s' % possible_path)
        #No docs directory, docs are at top-level.
        return doc_base

    def full_build_path(self, version='latest'):
        """
        The path to the build html docs in the project.
        """
        return os.path.join(self.conf_dir(version), "_build", "html")

    def rtd_build_path(self, version="latest"):
        """
        The path to the build html docs in the project.
        """
        return os.path.join(self.doc_path, 'rtd-builds', version)

    def rtd_cname_path(self, cname):
        """
        The path to the build html docs in the project.
        """
        return os.path.join(settings.CNAME_ROOT, cname)

    def conf_file(self, version='latest'):
        """Locate conf.py for the given version.

        Searches the docs dir first, then the whole checkout.  With
        multiple candidates, picks the first whose path contains 'doc'
        past character 70 — a heuristic over the absolute checkout path;
        NOTE(review): if none match, this falls through and returns None,
        which callers (e.g. conf_dir) only partially guard against.
        """
        files = self.find('conf.py', version)
        if not files:
            files = self.full_find('conf.py', version)
        if len(files) == 1:
            return files[0]
        elif len(files) > 1:
            for file in files:
                if file.find('doc', 70) != -1:
                    return file
        else:
            # No conf.py anywhere in the checkout.
            raise ProjectImportError("Conf File Missing.")

    def conf_dir(self, version='latest'):
        # Directory containing conf.py, or None if conf_file found nothing.
        conf_file = self.conf_file(version)
        if conf_file:
            return conf_file.replace('/conf.py', '')

    @property
    def highest_version(self):
        return _highest(self.api_versions())

    @property
    def is_imported(self):
        # A project with a repo URL was imported from VCS rather than
        # authored through the website's file editor.
        return bool(self.repo)

    @property
    def has_good_build(self):
        return self.builds.filter(success=True).exists()

    @property
    def has_versions(self):
        return self.versions.exists()

    @property
    def has_aliases(self):
        return self.aliases.exists()

    # has_* artifact checks: True when the built file exists on disk.
    def has_pdf(self, version_slug='latest'):
        return os.path.exists(self.get_pdf_path(version_slug))

    def has_manpage(self, version_slug='latest'):
        return os.path.exists(self.get_manpage_path(version_slug))

    def has_epub(self, version_slug='latest'):
        return os.path.exists(self.get_epub_path(version_slug))

    def has_htmlzip(self, version_slug='latest'):
        return os.path.exists(self.get_htmlzip_path(version_slug))

    @property
    def sponsored(self):
        return False

    def vcs_repo(self, version='latest'):
        """Return a VCS backend instance for this repo, or None when the
        repo_type has no registered backend."""
        #if hasattr(self, '_vcs_repo'):
            #return self._vcs_repo
        backend = backend_cls.get(self.repo_type)
        if not backend:
            repo = None
        else:
            proj = VCSProject(self.name,
                              self.default_branch,
                              self.checkout_path(version),
                              self.repo)
            repo = backend(proj, version)
        #self._vcs_repo = repo
        return repo

    @property
    def contribution_backend(self):
        # NOTE(review): vcs_repo is a method, so `not self.vcs_repo` is
        # always False and `self.vcs_repo.get_contribution_backend()` is an
        # attribute access on the bound method — this looks like it should
        # be `self.vcs_repo()`; confirm whether this property is still used.
        if hasattr(self, '_contribution_backend'):
            return self._contribution_backend
        if not self.vcs_repo:
            cb = None
        else:
            cb = self.vcs_repo.get_contribution_backend()
        self._contribution_backend = cb
        return cb

    def repo_lock(self, timeout=5, polling_interval=0.2):
        # Filesystem lock guarding concurrent operations on the checkout.
        return Lock(self, timeout, polling_interval)

    def find(self, file, version):
        """
        A balla API to find files inside of a projects dir.
        """
        # Glob-match `file` under the docs directory only.
        matches = []
        for root, dirnames, filenames in os.walk(self.full_doc_path(version)):
            for filename in fnmatch.filter(filenames, file):
                matches.append(os.path.join(root, filename))
        return matches

    def full_find(self, file, version):
        """
        A balla API to find files inside of a projects dir.
        """
        # Glob-match `file` under the entire checkout.
        matches = []
        for root, dirnames, filenames in os.walk(self.checkout_path(version)):
            for filename in fnmatch.filter(filenames, file):
                matches.append(os.path.join(root, filename))
        return matches

    def get_latest_build(self):
        try:
            return self.builds.filter(type='html')[0]
        except IndexError:
            return None

    def api_versions(self):
        """Fetch this project's active versions from the API and rebuild
        them as local Version objects, sorted version-aware."""
        from builds.models import Version
        ret = []
        for version_data in api.version.get(project=self.pk, active=True)['objects']:
            # Strip API-only keys before hydrating model instances.
            del version_data['resource_uri']
            project_data = version_data['project']
            del project_data['users']
            del project_data['resource_uri']
            del project_data['absolute_url']
            project = Project(**project_data)
            version_data['project'] = project
            ret.append(Version(**version_data))
        return sort_version_aware(ret)

    def active_versions(self):
        # Active versions that have either been built or uploaded.
        return (self.versions.filter(built=True, active=True) |
                self.versions.filter(active=True, uploaded=True))

    def ordered_active_versions(self):
        return sort_version_aware(self.versions.filter(active=True))

    def all_active_versions(self):
        "A temporary workaround for active_versions filtering out things that were active, but failed to build"
        return self.versions.filter(active=True)

    def version_from_branch_name(self, branch):
        # Match a version either by exact identifier or by the git
        # remote-tracking form of the branch name.
        try:
            return (self.versions.filter(identifier=branch) |
                    self.versions.filter(identifier='remotes/origin/%s'%branch))[0]
        except IndexError:
            return None

    @property
    def whitelisted(self):
        #Hack this true for now.
        return True

    #File Building stuff.
    #Not sure if this is used
    def get_top_level_files(self):
        return self.files.live(parent__isnull=True).order_by('ordering')

    def get_index_filename(self):
        return os.path.join(self.path, 'index.rst')

    def get_rendered_index(self):
        return render_to_string('projects/index.rst.html', {'project': self})

    def write_index(self):
        # Only website-authored projects get a generated index; imported
        # repos own their docs.
        if not self.is_imported:
            safe_write(self.get_index_filename(), self.get_rendered_index())

    def get_latest_revisions(self):
        # FileRevision is defined later in this module; resolved at call time.
        revision_qs = FileRevision.objects.filter(file__project=self,
                                                  file__status=constants.LIVE_STATUS)
        return revision_qs.order_by('-created_date')

    def get_default_version(self):
        """
        Get the default version (slug).
        Returns self.default_version if the version with that slug actually
        exists (is built and published). Otherwise returns 'latest'.
        """
        # latest is a special case where we don't have to check if it exists
        if self.default_version == 'latest':
            return self.default_version
        # check if the default_version exists
        version_qs = self.versions.filter(
            slug=self.default_version,
            active=True
        )
        if version_qs.exists():
            return self.default_version
        return 'latest'

    def add_subproject(self, child):
        # Idempotent: reuses an existing relationship when present.
        subproject, created = ProjectRelationship.objects.get_or_create(
            parent=self,
            child=child,
        )
        return subproject

    def remove_subproject(self, child):
        ProjectRelationship.objects.filter(
            parent=self,
            child=child).delete()
        return
class FileManager(models.Manager):
    """Manager exposing only files in the LIVE status."""

    def live(self, *args, **kwargs):
        """Return live-status files, narrowed by any extra filters."""
        # Chain the caller's lookups directly onto the live-status queryset.
        return self.filter(status=constants.LIVE_STATUS).filter(*args, **kwargs)
class File(models.Model):
    """A website-authored documentation page, arranged as a tree and
    rendered to an .rst file at ``denormalized_path``."""
    project = models.ForeignKey(Project, related_name='files')
    parent = models.ForeignKey('self', null=True, blank=True,
                               related_name='children')
    heading = models.CharField(max_length=255)
    slug = models.SlugField()
    content = models.TextField()
    denormalized_path = models.CharField(max_length=255, editable=False)
    ordering = models.PositiveSmallIntegerField(default=1)
    status = models.PositiveSmallIntegerField(choices=constants.STATUS_CHOICES,
                                              default=constants.LIVE_STATUS)
    objects = FileManager()

    class Meta:
        ordering = ('denormalized_path',)

    def __unicode__(self):
        return '%s: %s' % (self.project.name, self.heading)

    def save(self, *args, **kwargs):
        """Derive slug/denormalized_path, save, and cascade to children.

        Fixes vs. the original:
          * ``if self.children:`` tested the RelatedManager object, which
            is always truthy — replaced with an ``exists()`` query;
          * the nested ``update_children`` helper recursed into
            grandchildren explicitly even though each ``child.save()``
            already recurses via this very method, double-saving every
            deeper descendant.  Saving direct children is sufficient.
        """
        if not self.slug:
            self.slug = slugify(self.heading)
        # denormalized_path mirrors the tree position: parent path + slug.
        if self.parent:
            path = '%s/%s' % (self.parent.denormalized_path, self.slug)
        else:
            path = self.slug
        self.denormalized_path = path
        super(File, self).save(*args, **kwargs)
        if self.children.exists():
            # Each child's save() recomputes its denormalized_path from the
            # (possibly changed) parent path and recurses into its own
            # children, so one level of fan-out covers the whole subtree.
            for child in self.children.all():
                child.save()
        #Update modified time on project.
        self.project.save()

    @property
    def depth(self):
        # Number of path components == depth in the file tree (root == 1).
        return len(self.denormalized_path.split('/'))

    def create_revision(self, old_content, comment):
        """Record a revision holding the diff from current to old content."""
        FileRevision.objects.create(
            file=self,
            comment=comment,
            diff=diff(self.content, old_content)
        )

    @property
    def current_revision(self):
        # Meta.ordering on FileRevision is newest-first, so [0] is current.
        return self.revisions.filter(is_reverted=False)[0]

    def get_html_diff(self, rev_from, rev_to):
        """Get the html diff between two revision numbers."""
        rev_from = self.revisions.get(revision_number=rev_from)
        rev_to = self.revisions.get(revision_number=rev_to)
        diffs = dmp.diff_main(rev_from.diff, rev_to.diff)
        return dmp.diff_prettyHtml(diffs)

    def revert_to(self, revision_number):
        revision = self.revisions.get(revision_number=revision_number)
        revision.apply()

    @property
    def filename(self):
        # On-disk .rst location inside the project's source directory.
        return os.path.join(
            self.project.path,
            '%s.rst' % self.denormalized_path
        )

    def get_rendered(self):
        return render_to_string('projects/doc_file.rst.html', {'file': self})

    def write_to_disk(self):
        safe_write(self.filename, self.get_rendered())

    @models.permalink
    def get_absolute_url(self):
        return ('docs_detail', [self.project.slug, 'en', 'latest',
                                self.denormalized_path + '.html'])
class FileRevision(models.Model):
    """An edit revision of a File, stored as a diff plus metadata."""
    file = models.ForeignKey(File, related_name='revisions')
    comment = models.TextField(blank=True)
    diff = models.TextField(blank=True)
    created_date = models.DateTimeField(auto_now_add=True)
    revision_number = models.IntegerField()
    is_reverted = models.BooleanField(default=False)

    class Meta:
        # Newest revision first; get_file_content relies on this order.
        ordering = ('-revision_number',)

    def __unicode__(self):
        return self.comment or '%s #%s' % (self.file.heading,
                                           self.revision_number)

    def get_file_content(self):
        """
        Apply the series of diffs after this revision in reverse order,
        bringing the content back to the state it was in this revision
        """
        after = self.file.revisions.filter(
            revision_number__gt=self.revision_number)
        content = self.file.content
        for revision in after:
            patch = dmp.patch_fromText(revision.diff)
            content = dmp.patch_apply(patch, content)[0]
        return content

    def apply(self):
        """Revert the file to this revision's content, marking newer
        revisions as reverted and recording the revert as a new revision."""
        original_content = self.file.content
        # store the old content on the file
        self.file.content = self.get_file_content()
        self.file.save()
        # mark reverted changesets
        reverted_qs = self.file.revisions.filter(
            revision_number__gt=self.revision_number)
        reverted_qs.update(is_reverted=True)
        # create a new revision
        FileRevision.objects.create(
            file=self.file,
            comment='Reverted to #%s' % self.revision_number,
            diff=diff(self.file.content, original_content)
        )

    def save(self, *args, **kwargs):
        """Auto-assign the next revision_number on first save.

        NOTE(review): the read-then-increment is racy under concurrent
        saves for the same file — confirm callers serialize edits.
        """
        if not self.pk:
            max_rev = self.file.revisions.aggregate(
                max=models.Max('revision_number'))
            if max_rev['max'] is None:
                self.revision_number = 1
            else:
                self.revision_number = max_rev['max'] + 1
        super(FileRevision, self).save(*args, **kwargs)
class ImportedFile(models.Model):
    """A file discovered in a built project, tracked by path and md5."""
    project = models.ForeignKey(Project, related_name='imported_files')
    name = models.CharField(max_length=255)
    slug = models.SlugField()
    path = models.CharField(max_length=255)
    md5 = models.CharField(max_length=255)

    @models.permalink
    def get_absolute_url(self):
        # Served under the project's 'latest' English docs.
        return ('docs_detail', [self.project.slug, 'en', 'latest', self.path])

    def __unicode__(self):
        return '%s: %s' % (self.name, self.project)
| |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from unittest import mock
from oslo_utils import timeutils
from sahara.conductor import manager
from sahara import context
from sahara.service.castellan import config as castellan
import sahara.service.periodic as p
import sahara.tests.unit.base as base
from sahara.tests.unit.conductor.manager import test_clusters as tc
from sahara.tests.unit.conductor.manager import test_edp as te
from sahara.utils import cluster as c_u
class TestPeriodicBack(base.SaharaWithDbTestCase):
    """Tests for Sahara's periodic background tasks: job-status polling,
    transient/incomplete cluster termination, zombie proxy-user cleanup,
    health verifications, and timer setup.  Time is controlled via
    ``timeutils.set_time_override``; the conductor runs against a real
    in-memory DB while the service layer is mocked per test."""

    def setUp(self):
        super(TestPeriodicBack, self).setUp()
        self.api = manager.ConductorManager()
        castellan.validate_config()

    @mock.patch('sahara.service.edp.job_manager.get_job_status')
    def test_job_status_update(self, get_job_status):
        # Only executions without an end_time (ids 2 and 3) should be polled.
        ctx = context.ctx()
        job = self.api.job_create(ctx, te.SAMPLE_JOB)
        ds = self.api.data_source_create(ctx, te.SAMPLE_DATA_SOURCE)
        self._create_job_execution({"end_time": datetime.datetime.now(),
                                    "id": 1},
                                   job, ds, ds)
        self._create_job_execution({"end_time": None,
                                    "id": 2},
                                   job, ds, ds)
        self._create_job_execution({"end_time": None,
                                    "id": 3},
                                   job, ds, ds)
        p._make_periodic_tasks().update_job_statuses(None)
        self.assertEqual(2, get_job_status.call_count)
        get_job_status.assert_has_calls([mock.call('2'),
                                         mock.call('3')])

    @mock.patch('sahara.service.trusts.use_os_admin_auth_token')
    @mock.patch('sahara.service.api.v10.terminate_cluster')
    def test_transient_cluster_terminate(self, terminate_cluster,
                                         use_os_admin_auth_token):
        # Cluster 1 has only finished executions -> terminated; cluster 2
        # still has running executions -> kept.
        timeutils.set_time_override(datetime.datetime(2005, 2, 1, 0, 0))
        ctx = context.ctx()
        job = self.api.job_create(ctx, te.SAMPLE_JOB)
        ds = self.api.data_source_create(ctx, te.SAMPLE_DATA_SOURCE)
        self._make_cluster('1')
        self._make_cluster('2')
        self._create_job_execution({"end_time": timeutils.utcnow(),
                                    "id": 1,
                                    "cluster_id": "1"},
                                   job, ds, ds)
        self._create_job_execution({"end_time": None,
                                    "id": 2,
                                    "cluster_id": "2"},
                                   job, ds, ds)
        self._create_job_execution({"end_time": None,
                                    "id": 3,
                                    "cluster_id": "2"},
                                   job, ds, ds)
        # Advance the clock past the transient-cluster grace period.
        timeutils.set_time_override(datetime.datetime(2005, 2, 1, 0, 1))
        p._make_periodic_tasks().terminate_unneeded_transient_clusters(None)
        self.assertEqual(1, terminate_cluster.call_count)
        terminate_cluster.assert_has_calls([mock.call('1')])
        self.assertEqual(1, use_os_admin_auth_token.call_count)

    @mock.patch('sahara.service.api.v10.terminate_cluster')
    def test_not_transient_cluster_does_not_terminate(self, terminate_cluster):
        timeutils.set_time_override(datetime.datetime(2005, 2, 1, 0, 0))
        self._make_cluster('1', is_transient=False)
        timeutils.set_time_override(datetime.datetime(2005, 2, 1, 0, 1))
        p._make_periodic_tasks().terminate_unneeded_transient_clusters(None)
        self.assertEqual(0, terminate_cluster.call_count)

    @mock.patch('sahara.service.api.v10.terminate_cluster')
    def test_transient_cluster_not_killed_too_early(self, terminate_cluster):
        # 20s of idleness is inside the grace period -> no termination.
        timeutils.set_time_override(datetime.datetime(2005, 2, 1, second=0))
        self._make_cluster('1')
        timeutils.set_time_override(datetime.datetime(2005, 2, 1, second=20))
        p._make_periodic_tasks().terminate_unneeded_transient_clusters(None)
        self.assertEqual(0, terminate_cluster.call_count)

    @mock.patch('sahara.service.trusts.use_os_admin_auth_token')
    @mock.patch('sahara.service.api.v10.terminate_cluster')
    def test_transient_cluster_killed_in_time(self, terminate_cluster,
                                              use_os_admin_auth_token):
        # 40s of idleness exceeds the grace period -> terminated.
        timeutils.set_time_override(datetime.datetime(2005, 2, 1, second=0))
        self._make_cluster('1')
        timeutils.set_time_override(datetime.datetime(2005, 2, 1, second=40))
        p._make_periodic_tasks().terminate_unneeded_transient_clusters(None)
        self.assertEqual(1, terminate_cluster.call_count)
        terminate_cluster.assert_has_calls([mock.call('1')])
        self.assertEqual(1, use_os_admin_auth_token.call_count)

    @mock.patch('sahara.service.api.v10.terminate_cluster')
    def test_incomplete_cluster_not_killed_too_early(self, terminate_cluster):
        # Cleanup window is 1 hour; 59m50s is still inside it.
        self.override_config('cleanup_time_for_incomplete_clusters', 1)
        timeutils.set_time_override(datetime.datetime(2005, 2, 1, second=0))
        self._make_cluster('1', c_u.CLUSTER_STATUS_SPAWNING)
        timeutils.set_time_override(datetime.datetime(
            2005, 2, 1, minute=59, second=50))
        p._make_periodic_tasks().terminate_incomplete_clusters(None)
        self.assertEqual(0, terminate_cluster.call_count)

    @mock.patch('sahara.service.trusts.use_os_admin_auth_token')
    @mock.patch('sahara.service.api.v10.terminate_cluster')
    def test_incomplete_cluster_killed_in_time(self, terminate_cluster,
                                               use_os_admin_auth_token):
        # 1h10s in SPAWNING exceeds the 1-hour window -> terminated.
        self.override_config('cleanup_time_for_incomplete_clusters', 1)
        timeutils.set_time_override(datetime.datetime(2005, 2, 1, second=0))
        self._make_cluster('1', c_u.CLUSTER_STATUS_SPAWNING)
        timeutils.set_time_override(datetime.datetime(
            2005, 2, 1, hour=1, second=10))
        p._make_periodic_tasks().terminate_incomplete_clusters(None)
        self.assertEqual(1, terminate_cluster.call_count)
        terminate_cluster.assert_has_calls([mock.call('1')])
        self.assertEqual(1, use_os_admin_auth_token.call_count)

    @mock.patch('sahara.service.api.v10.terminate_cluster')
    def test_active_cluster_not_killed_as_inactive(
            self, terminate_cluster):
        # ACTIVE clusters are never treated as "incomplete".
        self.override_config('cleanup_time_for_incomplete_clusters', 1)
        timeutils.set_time_override(datetime.datetime(2005, 2, 1, second=0))
        self._make_cluster('1')
        timeutils.set_time_override(datetime.datetime(
            2005, 2, 1, hour=1, second=10))
        p._make_periodic_tasks().terminate_incomplete_clusters(None)
        self.assertEqual(0, terminate_cluster.call_count)

    @mock.patch("sahara.utils.proxy.proxy_domain_users_list")
    @mock.patch("sahara.utils.proxy.proxy_user_delete")
    @mock.patch("sahara.service.periodic.conductor.job_execution_get")
    def test_check_for_zombie_proxy_users(self, mock_conductor_je_get,
                                          mock_user_delete,
                                          mock_users_list):
        # Proxy users are named job_<id>; only the one whose execution is
        # in a terminal state (KILLED) should be deleted.  'admin' is not
        # a job_* user and is never considered.
        user_0 = mock.MagicMock()
        user_0.name = "admin"
        user_0.id = 0
        user_1 = mock.MagicMock()
        user_1.name = "job_0"
        user_1.id = 1
        user_2 = mock.MagicMock()
        user_2.name = "job_1"
        user_2.id = 2
        mock_users_list.return_value = [user_0, user_1, user_2]
        je_0 = mock.MagicMock()
        je_0.id = 0
        je_0.info = {"status": "KILLED"}
        je_1 = mock.MagicMock()
        je_1.id = 1
        je_1.info = {"status": "WAITING"}
        mock_conductor_je_get.side_effect = [je_0, je_1]
        p._make_periodic_tasks().check_for_zombie_proxy_users(None)
        mock_user_delete.assert_called_once_with(user_id=1)

    @mock.patch(
        'sahara.service.health.verification_base.validate_verification_start')
    @mock.patch('sahara.service.api.v10.update_cluster')
    def test_run_verifications_executed(self, cluster_update, ver_valid):
        # ACTIVE cluster -> a verification is started.
        self._make_cluster('1')
        p._make_periodic_tasks().run_verifications(None)
        self.assertEqual(1, ver_valid.call_count)
        cluster_update.assert_called_once_with(
            '1', {'verification': {'status': 'START'}})

    @mock.patch(
        'sahara.service.health.verification_base.validate_verification_start')
    @mock.patch('sahara.service.api.v10.update_cluster')
    def test_run_verifications_not_executed(self, cluster_update, ver_valid):
        # ERROR cluster -> verifications are skipped entirely.
        self._make_cluster('1', status=c_u.CLUSTER_STATUS_ERROR)
        p._make_periodic_tasks().run_verifications(None)
        ver_valid.assert_not_called()
        cluster_update.assert_not_called()

    @mock.patch("sahara.service.periodic.threadgroup")
    @mock.patch("sahara.service.periodic.CONF")
    def test_setup_enabled(self, mock_conf, mock_thread_group):
        # With periodic_enable=True a dynamic timer must be registered.
        mock_conf.periodic_enable = True
        mock_conf.periodic_fuzzy_delay = 20
        mock_conf.periodic_interval_max = 30
        mock_conf.periodic_workers_number = 1
        mock_conf.periodic_coordinator_backend_url = ''
        add_timer = mock_thread_group.ThreadGroup().add_dynamic_timer
        p.setup()
        self.assertTrue(add_timer._mock_called)

    @mock.patch("sahara.service.periodic.threadgroup")
    @mock.patch("sahara.service.periodic.CONF")
    def test_setup_disabled(self, mock_conf, mock_thread_group):
        # With periodic_enable=False no timer is registered.
        mock_conf.periodic_enable = False
        add_timer = mock_thread_group.ThreadGroup().add_dynamic_timer
        p.setup()
        self.assertFalse(add_timer._mock_called)

    def _make_cluster(self, id_name, status=c_u.CLUSTER_STATUS_ACTIVE,
                      is_transient=True):
        """Create a cluster row from the sample fixture with the given id,
        status, and transience; updated_at is pinned to the (overridden)
        current time."""
        ctx = context.ctx()
        c = tc.SAMPLE_CLUSTER.copy()
        c["is_transient"] = is_transient
        c["status"] = status
        c["id"] = id_name
        c["name"] = id_name
        c['updated_at'] = timeutils.utcnow()
        c['trust_id'] = 'DEADBEEF-DEAD-BEEF-DEAD-BEEFDEADBEEF'
        self.api.cluster_create(ctx, c)

    def _create_job_execution(self, values, job, input, output):
        """Create a job execution row wiring the given job and data sources
        into ``values`` before insertion."""
        values.update({"job_id": job['id'],
                       "input_id": input['id'],
                       "output_id": output['id']})
        self.api.job_execution_create(context.ctx(), values)
| |
# Copyright [2015] Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from mock import Mock
from mock import patch
from trove.cluster.models import ClusterTasks as ClusterTaskStatus
from trove.cluster.models import DBCluster
import trove.common.context as context
from trove.common.exception import GuestError
from trove.common.strategies.cluster.experimental.vertica.taskmanager import (
VerticaClusterTasks as ClusterTasks)
from trove.common.strategies.cluster.experimental.vertica.taskmanager import (
VerticaTaskManagerAPI as task_api)
from trove.common.strategies.cluster.experimental.vertica.taskmanager import (
VerticaTaskManagerStrategy as task_strategy)
from trove.datastore import models as datastore_models
from trove.instance.models import BaseInstance
from trove.instance.models import DBInstance
from trove.instance.models import Instance
from trove.instance.models import InstanceServiceStatus
from trove.instance.models import InstanceTasks
from trove import rpc
from trove.taskmanager.models import ServiceStatuses
from trove.tests.unittests import trove_testtools
class VerticaClusterTasksTest(trove_testtools.TestCase):
    def setUp(self):
        """Build an in-memory Vertica cluster fixture: one DBCluster plus
        three DBInstances (one master, two members) and a ClusterTasks
        wrapper around mocked datastore/version objects."""
        super(VerticaClusterTasksTest, self).setUp()
        self.cluster_id = "1232"
        self.cluster_name = "Cluster-1234"
        self.tenant_id = "6789"
        self.db_cluster = DBCluster(ClusterTaskStatus.NONE,
                                    id=self.cluster_id,
                                    created=str(datetime.date),
                                    updated=str(datetime.date),
                                    name=self.cluster_name,
                                    task_id=ClusterTaskStatus.NONE._code,
                                    tenant_id=self.tenant_id,
                                    datastore_version_id="1",
                                    deleted=False)
        self.dbinst1 = DBInstance(InstanceTasks.NONE, id="1", name="member1",
                                  compute_instance_id="compute-1",
                                  task_id=InstanceTasks.NONE._code,
                                  task_description=InstanceTasks.NONE._db_text,
                                  volume_id="volume-1",
                                  datastore_version_id="1",
                                  cluster_id=self.cluster_id,
                                  type="master")
        self.dbinst2 = DBInstance(InstanceTasks.NONE, id="2", name="member2",
                                  compute_instance_id="compute-2",
                                  task_id=InstanceTasks.NONE._code,
                                  task_description=InstanceTasks.NONE._db_text,
                                  volume_id="volume-2",
                                  datastore_version_id="1",
                                  cluster_id=self.cluster_id,
                                  type="member")
        self.dbinst3 = DBInstance(InstanceTasks.NONE, id="3", name="member3",
                                  compute_instance_id="compute-3",
                                  task_id=InstanceTasks.NONE._code,
                                  task_description=InstanceTasks.NONE._db_text,
                                  volume_id="volume-3",
                                  datastore_version_id="1",
                                  cluster_id=self.cluster_id,
                                  type="member")
        # Mocked datastore/version; only .name is read by the code under test.
        mock_ds1 = Mock()
        mock_ds1.name = 'vertica'
        mock_dv1 = Mock()
        mock_dv1.name = '7.1'
        self.clustertasks = ClusterTasks(Mock(),
                                         self.db_cluster,
                                         datastore=mock_ds1,
                                         datastore_version=mock_dv1)
@patch.object(ClusterTasks, 'update_statuses_on_failure')
@patch.object(InstanceServiceStatus, 'find_by')
@patch('trove.taskmanager.models.LOG')
def test_all_instances_ready_bad_status(self, mock_logging,
mock_find, mock_update):
(mock_find.return_value.
get_status.return_value) = ServiceStatuses.FAILED
ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"],
self.cluster_id)
mock_update.assert_called_with(self.cluster_id, None)
self.assertFalse(ret_val)
@patch.object(InstanceServiceStatus, 'find_by')
def test_all_instances_ready(self, mock_find):
(mock_find.return_value.
get_status.return_value) = ServiceStatuses.INSTANCE_READY
ret_val = self.clustertasks._all_instances_ready(["1", "2", "3", "4"],
self.cluster_id)
self.assertTrue(ret_val)
@patch.object(ClusterTasks, 'reset_task')
@patch.object(ClusterTasks, '_all_instances_ready', return_value=False)
@patch.object(Instance, 'load')
@patch.object(DBInstance, 'find_all')
@patch.object(datastore_models.Datastore, 'load')
@patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
def test_create_cluster_instance_not_ready(self, mock_dv, mock_ds,
mock_find_all, mock_load,
mock_ready, mock_reset_task):
mock_find_all.return_value.all.return_value = [self.dbinst1]
mock_load.return_value = BaseInstance(Mock(),
self.dbinst1, Mock(),
InstanceServiceStatus(
ServiceStatuses.NEW))
self.clustertasks.create_cluster(Mock(), self.cluster_id)
mock_reset_task.assert_called_with()
@patch.object(ClusterTasks, 'reset_task')
@patch.object(ClusterTasks, 'get_guest')
@patch.object(ClusterTasks, 'get_ip')
@patch.object(ClusterTasks, '_all_instances_ready')
@patch.object(Instance, 'load')
@patch.object(DBInstance, 'find_all')
@patch.object(datastore_models.Datastore, 'load')
@patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
def test_create_cluster(self, mock_dv, mock_ds, mock_find_all, mock_load,
mock_ready, mock_ip, mock_guest, mock_reset_task):
cluster_instances = [self.dbinst1, self.dbinst2, self.dbinst3]
for instance in cluster_instances:
if instance['type'] == "master":
mock_find_all.return_value.all.return_value = [self.dbinst1]
mock_ready.return_value = True
mock_load.return_value = BaseInstance(Mock(),
self.dbinst1, Mock(),
InstanceServiceStatus(
ServiceStatuses.NEW))
mock_ip.return_value = "10.0.0.2"
self.clustertasks.create_cluster(Mock(), self.cluster_id)
mock_guest.return_value.install_cluster.assert_called_with(
['10.0.0.2'])
mock_reset_task.assert_called_with()
mock_guest.return_value.cluster_complete.assert_called_with()
@patch.object(ClusterTasks, 'update_statuses_on_failure')
@patch.object(ClusterTasks, 'reset_task')
@patch.object(ClusterTasks, 'get_ip')
@patch.object(ClusterTasks, '_all_instances_ready')
@patch.object(Instance, 'load')
@patch.object(DBInstance, 'find_all')
@patch.object(datastore_models.Datastore, 'load')
@patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
@patch(
'trove.common.strategies.cluster.experimental.vertica.taskmanager.LOG')
def test_create_cluster_fail(self, mock_logging, mock_dv, mock_ds,
mock_find_all,
mock_load, mock_ready, mock_ip,
mock_reset_task, mock_update_status):
mock_find_all.return_value.all.return_value = [self.dbinst1]
mock_load.return_value = BaseInstance(Mock(),
self.dbinst1, Mock(),
InstanceServiceStatus(
ServiceStatuses.NEW))
mock_ip.return_value = "10.0.0.2"
guest_client = Mock()
guest_client.install_cluster = Mock(side_effect=GuestError("Error"))
with patch.object(ClusterTasks, 'get_guest',
return_value=guest_client):
self.clustertasks.create_cluster(Mock(), self.cluster_id)
mock_update_status.assert_called_with('1232')
mock_reset_task.assert_called_with()
class VerticaTaskManagerAPITest(trove_testtools.TestCase):
    """Checks the Vertica task-manager RPC API wrapper."""

    @patch.object(rpc, 'get_client', Mock(return_value=Mock()))
    def setUp(self):
        super(VerticaTaskManagerAPITest, self).setUp()
        self.rpc_api_version = '1.0'
        self.context = context.TroveContext()
        # Capture casts on a test call context instead of a real client.
        self.call_context = trove_testtools.TroveTestContext(self)
        self.call_context.cast = Mock()
        self.api = task_api(self.context)
        self.api.client.prepare = Mock(return_value=self.call_context)

    def test_task_manager_api_cast(self):
        # _cast() should prepare the client and fire an asynchronous cast
        # carrying only the method name (no extra kwargs were supplied).
        self.api._cast(method_name='test_method', version=self.rpc_api_version)
        self.call_context.cast.assert_called_with(self.context, 'test_method')
class VerticaTaskManagerStrategyTest(trove_testtools.TestCase):
    """Sanity checks on the classes exposed by the Vertica strategy."""

    def test_task_manager_cluster_tasks_class(self):
        strategy = task_strategy()
        tasks_cls = strategy.task_manager_cluster_tasks_class
        # Vertica does not support cluster rebuild, but must be able to
        # create clusters.
        self.assertFalse(hasattr(tasks_cls, 'rebuild_cluster'))
        self.assertTrue(callable(tasks_cls.create_cluster))

    def test_task_manager_api_class(self):
        strategy = task_strategy()
        api_cls = strategy.task_manager_api_class
        self.assertFalse(hasattr(api_cls, 'add_new_node'))
        self.assertTrue(callable(api_cls._cast))
# ---------------------------------------------------------------------------
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.common.db"""
import os
import sys
import unittest
from tempfile import mkdtemp
from shutil import rmtree, copy
from uuid import uuid4
import six.moves.cPickle as pickle
import json
import sqlite3
import itertools
import time
import random
from mock import patch, MagicMock
from eventlet.timeout import Timeout
from six.moves import range
import swift.common.db
from swift.common.constraints import \
MAX_META_VALUE_LENGTH, MAX_META_COUNT, MAX_META_OVERALL_SIZE
from swift.common.db import chexor, dict_factory, get_db_connection, \
DatabaseBroker, DatabaseConnectionError, DatabaseAlreadyExists, \
GreenDBConnection, PICKLE_PROTOCOL
from swift.common.utils import normalize_timestamp, mkdirs, Timestamp
from swift.common.exceptions import LockTimeout
from swift.common.swob import HTTPException
from test.unit import with_tempdir
class TestDatabaseConnectionError(unittest.TestCase):
    def test_str(self):
        # Without a timeout, the message carries the path and the reason.
        err = DatabaseConnectionError(':memory:',
                                      'No valid database connection')
        msg = str(err)
        self.assertIn(':memory:', msg)
        self.assertIn('No valid database connection', msg)
        # With a timeout, the timeout value is reported as well.
        err = DatabaseConnectionError(':memory:',
                                      'No valid database connection',
                                      timeout=1357)
        msg = str(err)
        self.assertIn(':memory:', msg)
        self.assertIn('No valid database connection', msg)
        self.assertIn('1357', msg)
class TestDictFactory(unittest.TestCase):
    def test_normal_case(self):
        # dict_factory should map column names to row values, in order.
        conn = sqlite3.connect(':memory:')
        conn.execute('CREATE TABLE test (one TEXT, two INTEGER)')
        conn.execute('INSERT INTO test (one, two) VALUES ("abc", 123)')
        conn.execute('INSERT INTO test (one, two) VALUES ("def", 456)')
        conn.commit()
        curs = conn.execute('SELECT one, two FROM test')
        expected_rows = [{'one': 'abc', 'two': 123},
                         {'one': 'def', 'two': 456}]
        for expected in expected_rows:
            self.assertEqual(dict_factory(curs, next(curs)), expected)
class TestChexor(unittest.TestCase):
    def test_normal_case(self):
        expected = '4f2ea31ac14d4273fe32ba08062b21de'
        actual = chexor('d41d8cd98f00b204e9800998ecf8427e',
                        'new name', normalize_timestamp(1))
        self.assertEqual(expected, actual)

    def test_invalid_old_hash(self):
        self.assertRaises(ValueError, chexor, 'oldhash', 'name',
                          normalize_timestamp(1))

    def test_no_name(self):
        self.assertRaises(Exception, chexor,
                          'd41d8cd98f00b204e9800998ecf8427e', None,
                          normalize_timestamp(1))

    def test_chexor(self):
        # chexor is commutative: folding the same records in any order
        # must yield the same digest.
        ts = (normalize_timestamp(t) for t in
              itertools.count(int(time.time())))
        objects = [(name, next(ts))
                   for name in ('frank', 'bob', 'tom', 'frank', 'tom', 'bob')]

        def fold(records):
            digest = '0'
            for record in records:
                digest = chexor(digest, *record)
            return digest

        random.shuffle(objects)
        first_digest = fold(objects)
        random.shuffle(objects)
        self.assertEqual(first_digest, fold(objects))
class TestGreenDBConnection(unittest.TestCase):
    """Verify GreenDBConnection retries locked-database errors until the
    eventlet Timeout fires, replaying the identical call each time."""

    def test_execute_when_locked(self):
        # This test is dependent on the code under test calling execute and
        # commit as sqlite3.Cursor.execute in a subclass.
        class InterceptCursor(sqlite3.Cursor):
            pass
        db_error = sqlite3.OperationalError('database is locked')
        InterceptCursor.execute = MagicMock(side_effect=db_error)
        with patch('sqlite3.Cursor', new=InterceptCursor):
            conn = sqlite3.connect(':memory:', check_same_thread=False,
                                   factory=GreenDBConnection, timeout=0.1)
            self.assertRaises(Timeout, conn.execute, 'select 1')
            self.assertTrue(InterceptCursor.execute.called)
            # Every retry must have replayed exactly the same statement.
            self.assertEqual(InterceptCursor.execute.call_args_list,
                             list((InterceptCursor.execute.call_args,) *
                                  InterceptCursor.execute.call_count))

    def test_commit_when_locked(self):
        # BUG FIX: this method was named "text_commit_when_locked"; without
        # the "test_" prefix no unittest/pytest runner ever collected or
        # ran it, so the commit-retry path was silently untested.
        # This test is dependent on the code under test calling commit as
        # sqlite3.Connection.commit in a subclass.
        class InterceptConnection(sqlite3.Connection):
            pass
        db_error = sqlite3.OperationalError('database is locked')
        InterceptConnection.commit = MagicMock(side_effect=db_error)
        with patch('sqlite3.Connection', new=InterceptConnection):
            conn = sqlite3.connect(':memory:', check_same_thread=False,
                                   factory=GreenDBConnection, timeout=0.1)
            self.assertRaises(Timeout, conn.commit)
            self.assertTrue(InterceptConnection.commit.called)
            # Every retry must have replayed exactly the same commit call.
            self.assertEqual(InterceptConnection.commit.call_args_list,
                             list((InterceptConnection.commit.call_args,) *
                                  InterceptConnection.commit.call_count))
class TestGetDBConnection(unittest.TestCase):
    def test_normal_case(self):
        conn = get_db_connection(':memory:')
        self.assertTrue(hasattr(conn, 'execute'))

    def test_invalid_path(self):
        self.assertRaises(DatabaseConnectionError, get_db_connection,
                          'invalid database path / name')

    def test_locked_db(self):
        # This test is dependent on the code under test calling execute and
        # commit as sqlite3.Cursor.execute in a subclass.
        class InterceptCursor(sqlite3.Cursor):
            pass

        locked_error = sqlite3.OperationalError('database is locked')
        mock_execute = MagicMock(side_effect=locked_error)
        InterceptCursor.execute = mock_execute
        with patch('sqlite3.Cursor', new=InterceptCursor):
            self.assertRaises(Timeout, get_db_connection, ':memory:',
                              timeout=0.1)
            self.assertTrue(mock_execute.called)
            # All retries should have replayed the very same call.
            expected_calls = list((mock_execute.call_args,) *
                                  mock_execute.call_count)
            self.assertEqual(mock_execute.call_args_list, expected_calls)
class ExampleBroker(DatabaseBroker):
    """
    Concrete enough implementation of a DatabaseBroker.

    Stores simple "test" rows and mirrors the account/container broker
    schema closely enough to exercise DatabaseBroker's abstract hooks.
    """
    db_type = 'test'
    db_contains_type = 'test'
    db_reclaim_timestamp = 'created_at'
    def _initialize(self, conn, put_timestamp, **kwargs):
        """Create the test schema and seed the single test_stat row.

        Called by DatabaseBroker.initialize() inside a transaction; the
        triggers keep test_stat.test_count in sync with (un)deleted rows.
        """
        if not self.account:
            raise ValueError(
                'Attempting to create a new database with no account set')
        conn.executescript('''
            CREATE TABLE test_stat (
                account TEXT,
                test_count INTEGER DEFAULT 0,
                created_at TEXT,
                put_timestamp TEXT DEFAULT '0',
                delete_timestamp TEXT DEFAULT '0',
                hash TEXT default '00000000000000000000000000000000',
                id TEXT,
                status_changed_at TEXT DEFAULT '0',
                metadata TEXT DEFAULT ''
            );
            CREATE TABLE test (
                ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
                name TEXT,
                created_at TEXT,
                deleted INTEGER DEFAULT 0
            );
            CREATE TRIGGER test_insert AFTER INSERT ON test
            BEGIN
                UPDATE test_stat
                SET test_count = test_count + (1 - new.deleted);
            END;
            CREATE TRIGGER test_delete AFTER DELETE ON test
            BEGIN
                UPDATE test_stat
                SET test_count = test_count - (1 - old.deleted);
            END;
        ''')
        conn.execute("""
            INSERT INTO test_stat (
                account, created_at, id, put_timestamp, status_changed_at)
            VALUES (?, ?, ?, ?, ?);
        """, (self.account, Timestamp(time.time()).internal, str(uuid4()),
              put_timestamp, put_timestamp))
    def merge_items(self, item_list):
        """Apply pending/replicated records; newest created_at wins."""
        with self.get() as conn:
            for rec in item_list:
                conn.execute(
                    'DELETE FROM test WHERE name = ? and created_at < ?', (
                        rec['name'], rec['created_at']))
                if not conn.execute(
                        'SELECT 1 FROM test WHERE name = ?',
                        (rec['name'],)).fetchall():
                    conn.execute('''
                    INSERT INTO test (name, created_at, deleted)
                    VALUES (?, ?, ?)''', (
                        rec['name'], rec['created_at'], rec['deleted']))
            conn.commit()
    def _commit_puts_load(self, item_list, entry):
        """Decode one pending-file entry into a record dict.

        NOTE(review): entry.decode('base64') uses the Python 2 str codec;
        as written this helper is Python-2-only.
        """
        (name, timestamp, deleted) = pickle.loads(entry.decode('base64'))
        item_list.append({
            'name': name,
            'created_at': timestamp,
            'deleted': deleted,
        })
    def _load_item(self, name, timestamp, deleted):
        """Record a put/delete: in-memory DBs merge directly; on-disk DBs
        append a ':'-separated base64-pickled entry to the pending file."""
        if self.db_file == ':memory:':
            record = {
                'name': name,
                'created_at': timestamp,
                'deleted': deleted,
            }
            self.merge_items([record])
            return
        with open(self.pending_file, 'a+b') as fp:
            fp.write(':')
            fp.write(pickle.dumps(
                (name, timestamp, deleted),
                protocol=PICKLE_PROTOCOL).encode('base64'))
            fp.flush()
    def put_test(self, name, timestamp):
        # Record an object "put" (deleted=0).
        self._load_item(name, timestamp, 0)
    def delete_test(self, name, timestamp):
        # Record an object "delete" (deleted=1).
        self._load_item(name, timestamp, 1)
    def _delete_db(self, conn, timestamp):
        """Mark the DB deleted by advancing delete/status timestamps."""
        conn.execute("""
            UPDATE test_stat
            SET delete_timestamp = ?,
                status_changed_at = ?
            WHERE delete_timestamp < ? """, (timestamp, timestamp, timestamp))
    def _is_deleted(self, conn):
        """Deleted == no rows left AND delete_timestamp > put_timestamp."""
        info = conn.execute('SELECT * FROM test_stat').fetchone()
        return (info['test_count'] in (None, '', 0, '0')) and \
            (Timestamp(info['delete_timestamp']) >
             Timestamp(info['put_timestamp']))
class TestExampleBroker(unittest.TestCase):
    """
    Tests that use the mostly Concrete enough ExampleBroker to exercise some
    of the abstract methods on DatabaseBroker.
    """
    broker_class = ExampleBroker
    policy = 0

    def setUp(self):
        # Monotonically increasing internal timestamps for each test.
        self.ts = (Timestamp(t).internal for t in
                   itertools.count(int(time.time())))

    def test_delete_db(self):
        broker = self.broker_class(':memory:', account='a', container='c')
        broker.initialize(next(self.ts))
        broker.delete_db(next(self.ts))
        self.assertTrue(broker.is_deleted())

    def test_merge_timestamps_simple_delete(self):
        put_timestamp = next(self.ts)
        broker = self.broker_class(':memory:', account='a', container='c')
        broker.initialize(put_timestamp)
        created_at = broker.get_info()['created_at']
        broker.merge_timestamps(created_at, put_timestamp, '0')
        info = broker.get_info()
        self.assertEqual(info['created_at'], created_at)
        self.assertEqual(info['put_timestamp'], put_timestamp)
        self.assertEqual(info['delete_timestamp'], '0')
        self.assertEqual(info['status_changed_at'], put_timestamp)
        # delete
        delete_timestamp = next(self.ts)
        broker.merge_timestamps(created_at, put_timestamp, delete_timestamp)
        self.assertTrue(broker.is_deleted())
        info = broker.get_info()
        self.assertEqual(info['created_at'], created_at)
        self.assertEqual(info['put_timestamp'], put_timestamp)
        self.assertEqual(info['delete_timestamp'], delete_timestamp)
        self.assertTrue(info['status_changed_at'] > Timestamp(put_timestamp))

    def put_item(self, broker, timestamp):
        broker.put_test('test', timestamp)

    def delete_item(self, broker, timestamp):
        broker.delete_test('test', timestamp)

    def test_merge_timestamps_delete_with_objects(self):
        put_timestamp = next(self.ts)
        broker = self.broker_class(':memory:', account='a', container='c')
        broker.initialize(put_timestamp, storage_policy_index=int(self.policy))
        created_at = broker.get_info()['created_at']
        broker.merge_timestamps(created_at, put_timestamp, '0')
        info = broker.get_info()
        self.assertEqual(info['created_at'], created_at)
        self.assertEqual(info['put_timestamp'], put_timestamp)
        self.assertEqual(info['delete_timestamp'], '0')
        self.assertEqual(info['status_changed_at'], put_timestamp)
        # add object
        self.put_item(broker, next(self.ts))
        self.assertEqual(broker.get_info()[
            '%s_count' % broker.db_contains_type], 1)
        # delete
        delete_timestamp = next(self.ts)
        broker.merge_timestamps(created_at, put_timestamp, delete_timestamp)
        self.assertFalse(broker.is_deleted())
        info = broker.get_info()
        self.assertEqual(info['created_at'], created_at)
        self.assertEqual(info['put_timestamp'], put_timestamp)
        self.assertEqual(info['delete_timestamp'], delete_timestamp)
        # status is unchanged
        self.assertEqual(info['status_changed_at'], put_timestamp)
        # count is causing status to hold on
        self.delete_item(broker, next(self.ts))
        self.assertEqual(broker.get_info()[
            '%s_count' % broker.db_contains_type], 0)
        self.assertTrue(broker.is_deleted())

    def test_merge_timestamps_simple_recreate(self):
        put_timestamp = next(self.ts)
        broker = self.broker_class(':memory:', account='a', container='c')
        broker.initialize(put_timestamp, storage_policy_index=int(self.policy))
        virgin_status_changed_at = broker.get_info()['status_changed_at']
        created_at = broker.get_info()['created_at']
        delete_timestamp = next(self.ts)
        broker.merge_timestamps(created_at, put_timestamp, delete_timestamp)
        self.assertTrue(broker.is_deleted())
        info = broker.get_info()
        self.assertEqual(info['created_at'], created_at)
        self.assertEqual(info['put_timestamp'], put_timestamp)
        self.assertEqual(info['delete_timestamp'], delete_timestamp)
        orig_status_changed_at = info['status_changed_at']
        self.assertTrue(orig_status_changed_at >
                        Timestamp(virgin_status_changed_at))
        # recreate
        recreate_timestamp = next(self.ts)
        status_changed_at = time.time()
        with patch('swift.common.db.time.time', new=lambda: status_changed_at):
            broker.merge_timestamps(created_at, recreate_timestamp, '0')
        self.assertFalse(broker.is_deleted())
        info = broker.get_info()
        self.assertEqual(info['created_at'], created_at)
        self.assertEqual(info['put_timestamp'], recreate_timestamp)
        self.assertEqual(info['delete_timestamp'], delete_timestamp)
        # BUG FIX: this was assertTrue(x, y), which treats the second
        # argument as a failure *message* and never compares the values.
        self.assertEqual(Timestamp(info['status_changed_at']),
                         Timestamp(status_changed_at))

    def test_merge_timestamps_recreate_with_objects(self):
        put_timestamp = next(self.ts)
        broker = self.broker_class(':memory:', account='a', container='c')
        broker.initialize(put_timestamp, storage_policy_index=int(self.policy))
        created_at = broker.get_info()['created_at']
        # delete
        delete_timestamp = next(self.ts)
        broker.merge_timestamps(created_at, put_timestamp, delete_timestamp)
        self.assertTrue(broker.is_deleted())
        info = broker.get_info()
        self.assertEqual(info['created_at'], created_at)
        self.assertEqual(info['put_timestamp'], put_timestamp)
        self.assertEqual(info['delete_timestamp'], delete_timestamp)
        orig_status_changed_at = info['status_changed_at']
        self.assertTrue(Timestamp(orig_status_changed_at) >=
                        Timestamp(put_timestamp))
        # add object
        self.put_item(broker, next(self.ts))
        count_key = '%s_count' % broker.db_contains_type
        self.assertEqual(broker.get_info()[count_key], 1)
        self.assertFalse(broker.is_deleted())
        # recreate
        recreate_timestamp = next(self.ts)
        broker.merge_timestamps(created_at, recreate_timestamp, '0')
        self.assertFalse(broker.is_deleted())
        info = broker.get_info()
        self.assertEqual(info['created_at'], created_at)
        self.assertEqual(info['put_timestamp'], recreate_timestamp)
        self.assertEqual(info['delete_timestamp'], delete_timestamp)
        self.assertEqual(info['status_changed_at'], orig_status_changed_at)
        # count is not causing status to hold on
        self.delete_item(broker, next(self.ts))
        self.assertFalse(broker.is_deleted())

    def test_merge_timestamps_update_put_no_status_change(self):
        put_timestamp = next(self.ts)
        broker = self.broker_class(':memory:', account='a', container='c')
        broker.initialize(put_timestamp, storage_policy_index=int(self.policy))
        info = broker.get_info()
        orig_status_changed_at = info['status_changed_at']
        created_at = info['created_at']
        new_put_timestamp = next(self.ts)
        broker.merge_timestamps(created_at, new_put_timestamp, '0')
        info = broker.get_info()
        self.assertEqual(new_put_timestamp, info['put_timestamp'])
        self.assertEqual(orig_status_changed_at, info['status_changed_at'])

    def test_merge_timestamps_update_delete_no_status_change(self):
        put_timestamp = next(self.ts)
        broker = self.broker_class(':memory:', account='a', container='c')
        broker.initialize(put_timestamp, storage_policy_index=int(self.policy))
        created_at = broker.get_info()['created_at']
        broker.merge_timestamps(created_at, put_timestamp, next(self.ts))
        orig_status_changed_at = broker.get_info()['status_changed_at']
        new_delete_timestamp = next(self.ts)
        broker.merge_timestamps(created_at, put_timestamp,
                                new_delete_timestamp)
        info = broker.get_info()
        self.assertEqual(new_delete_timestamp, info['delete_timestamp'])
        self.assertEqual(orig_status_changed_at, info['status_changed_at'])

    def test_get_max_row(self):
        broker = self.broker_class(':memory:', account='a', container='c')
        broker.initialize(next(self.ts), storage_policy_index=int(self.policy))
        # ROWIDs are 1-based; -1 means "no rows yet".
        self.assertEqual(-1, broker.get_max_row())
        self.put_item(broker, next(self.ts))
        self.assertEqual(1, broker.get_max_row())
        self.delete_item(broker, next(self.ts))
        self.assertEqual(2, broker.get_max_row())
        self.put_item(broker, next(self.ts))
        self.assertEqual(3, broker.get_max_row())

    def test_get_info(self):
        broker = self.broker_class(':memory:', account='test', container='c')
        created_at = time.time()
        with patch('swift.common.db.time.time', new=lambda: created_at):
            broker.initialize(Timestamp(1).internal,
                              storage_policy_index=int(self.policy))
        info = broker.get_info()
        count_key = '%s_count' % broker.db_contains_type
        expected = {
            count_key: 0,
            'created_at': Timestamp(created_at).internal,
            'put_timestamp': Timestamp(1).internal,
            'status_changed_at': Timestamp(1).internal,
            'delete_timestamp': '0',
        }
        for k, v in expected.items():
            self.assertEqual(info[k], v,
                             'mismatch for %s, %s != %s' % (
                                 k, info[k], v))

    def test_get_raw_metadata(self):
        broker = self.broker_class(':memory:', account='test', container='c')
        broker.initialize(Timestamp(0).internal,
                          storage_policy_index=int(self.policy))
        self.assertEqual(broker.metadata, {})
        self.assertEqual(broker.get_raw_metadata(), '')
        key = u'test\u062a'.encode('utf-8')
        value = u'value\u062a'
        metadata = {
            key: [value, Timestamp(1).internal]
        }
        broker.update_metadata(metadata)
        self.assertEqual(broker.metadata, metadata)
        self.assertEqual(broker.get_raw_metadata(),
                         json.dumps(metadata))

    def test_put_timestamp(self):
        broker = self.broker_class(':memory:', account='a', container='c')
        orig_put_timestamp = next(self.ts)
        broker.initialize(orig_put_timestamp,
                          storage_policy_index=int(self.policy))
        self.assertEqual(broker.get_info()['put_timestamp'],
                         orig_put_timestamp)
        # put_timestamp equal - no change
        broker.update_put_timestamp(orig_put_timestamp)
        self.assertEqual(broker.get_info()['put_timestamp'],
                         orig_put_timestamp)
        # put_timestamp newer - gets newer
        newer_put_timestamp = next(self.ts)
        broker.update_put_timestamp(newer_put_timestamp)
        self.assertEqual(broker.get_info()['put_timestamp'],
                         newer_put_timestamp)
        # put_timestamp older - no change
        broker.update_put_timestamp(orig_put_timestamp)
        self.assertEqual(broker.get_info()['put_timestamp'],
                         newer_put_timestamp)

    def test_status_changed_at(self):
        broker = self.broker_class(':memory:', account='test', container='c')
        put_timestamp = next(self.ts)
        created_at = time.time()
        with patch('swift.common.db.time.time', new=lambda: created_at):
            broker.initialize(put_timestamp,
                              storage_policy_index=int(self.policy))
        self.assertEqual(broker.get_info()['status_changed_at'],
                         put_timestamp)
        self.assertEqual(broker.get_info()['created_at'],
                         Timestamp(created_at).internal)
        status_changed_at = next(self.ts)
        broker.update_status_changed_at(status_changed_at)
        self.assertEqual(broker.get_info()['status_changed_at'],
                         status_changed_at)
        # save the old and get a new status_changed_at
        old_status_changed_at, status_changed_at = \
            status_changed_at, next(self.ts)
        broker.update_status_changed_at(status_changed_at)
        self.assertEqual(broker.get_info()['status_changed_at'],
                         status_changed_at)
        # status changed at won't go backwards...
        broker.update_status_changed_at(old_status_changed_at)
        self.assertEqual(broker.get_info()['status_changed_at'],
                         status_changed_at)

    def test_get_syncs(self):
        broker = self.broker_class(':memory:', account='a', container='c')
        broker.initialize(Timestamp(time.time()).internal,
                          storage_policy_index=int(self.policy))
        self.assertEqual([], broker.get_syncs())
        broker.merge_syncs([{'sync_point': 1, 'remote_id': 'remote1'}])
        self.assertEqual([{'sync_point': 1, 'remote_id': 'remote1'}],
                         broker.get_syncs())
        self.assertEqual([], broker.get_syncs(incoming=False))
        broker.merge_syncs([{'sync_point': 2, 'remote_id': 'remote2'}],
                           incoming=False)
        self.assertEqual([{'sync_point': 2, 'remote_id': 'remote2'}],
                         broker.get_syncs(incoming=False))

    @with_tempdir
    def test_commit_pending(self, tempdir):
        broker = self.broker_class(os.path.join(tempdir, 'test.db'),
                                   account='a', container='c')
        broker.initialize(next(self.ts),
                          storage_policy_index=int(self.policy))
        self.put_item(broker, next(self.ts))
        qry = 'select * from %s_stat' % broker.db_type
        with broker.get() as conn:
            rows = [dict(x) for x in conn.execute(qry)]
        info = rows[0]
        count_key = '%s_count' % broker.db_contains_type
        # The put is still parked in the pending file...
        self.assertEqual(0, info[count_key])
        # ...until get_info() flushes pending entries into the DB.
        broker.get_info()
        self.assertEqual(1, broker.get_info()[count_key])
class TestDatabaseBroker(unittest.TestCase):
    def setUp(self):
        # Fresh scratch directory for the on-disk broker databases.
        self.testdir = mkdtemp()
    def tearDown(self):
        # Best-effort cleanup of the scratch directory.
        rmtree(self.testdir, ignore_errors=1)
def test_DB_PREALLOCATION_setting(self):
u = uuid4().hex
b = DatabaseBroker(u)
swift.common.db.DB_PREALLOCATION = False
b._preallocate()
swift.common.db.DB_PREALLOCATION = True
self.assertRaises(OSError, b._preallocate)
def test_memory_db_init(self):
broker = DatabaseBroker(':memory:')
self.assertEqual(broker.db_file, ':memory:')
self.assertRaises(AttributeError, broker.initialize,
normalize_timestamp('0'))
def test_disk_db_init(self):
db_file = os.path.join(self.testdir, '1.db')
broker = DatabaseBroker(db_file)
self.assertEqual(broker.db_file, db_file)
self.assertTrue(broker.conn is None)
def test_disk_preallocate(self):
test_size = [-1]
def fallocate_stub(fd, size):
test_size[0] = size
with patch('swift.common.db.fallocate', fallocate_stub):
db_file = os.path.join(self.testdir, 'pre.db')
# Write 1 byte and hope that the fs will allocate less than 1 MB.
f = open(db_file, "w")
f.write('@')
f.close()
b = DatabaseBroker(db_file)
b._preallocate()
# We only wrote 1 byte, so we should end with the 1st step or 1 MB.
self.assertEqual(test_size[0], 1024 * 1024)
def test_initialize(self):
self.assertRaises(AttributeError,
DatabaseBroker(':memory:').initialize,
normalize_timestamp('1'))
stub_dict = {}
def stub(*args, **kwargs):
for key in stub_dict.keys():
del stub_dict[key]
stub_dict['args'] = args
for key, value in kwargs.items():
stub_dict[key] = value
broker = DatabaseBroker(':memory:')
broker._initialize = stub
broker.initialize(normalize_timestamp('1'))
self.assertTrue(hasattr(stub_dict['args'][0], 'execute'))
self.assertEqual(stub_dict['args'][1], '0000000001.00000')
with broker.get() as conn:
conn.execute('SELECT * FROM outgoing_sync')
conn.execute('SELECT * FROM incoming_sync')
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
broker._initialize = stub
broker.initialize(normalize_timestamp('1'))
self.assertTrue(hasattr(stub_dict['args'][0], 'execute'))
self.assertEqual(stub_dict['args'][1], '0000000001.00000')
with broker.get() as conn:
conn.execute('SELECT * FROM outgoing_sync')
conn.execute('SELECT * FROM incoming_sync')
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
broker._initialize = stub
self.assertRaises(DatabaseAlreadyExists,
broker.initialize, normalize_timestamp('1'))
def test_delete_db(self):
def init_stub(conn, put_timestamp, **kwargs):
conn.execute('CREATE TABLE test (one TEXT)')
conn.execute('CREATE TABLE test_stat (id TEXT)')
conn.execute('INSERT INTO test_stat (id) VALUES (?)',
(str(uuid4),))
conn.execute('INSERT INTO test (one) VALUES ("1")')
conn.commit()
stub_called = [False]
def delete_stub(*a, **kw):
stub_called[0] = True
broker = DatabaseBroker(':memory:')
broker.db_type = 'test'
broker._initialize = init_stub
# Initializes a good broker for us
broker.initialize(normalize_timestamp('1'))
self.assertTrue(broker.conn is not None)
broker._delete_db = delete_stub
stub_called[0] = False
broker.delete_db('2')
self.assertTrue(stub_called[0])
broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
broker.db_type = 'test'
broker._initialize = init_stub
broker.initialize(normalize_timestamp('1'))
broker._delete_db = delete_stub
stub_called[0] = False
broker.delete_db('2')
self.assertTrue(stub_called[0])
# ensure that metadata was cleared
m2 = broker.metadata
self.assertTrue(not any(v[0] for v in m2.itervalues()))
self.assertTrue(all(v[1] == normalize_timestamp('2')
for v in m2.itervalues()))
    def test_get(self):
        """get() yields a usable connection, rolls back interrupted
        transactions, and quarantines malformed/corrupted database files."""
        broker = DatabaseBroker(':memory:')
        got_exc = False
        try:
            with broker.get() as conn:
                conn.execute('SELECT 1')
        except Exception:
            got_exc = True
        # NOTE(review): got_exc from the :memory: block above is never
        # asserted; it is immediately reset for the on-disk case below.
        broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
        got_exc = False
        try:
            with broker.get() as conn:
                conn.execute('SELECT 1')
        except Exception:
            got_exc = True
        # Using an uninitialized on-disk DB must fail.
        self.assertTrue(got_exc)
        def stub(*args, **kwargs):
            pass
        broker._initialize = stub
        broker.initialize(normalize_timestamp('1'))
        with broker.get() as conn:
            conn.execute('CREATE TABLE test (one TEXT)')
        try:
            with broker.get() as conn:
                conn.execute('INSERT INTO test (one) VALUES ("1")')
                raise Exception('test')
                conn.commit()
        except Exception:
            pass
        broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
        with broker.get() as conn:
            # The interrupted transaction above must have been rolled back.
            self.assertEqual(
                [r[0] for r in conn.execute('SELECT * FROM test')], [])
        with broker.get() as conn:
            conn.execute('INSERT INTO test (one) VALUES ("1")')
            conn.commit()
        broker = DatabaseBroker(os.path.join(self.testdir, '1.db'))
        with broker.get() as conn:
            # A committed insert persists across broker instances.
            self.assertEqual(
                [r[0] for r in conn.execute('SELECT * FROM test')], ['1'])
        dbpath = os.path.join(self.testdir, 'dev', 'dbs', 'par', 'pre', 'db')
        mkdirs(dbpath)
        qpath = os.path.join(self.testdir, 'dev', 'quarantined', 'tests', 'db')
        # Stub out renamer so quarantining does not really move the files.
        with patch('swift.common.db.renamer', lambda a, b,
                   fsync: b):
            # Test malformed database
            copy(os.path.join(os.path.dirname(__file__),
                              'malformed_example.db'),
                 os.path.join(dbpath, '1.db'))
            broker = DatabaseBroker(os.path.join(dbpath, '1.db'))
            broker.db_type = 'test'
            exc = None
            try:
                with broker.get() as conn:
                    conn.execute('SELECT * FROM test')
            except Exception as err:
                exc = err
            self.assertEqual(
                str(exc),
                'Quarantined %s to %s due to malformed database' %
                (dbpath, qpath))
            # Test corrupted database
            copy(os.path.join(os.path.dirname(__file__),
                              'corrupted_example.db'),
                 os.path.join(dbpath, '1.db'))
            broker = DatabaseBroker(os.path.join(dbpath, '1.db'))
            broker.db_type = 'test'
            exc = None
            try:
                with broker.get() as conn:
                    conn.execute('SELECT * FROM test')
            except Exception as err:
                exc = err
            self.assertEqual(
                str(exc),
                'Quarantined %s to %s due to corrupted database' %
                (dbpath, qpath))
    def test_lock(self):
        """lock() serializes access between brokers on the same file and
        releases cleanly even when the locked section raises."""
        broker = DatabaseBroker(os.path.join(self.testdir, '1.db'), timeout=.1)
        got_exc = False
        try:
            with broker.lock():
                pass
        except Exception:
            got_exc = True
        # Locking an uninitialized broker must fail.
        self.assertTrue(got_exc)
        def stub(*args, **kwargs):
            pass
        broker._initialize = stub
        broker.initialize(normalize_timestamp('1'))
        with broker.lock():
            pass
        with broker.lock():
            pass
        broker2 = DatabaseBroker(os.path.join(self.testdir, '1.db'),
                                 timeout=.1)
        broker2._initialize = stub
        with broker.lock():
            got_exc = False
            try:
                # A second broker on the same file must time out while the
                # first broker holds the lock.
                with broker2.lock():
                    pass
            except LockTimeout:
                got_exc = True
            self.assertTrue(got_exc)
        try:
            with broker.lock():
                raise Exception('test')
        except Exception:
            pass
        # The lock must have been released despite the exception above.
        with broker.lock():
            pass
    def test_newid(self):
        """newid() assigns a fresh database id and records an incoming sync
        point for the supplied remote id at the current max row."""
        broker = DatabaseBroker(':memory:')
        broker.db_type = 'test'
        broker.db_contains_type = 'test'
        uuid1 = str(uuid4())
        def _initialize(conn, timestamp, **kwargs):
            conn.execute('CREATE TABLE test (one TEXT)')
            conn.execute('CREATE TABLE test_stat (id TEXT)')
            conn.execute('INSERT INTO test_stat (id) VALUES (?)', (uuid1,))
            conn.commit()
        broker._initialize = _initialize
        broker.initialize(normalize_timestamp('1'))
        uuid2 = str(uuid4())
        broker.newid(uuid2)
        with broker.get() as conn:
            uuids = [r[0] for r in conn.execute('SELECT * FROM test_stat')]
            self.assertEqual(len(uuids), 1)
            # The broker's own id must have been replaced.
            self.assertNotEqual(uuids[0], uuid1)
            uuid1 = uuids[0]
            points = [(r[0], r[1]) for r in conn.execute(
                'SELECT sync_point, '
                'remote_id FROM incoming_sync WHERE remote_id = ?', (uuid2,))]
            self.assertEqual(len(points), 1)
            # No rows yet, so the recorded sync point starts at -1.
            self.assertEqual(points[0][0], -1)
            self.assertEqual(points[0][1], uuid2)
            conn.execute('INSERT INTO test (one) VALUES ("1")')
            conn.commit()
        uuid3 = str(uuid4())
        broker.newid(uuid3)
        with broker.get() as conn:
            uuids = [r[0] for r in conn.execute('SELECT * FROM test_stat')]
            self.assertEqual(len(uuids), 1)
            self.assertNotEqual(uuids[0], uuid1)
            uuid1 = uuids[0]
            points = [(r[0], r[1]) for r in conn.execute(
                'SELECT sync_point, '
                'remote_id FROM incoming_sync WHERE remote_id = ?', (uuid3,))]
            self.assertEqual(len(points), 1)
            self.assertEqual(points[0][1], uuid3)
        broker.newid(uuid2)
        with broker.get() as conn:
            uuids = [r[0] for r in conn.execute('SELECT * FROM test_stat')]
            self.assertEqual(len(uuids), 1)
            self.assertNotEqual(uuids[0], uuid1)
            points = [(r[0], r[1]) for r in conn.execute(
                'SELECT sync_point, '
                'remote_id FROM incoming_sync WHERE remote_id = ?', (uuid2,))]
            self.assertEqual(len(points), 1)
            self.assertEqual(points[0][1], uuid2)
def test_get_items_since(self):
broker = DatabaseBroker(':memory:')
broker.db_type = 'test'
broker.db_contains_type = 'test'
def _initialize(conn, timestamp, **kwargs):
conn.execute('CREATE TABLE test (one TEXT)')
conn.execute('INSERT INTO test (one) VALUES ("1")')
conn.execute('INSERT INTO test (one) VALUES ("2")')
conn.execute('INSERT INTO test (one) VALUES ("3")')
conn.commit()
broker._initialize = _initialize
broker.initialize(normalize_timestamp('1'))
self.assertEqual(broker.get_items_since(-1, 10),
[{'one': '1'}, {'one': '2'}, {'one': '3'}])
self.assertEqual(broker.get_items_since(-1, 2),
[{'one': '1'}, {'one': '2'}])
self.assertEqual(broker.get_items_since(1, 2),
[{'one': '2'}, {'one': '3'}])
self.assertEqual(broker.get_items_since(3, 2), [])
self.assertEqual(broker.get_items_since(999, 2), [])
def test_get_sync(self):
broker = DatabaseBroker(':memory:')
broker.db_type = 'test'
broker.db_contains_type = 'test'
uuid1 = str(uuid4())
def _initialize(conn, timestamp, **kwargs):
conn.execute('CREATE TABLE test (one TEXT)')
conn.execute('CREATE TABLE test_stat (id TEXT)')
conn.execute('INSERT INTO test_stat (id) VALUES (?)', (uuid1,))
conn.execute('INSERT INTO test (one) VALUES ("1")')
conn.commit()
pass
broker._initialize = _initialize
broker.initialize(normalize_timestamp('1'))
uuid2 = str(uuid4())
self.assertEqual(broker.get_sync(uuid2), -1)
broker.newid(uuid2)
self.assertEqual(broker.get_sync(uuid2), 1)
uuid3 = str(uuid4())
self.assertEqual(broker.get_sync(uuid3), -1)
with broker.get() as conn:
conn.execute('INSERT INTO test (one) VALUES ("2")')
conn.commit()
broker.newid(uuid3)
self.assertEqual(broker.get_sync(uuid2), 1)
self.assertEqual(broker.get_sync(uuid3), 2)
self.assertEqual(broker.get_sync(uuid2, incoming=False), -1)
self.assertEqual(broker.get_sync(uuid3, incoming=False), -1)
broker.merge_syncs([{'sync_point': 1, 'remote_id': uuid2}],
incoming=False)
self.assertEqual(broker.get_sync(uuid2), 1)
self.assertEqual(broker.get_sync(uuid3), 2)
self.assertEqual(broker.get_sync(uuid2, incoming=False), 1)
self.assertEqual(broker.get_sync(uuid3, incoming=False), -1)
broker.merge_syncs([{'sync_point': 2, 'remote_id': uuid3}],
incoming=False)
self.assertEqual(broker.get_sync(uuid2, incoming=False), 1)
self.assertEqual(broker.get_sync(uuid3, incoming=False), 2)
def test_merge_syncs(self):
broker = DatabaseBroker(':memory:')
def stub(*args, **kwargs):
pass
broker._initialize = stub
broker.initialize(normalize_timestamp('1'))
uuid2 = str(uuid4())
broker.merge_syncs([{'sync_point': 1, 'remote_id': uuid2}])
self.assertEqual(broker.get_sync(uuid2), 1)
uuid3 = str(uuid4())
broker.merge_syncs([{'sync_point': 2, 'remote_id': uuid3}])
self.assertEqual(broker.get_sync(uuid2), 1)
self.assertEqual(broker.get_sync(uuid3), 2)
self.assertEqual(broker.get_sync(uuid2, incoming=False), -1)
self.assertEqual(broker.get_sync(uuid3, incoming=False), -1)
broker.merge_syncs([{'sync_point': 3, 'remote_id': uuid2},
{'sync_point': 4, 'remote_id': uuid3}],
incoming=False)
self.assertEqual(broker.get_sync(uuid2, incoming=False), 3)
self.assertEqual(broker.get_sync(uuid3, incoming=False), 4)
self.assertEqual(broker.get_sync(uuid2), 1)
self.assertEqual(broker.get_sync(uuid3), 2)
broker.merge_syncs([{'sync_point': 5, 'remote_id': uuid2}])
self.assertEqual(broker.get_sync(uuid2), 5)
    def test_get_replication_info(self):
        """Exercise get_replication_info() without a metadata column."""
        self.get_replication_info_tester(metadata=False)
    def test_get_replication_info_with_metadata(self):
        """Exercise get_replication_info() with a metadata column present."""
        self.get_replication_info_tester(metadata=True)
    def get_replication_info_tester(self, metadata=False):
        """Build a broker over a container-like schema and verify
        get_replication_info() before and after inserts and deletes.

        Returns the initialized broker so other tests can reuse it.
        """
        broker = DatabaseBroker(':memory:', account='a')
        broker.db_type = 'test'
        broker.db_contains_type = 'test'
        broker_creation = normalize_timestamp(1)
        broker_uuid = str(uuid4())
        broker_metadata = metadata and json.dumps(
            {'Test': ('Value', normalize_timestamp(1))}) or ''
        def _initialize(conn, put_timestamp, **kwargs):
            if put_timestamp is None:
                put_timestamp = normalize_timestamp(0)
            # Triggers keep test_count and the rolling hash in sync with
            # the data table, mirroring the real container/account schema.
            conn.executescript('''
                CREATE TABLE test (
                    ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
                    name TEXT UNIQUE,
                    created_at TEXT
                );
                CREATE TRIGGER test_insert AFTER INSERT ON test
                BEGIN
                    UPDATE test_stat
                    SET test_count = test_count + 1,
                        hash = chexor(hash, new.name, new.created_at);
                END;
                CREATE TRIGGER test_update BEFORE UPDATE ON test
                BEGIN
                    SELECT RAISE(FAIL,
                                 'UPDATE not allowed; DELETE and INSERT');
                END;
                CREATE TRIGGER test_delete AFTER DELETE ON test
                BEGIN
                    UPDATE test_stat
                    SET test_count = test_count - 1,
                        hash = chexor(hash, old.name, old.created_at);
                END;
                CREATE TABLE test_stat (
                    account TEXT,
                    created_at TEXT,
                    put_timestamp TEXT DEFAULT '0',
                    delete_timestamp TEXT DEFAULT '0',
                    status_changed_at TEXT DEFAULT '0',
                    test_count INTEGER,
                    hash TEXT default '00000000000000000000000000000000',
                    id TEXT
                    %s
                );
                INSERT INTO test_stat (test_count) VALUES (0);
            ''' % (metadata and ", metadata TEXT DEFAULT ''" or ""))
            conn.execute('''
                UPDATE test_stat
                SET account = ?, created_at = ?, id = ?, put_timestamp = ?,
                    status_changed_at = ?
            ''', (broker.account, broker_creation, broker_uuid, put_timestamp,
                  put_timestamp))
            if metadata:
                conn.execute('UPDATE test_stat SET metadata = ?',
                             (broker_metadata,))
            conn.commit()
        broker._initialize = _initialize
        put_timestamp = normalize_timestamp(2)
        broker.initialize(put_timestamp)
        # Fresh database: zero count, empty hash, max_row of -1.
        info = broker.get_replication_info()
        self.assertEqual(info, {
            'account': broker.account, 'count': 0,
            'hash': '00000000000000000000000000000000',
            'created_at': broker_creation, 'put_timestamp': put_timestamp,
            'delete_timestamp': '0', 'status_changed_at': put_timestamp,
            'max_row': -1, 'id': broker_uuid, 'metadata': broker_metadata})
        insert_timestamp = normalize_timestamp(3)
        with broker.get() as conn:
            conn.execute('''
                INSERT INTO test (name, created_at) VALUES ('test', ?)
            ''', (insert_timestamp,))
            conn.commit()
        # One row: count and max_row become 1 and the hash changes.
        info = broker.get_replication_info()
        self.assertEqual(info, {
            'account': broker.account, 'count': 1,
            'hash': 'bdc4c93f574b0d8c2911a27ce9dd38ba',
            'created_at': broker_creation, 'put_timestamp': put_timestamp,
            'delete_timestamp': '0', 'status_changed_at': put_timestamp,
            'max_row': 1, 'id': broker_uuid, 'metadata': broker_metadata})
        with broker.get() as conn:
            conn.execute('DELETE FROM test')
            conn.commit()
        # After deletion the count/hash reset but max_row stays at 1.
        info = broker.get_replication_info()
        self.assertEqual(info, {
            'account': broker.account, 'count': 0,
            'hash': '00000000000000000000000000000000',
            'created_at': broker_creation, 'put_timestamp': put_timestamp,
            'delete_timestamp': '0', 'status_changed_at': put_timestamp,
            'max_row': 1, 'id': broker_uuid, 'metadata': broker_metadata})
        return broker
    def test_metadata(self):
        """Metadata items survive updates; an empty value marks a
        deletion that _reclaim purges only once its timestamp passes."""
        def reclaim(broker, timestamp):
            with broker.get() as conn:
                broker._reclaim(conn, timestamp)
                conn.commit()
        # Initializes a good broker for us
        broker = self.get_replication_info_tester(metadata=True)
        # Add our first item
        first_timestamp = normalize_timestamp(1)
        first_value = '1'
        broker.update_metadata({'First': [first_value, first_timestamp]})
        self.assertTrue('First' in broker.metadata)
        self.assertEqual(broker.metadata['First'],
                         [first_value, first_timestamp])
        # Add our second item
        second_timestamp = normalize_timestamp(2)
        second_value = '2'
        broker.update_metadata({'Second': [second_value, second_timestamp]})
        self.assertTrue('First' in broker.metadata)
        self.assertEqual(broker.metadata['First'],
                         [first_value, first_timestamp])
        self.assertTrue('Second' in broker.metadata)
        self.assertEqual(broker.metadata['Second'],
                         [second_value, second_timestamp])
        # Update our first item
        first_timestamp = normalize_timestamp(3)
        first_value = '1b'
        broker.update_metadata({'First': [first_value, first_timestamp]})
        self.assertTrue('First' in broker.metadata)
        self.assertEqual(broker.metadata['First'],
                         [first_value, first_timestamp])
        self.assertTrue('Second' in broker.metadata)
        self.assertEqual(broker.metadata['Second'],
                         [second_value, second_timestamp])
        # Delete our second item (by setting to empty string)
        second_timestamp = normalize_timestamp(4)
        second_value = ''
        broker.update_metadata({'Second': [second_value, second_timestamp]})
        self.assertTrue('First' in broker.metadata)
        self.assertEqual(broker.metadata['First'],
                         [first_value, first_timestamp])
        # The tombstone (empty value) is still visible until reclaimed.
        self.assertTrue('Second' in broker.metadata)
        self.assertEqual(broker.metadata['Second'],
                         [second_value, second_timestamp])
        # Reclaim at point before second item was deleted
        reclaim(broker, normalize_timestamp(3))
        self.assertTrue('First' in broker.metadata)
        self.assertEqual(broker.metadata['First'],
                         [first_value, first_timestamp])
        self.assertTrue('Second' in broker.metadata)
        self.assertEqual(broker.metadata['Second'],
                         [second_value, second_timestamp])
        # Reclaim at point second item was deleted
        reclaim(broker, normalize_timestamp(4))
        self.assertTrue('First' in broker.metadata)
        self.assertEqual(broker.metadata['First'],
                         [first_value, first_timestamp])
        self.assertTrue('Second' in broker.metadata)
        self.assertEqual(broker.metadata['Second'],
                         [second_value, second_timestamp])
        # Reclaim after point second item was deleted
        reclaim(broker, normalize_timestamp(5))
        self.assertTrue('First' in broker.metadata)
        self.assertEqual(broker.metadata['First'],
                         [first_value, first_timestamp])
        self.assertTrue('Second' not in broker.metadata)
    @patch.object(DatabaseBroker, 'validate_metadata')
    def test_validate_metadata_is_called_from_update_metadata(self, mock):
        """validate_metadata=True must route through validate_metadata()."""
        broker = self.get_replication_info_tester(metadata=True)
        first_timestamp = normalize_timestamp(1)
        first_value = '1'
        metadata = {'First': [first_value, first_timestamp]}
        broker.update_metadata(metadata, validate_metadata=True)
        self.assertTrue(mock.called)
    @patch.object(DatabaseBroker, 'validate_metadata')
    def test_validate_metadata_is_not_called_from_update_metadata(self, mock):
        """By default update_metadata() must skip validation."""
        broker = self.get_replication_info_tester(metadata=True)
        first_timestamp = normalize_timestamp(1)
        first_value = '1'
        metadata = {'First': [first_value, first_timestamp]}
        broker.update_metadata(metadata)
        self.assertFalse(mock.called)
def test_metadata_with_max_count(self):
metadata = {}
for c in range(MAX_META_COUNT):
key = 'X-Account-Meta-F{0}'.format(c)
metadata[key] = ('B', normalize_timestamp(1))
key = 'X-Account-Meta-Foo'.format(c)
metadata[key] = ('', normalize_timestamp(1))
try:
DatabaseBroker.validate_metadata(metadata)
except HTTPException:
self.fail('Unexpected HTTPException')
def test_metadata_raises_exception_on_non_utf8(self):
def try_validate(metadata):
try:
DatabaseBroker.validate_metadata(metadata)
except HTTPException as e:
self.assertEqual(str(e), '400 Bad Request')
else:
self.fail('HTTPException not raised')
ts = normalize_timestamp(1)
try_validate({'X-Account-Meta-Foo': (b'\xff', ts)})
try_validate({b'X-Container-Meta-\xff': ('bar', ts)})
def test_metadata_raises_exception_over_max_count(self):
metadata = {}
for c in range(MAX_META_COUNT + 1):
key = 'X-Account-Meta-F{0}'.format(c)
metadata[key] = ('B', normalize_timestamp(1))
message = ''
try:
DatabaseBroker.validate_metadata(metadata)
except HTTPException as e:
message = str(e)
self.assertEqual(message, '400 Bad Request')
    def test_metadata_with_max_overall_size(self):
        """Metadata totalling exactly MAX_META_OVERALL_SIZE must validate."""
        metadata = {}
        metadata_value = 'v' * MAX_META_VALUE_LENGTH
        size = 0
        x = 0
        # Fill with maximum-length values while a whole item still fits.
        while size < (MAX_META_OVERALL_SIZE - 4
                      - MAX_META_VALUE_LENGTH):
            size += 4 + MAX_META_VALUE_LENGTH
            metadata['X-Account-Meta-%04d' % x] = (metadata_value,
                                                   normalize_timestamp(1))
            x += 1
        # Top up with one shorter value so the total reaches the limit.
        if MAX_META_OVERALL_SIZE - size > 1:
            metadata['X-Account-Meta-k'] = (
                'v' * (MAX_META_OVERALL_SIZE - size - 1),
                normalize_timestamp(1))
        try:
            DatabaseBroker.validate_metadata(metadata)
        except HTTPException:
            self.fail('Unexpected HTTPException')
    def test_metadata_raises_exception_over_max_overall_size(self):
        """One extra byte beyond MAX_META_OVERALL_SIZE must yield a 400."""
        metadata = {}
        metadata_value = 'k' * MAX_META_VALUE_LENGTH
        size = 0
        x = 0
        # Fill with maximum-length values while a whole item still fits.
        while size < (MAX_META_OVERALL_SIZE - 4
                      - MAX_META_VALUE_LENGTH):
            size += 4 + MAX_META_VALUE_LENGTH
            metadata['X-Account-Meta-%04d' % x] = (metadata_value,
                                                   normalize_timestamp(1))
            x += 1
        if MAX_META_OVERALL_SIZE - size > 1:
            metadata['X-Account-Meta-k'] = (
                'v' * (MAX_META_OVERALL_SIZE - size - 1),
                normalize_timestamp(1))
        # This final item pushes the total over the limit.
        metadata['X-Account-Meta-k2'] = ('v', normalize_timestamp(1))
        message = ''
        try:
            DatabaseBroker.validate_metadata(metadata)
        except HTTPException as e:
            message = str(e)
        self.assertEqual(message, '400 Bad Request')
    def test_possibly_quarantine_disk_error(self):
        """A disk I/O error must quarantine the db file and re-raise an
        error naming the quarantine destination."""
        dbpath = os.path.join(self.testdir, 'dev', 'dbs', 'par', 'pre', 'db')
        mkdirs(dbpath)
        qpath = os.path.join(self.testdir, 'dev', 'quarantined', 'tests', 'db')
        broker = DatabaseBroker(os.path.join(dbpath, '1.db'))
        broker.db_type = 'test'
        def stub():
            raise sqlite3.OperationalError('disk I/O error')
        try:
            stub()
        except Exception:
            try:
                # Hand live exc_info to the broker as the db layer would.
                broker.possibly_quarantine(*sys.exc_info())
            except Exception as exc:
                self.assertEqual(
                    str(exc),
                    'Quarantined %s to %s due to disk error '
                    'while accessing database' %
                    (dbpath, qpath))
            else:
                self.fail('Expected an exception to be raised')
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
# -*- coding: utf-8 -*-
from itertools import count
import os
import logging
import warnings
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from django.db.backends.base.creation import TEST_DATABASE_PREFIX
from django_extensions.settings import SQLITE_ENGINES, POSTGRESQL_ENGINES, MYSQL_ENGINES
from django_extensions.management.mysql import parse_mysql_cnf
from django_extensions.management.utils import signalcommand
from django_extensions.utils.deprecation import RemovedInNextVersionWarning
class Command(BaseCommand):
    """Drop this project's test database, including any ``--parallel``
    clones (``test_foo_1``, ``test_foo_2``, ...), for sqlite, MySQL, or
    PostgreSQL backends."""
    help = "Drops test database for this project."
    def add_arguments(self, parser):
        """Register command-line options on top of BaseCommand's."""
        super().add_arguments(parser)
        parser.add_argument(
            '--noinput', action='store_false', dest='interactive',
            default=True, help='Tells Django to NOT prompt the user for input of any kind.'
        )
        parser.add_argument(
            '-U', '--user', action='store', dest='user', default=None,
            help='Use another user for the database then defined in settings.py'
        )
        parser.add_argument(
            '-P', '--password', action='store', dest='password', default=None,
            help='Use another password for the database then defined in settings.py'
        )
        parser.add_argument(
            '-D', '--dbname', action='store', dest='dbname', default=None,
            help='Use another database name then defined in settings.py'
        )
        parser.add_argument(
            '-R', '--router', action='store', dest='router', default=DEFAULT_DB_ALIAS,
            help='Use this router-database other then defined in settings.py'
        )
        parser.add_argument(
            '--database', default=DEFAULT_DB_ALIAS,
            help='Nominates a database to run command for. Defaults to the "%s" database.' % DEFAULT_DB_ALIAS,
        )
    @signalcommand
    def handle(self, *args, **options):
        """Drop test database for this project."""
        database = options['database']
        # --router is the deprecated spelling of --database.
        if options['router'] != DEFAULT_DB_ALIAS:
            warnings.warn("--router is deprecated. You should use --database.", RemovedInNextVersionWarning, stacklevel=2)
            database = options['router']
        dbinfo = settings.DATABASES.get(database)
        if dbinfo is None:
            raise CommandError("Unknown database %s" % database)
        engine = dbinfo.get('ENGINE')
        user = password = database_name = database_host = database_port = ''
        if engine == 'mysql':
            # Legacy shorthand engine name: read connection info from .my.cnf.
            (user, password, database_name, database_host, database_port) = parse_mysql_cnf(dbinfo)
        # Command-line options win over settings, which win over .my.cnf.
        user = options['user'] or dbinfo.get('USER') or user
        password = options['password'] or dbinfo.get('PASSWORD') or password
        try:
            database_name = dbinfo['TEST']['NAME']
        except KeyError:
            database_name = None
        if database_name is None:
            # Mirror Django's default test database naming: test_<NAME>.
            database_name = TEST_DATABASE_PREFIX + (options['dbname'] or dbinfo.get('NAME'))
        if database_name is None or database_name == '':
            raise CommandError("You need to specify DATABASE_NAME in your Django settings file.")
        database_host = dbinfo.get('HOST') or database_host
        database_port = dbinfo.get('PORT') or database_port
        verbosity = options["verbosity"]
        if options['interactive']:
            confirm = input("""
You have requested to drop all test databases.
This will IRREVERSIBLY DESTROY
ALL data in the database "{db_name}"
and all cloned test databases generated via
the "--parallel" flag (these are sequentially
named "{db_name}_1", "{db_name}_2", etc.).
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """.format(db_name=database_name))
        else:
            confirm = 'yes'
        if confirm != 'yes':
            print("Reset cancelled.")
            return
        def get_database_names(formatter):
            """
            Return a generator of all possible test database names.
            e.g., 'test_foo', 'test_foo_1', test_foo_2', etc.
            formatter: func returning a clone db name given the primary db name
            and the clone's number, e.g., 'test_foo_1' for mysql/postgres, and
            'test_foo_1..sqlite3' for sqlite (re: double dots, see comments).
            """
            yield database_name
            yield from (formatter(database_name, n) for n in count(1))
        if engine in SQLITE_ENGINES:
            # By default all sqlite test databases are created in memory.
            # There will only be database files to delete if the developer has
            # specified a test database name, which forces files to be written
            # to disk.
            logging.info("Unlinking %s databases" % engine)
            def format_filename(name, number):
                filename, ext = os.path.splitext(name)
                # Since splitext() includes the dot in 'ext', the inclusion of
                # the dot in the format string below is incorrect and creates a
                # double dot. Django makes this mistake, so it must be
                # replicated here. If fixed in Django, this code should be
                # updated accordingly.
                # Reference: https://code.djangoproject.com/ticket/32582
                return '{}_{}.{}'.format(filename, number, ext)
            try:
                for db_name in get_database_names(format_filename):
                    # Stop at the first missing clone file.
                    if not os.path.isfile(db_name):
                        break
                    logging.info('Unlinking database named "%s"' % db_name)
                    os.unlink(db_name)
            except OSError:
                return
        elif engine in MYSQL_ENGINES:
            import MySQLdb as Database
            kwargs = {
                'user': user,
                'passwd': password,
            }
            if database_host.startswith('/'):
                # A leading slash means a unix socket path, not a hostname.
                kwargs['unix_socket'] = database_host
            else:
                kwargs['host'] = database_host
            if database_port:
                kwargs['port'] = int(database_port)
            connection = Database.connect(**kwargs)
            cursor = connection.cursor()
            for db_name in get_database_names('{}_{}'.format):
                # NOTE(review): db_name is interpolated into SQL unescaped;
                # it originates from settings/CLI, but parameterizing would
                # be safer -- confirm before exposing to untrusted input.
                exists_query = \
                    "SELECT SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA WHERE SCHEMA_NAME='%s';" \
                    % db_name
                row_count = cursor.execute(exists_query)
                # Stop at the first clone name that does not exist.
                if row_count < 1:
                    break
                drop_query = 'DROP DATABASE IF EXISTS `%s`' % db_name
                logging.info('Executing: "' + drop_query + '"')
                cursor.execute(drop_query)
        elif engine in POSTGRESQL_ENGINES:
            import psycopg2 as Database  # NOQA
            conn_params = {'database': 'template1'}
            if user:
                conn_params['user'] = user
            if password:
                conn_params['password'] = password
            if database_host:
                conn_params['host'] = database_host
            if database_port:
                conn_params['port'] = database_port
            connection = Database.connect(**conn_params)
            # Level 0 is ISOLATION_LEVEL_AUTOCOMMIT: DROP DATABASE cannot
            # run inside a transaction block.
            connection.set_isolation_level(0)
            cursor = connection.cursor()
            for db_name in get_database_names('{}_{}'.format):
                exists_query = "SELECT datname FROM pg_catalog.pg_database WHERE datname='%s';" \
                    % db_name
                try:
                    cursor.execute(exists_query)
                    # NOTE: Unlike MySQLdb, the psycopg2 cursor does not return the row count
                    # however both cursors provide it as a property
                    if cursor.rowcount < 1:
                        break
                    drop_query = "DROP DATABASE IF EXISTS \"%s\";" % db_name
                    logging.info('Executing: "' + drop_query + '"')
                    cursor.execute(drop_query)
                except Database.ProgrammingError as e:
                    logging.exception("Error: %s" % str(e))
                    return
        else:
            raise CommandError("Unknown database engine %s" % engine)
        if verbosity >= 2 or options['interactive']:
            print("Reset successful.")
| |
import json
from mitmproxy.test import tflow
import os
import shlex
from mitmproxy import options
from mitmproxy import contentviews
from mitmproxy import proxy
from mitmproxy.addons import script
from mitmproxy import master
from mitmproxy.test import tutils
from mitmproxy.net.http import Headers
from mitmproxy.net.http import cookies
from . import mastertest
# Path helper pointing at the examples/ directory shipped with mitmproxy.
example_dir = tutils.test_data.push("../examples")
class ScriptError(Exception):
    """Raised when a loaded example script logs at warn/error level."""
    pass
class RaiseMaster(master.Master):
    """Master that escalates warn/error log entries to ScriptError so
    tests fail loudly instead of merely logging."""
    def add_log(self, e, level):
        if level in ("warn", "error"):
            raise ScriptError(e)
def tscript(cmd, args=""):
    """Load an example script (with optional arguments) into a fresh
    RaiseMaster and return ``(master, script_addon)``."""
    opts = options.Options()
    full_cmd = "{} {}".format(example_dir.path(cmd), args)
    m = RaiseMaster(opts, proxy.DummyServer())
    sc = script.Script(full_cmd)
    m.addons.add(sc)
    return m, sc
class TestScripts(mastertest.MasterTest):
    """Run the shipped example scripts against synthetic flows."""
    def test_add_header(self):
        m, _ = tscript("simple/add_header.py")
        f = tflow.tflow(resp=tutils.tresp())
        m.response(f)
        assert f.response.headers["newheader"] == "foo"
    def test_custom_contentviews(self):
        m, sc = tscript("simple/custom_contentview.py")
        swapcase = contentviews.get("swapcase")
        _, fmt = swapcase(b"<html>Test!</html>")
        assert any(b'tEST!' in val[0][1] for val in fmt)
    def test_iframe_injector(self):
        # Without an iframe URL argument the script must refuse to load.
        with tutils.raises(ScriptError):
            tscript("simple/modify_body_inject_iframe.py")
        m, sc = tscript("simple/modify_body_inject_iframe.py", "http://example.org/evil_iframe")
        f = tflow.tflow(resp=tutils.tresp(content=b"<html><body>mitmproxy</body></html>"))
        m.response(f)
        content = f.response.content
        assert b'iframe' in content and b'evil_iframe' in content
    def test_modify_form(self):
        m, sc = tscript("simple/modify_form.py")
        form_header = Headers(content_type="application/x-www-form-urlencoded")
        f = tflow.tflow(req=tutils.treq(headers=form_header))
        m.request(f)
        assert f.request.urlencoded_form["mitmproxy"] == "rocks"
        # Without a form content type the script replaces the whole form.
        f.request.headers["content-type"] = ""
        m.request(f)
        assert list(f.request.urlencoded_form.items()) == [("foo", "bar")]
    def test_modify_querystring(self):
        m, sc = tscript("simple/modify_querystring.py")
        f = tflow.tflow(req=tutils.treq(path="/search?q=term"))
        m.request(f)
        assert f.request.query["mitmproxy"] == "rocks"
        # The parameter is added even when the path has no query string.
        f.request.path = "/"
        m.request(f)
        assert f.request.query["mitmproxy"] == "rocks"
    def test_arguments(self):
        m, sc = tscript("simple/script_arguments.py", "mitmproxy rocks")
        f = tflow.tflow(resp=tutils.tresp(content=b"I <3 mitmproxy"))
        m.response(f)
        assert f.response.content == b"I <3 rocks"
    def test_redirect_requests(self):
        m, sc = tscript("simple/redirect_requests.py")
        f = tflow.tflow(req=tutils.treq(host="example.org"))
        m.request(f)
        assert f.request.host == "mitmproxy.org"
    def test_send_reply_from_proxy(self):
        m, sc = tscript("simple/send_reply_from_proxy.py")
        f = tflow.tflow(req=tutils.treq(host="example.com", port=80))
        m.request(f)
        assert f.response.content == b"Hello World"
    def test_dns_spoofing(self):
        m, sc = tscript("complex/dns_spoofing.py")
        original_host = "example.com"
        host_header = Headers(host=original_host)
        f = tflow.tflow(req=tutils.treq(headers=host_header, port=80))
        m.requestheaders(f)
        # Rewrite by reverse proxy mode
        f.request.scheme = "https"
        f.request.host = "mitmproxy.org"
        f.request.port = 443
        m.request(f)
        # The script must restore the original scheme/host/port.
        assert f.request.scheme == "http"
        assert f.request.host == original_host
        assert f.request.port == 80
        assert f.request.headers["Host"] == original_host
class TestHARDump:
    """Exercise the har_dump.py example addon end to end."""
    def flow(self, resp_content=b'message'):
        times = dict(
            timestamp_start=746203272,
            timestamp_end=746203272,
        )
        # Create a dummy flow for testing
        return tflow.tflow(
            req=tutils.treq(method=b'GET', **times),
            resp=tutils.tresp(content=resp_content, **times)
        )
    def test_no_file_arg(self):
        # The script requires a dump-file path argument to load.
        with tutils.raises(ScriptError):
            tscript("complex/har_dump.py")
    def test_simple(self):
        with tutils.tmpdir() as tdir:
            path = os.path.join(tdir, "somefile")
            m, sc = tscript("complex/har_dump.py", shlex.quote(path))
            m.addons.invoke(m, "response", self.flow())
            # Removing the addon flushes the HAR file to disk.
            m.addons.remove(sc)
        with open(path, "r") as inp:
            har = json.load(inp)
        assert len(har["log"]["entries"]) == 1
    def test_base64(self):
        with tutils.tmpdir() as tdir:
            path = os.path.join(tdir, "somefile")
            m, sc = tscript("complex/har_dump.py", shlex.quote(path))
            # Undecodable bytes must force base64 body encoding.
            m.addons.invoke(m, "response", self.flow(resp_content=b"foo" + b"\xFF" * 10))
            m.addons.remove(sc)
        with open(path, "r") as inp:
            har = json.load(inp)
        assert har["log"]["entries"][0]["response"]["content"]["encoding"] == "base64"
    def test_format_cookies(self):
        m, sc = tscript("complex/har_dump.py", "-")
        format_cookies = sc.ns.format_cookies
        CA = cookies.CookieAttrs
        f = format_cookies([("n", "v", CA([("k", "v")]))])[0]
        assert f['name'] == "n"
        assert f['value'] == "v"
        assert not f['httpOnly']
        assert not f['secure']
        f = format_cookies([("n", "v", CA([("httponly", None), ("secure", None)]))])[0]
        assert f['httpOnly']
        assert f['secure']
        f = format_cookies([("n", "v", CA([("expires", "Mon, 24-Aug-2037 00:00:00 GMT")]))])[0]
        assert f['expires']
    def test_binary(self):
        f = self.flow()
        f.request.method = "POST"
        f.request.headers["content-type"] = "application/x-www-form-urlencoded"
        f.request.content = b"foo=bar&baz=s%c3%bc%c3%9f"
        # Non-printable header/body bytes must still serialize to valid JSON.
        f.response.headers["random-junk"] = bytes(range(256))
        f.response.content = bytes(range(256))
        with tutils.tmpdir() as tdir:
            path = os.path.join(tdir, "somefile")
            m, sc = tscript("complex/har_dump.py", shlex.quote(path))
            m.addons.invoke(m, "response", f)
            m.addons.remove(sc)
        with open(path, "r") as inp:
            har = json.load(inp)
        assert len(har["log"]["entries"]) == 1
| |
'''
nlp:part of the wordfish python package: extracting relationships of terms from corpus
functions for simple natural language processing
'''
from textblob import TextBlob, Word
from nltk.corpus import stopwords
from nltk.stem.porter import *
from nltk.stem import *
import nltk.data
import numpy
import pandas
import gensim
import re
def remove_nonenglish_chars(text):
    """Return text with every character outside [a-zA-Z] replaced by a space."""
    return re.sub(r"[^a-zA-Z]", " ", text)
def text2sentences(text, remove_non_english_chars=True):
    """Yield sentences from text using the NLTK punkt English tokenizer.

    If remove_non_english_chars is True, non [a-zA-Z] characters are
    replaced by spaces before tokenizing.
    """
    sentence_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
    if remove_non_english_chars:
        text = remove_nonenglish_chars(text)
    for sentence in sentence_tokenizer.tokenize(text):
        yield sentence
def processText(text):
    '''combines text2sentences and sentence2words into one flat word list'''
    words = []
    for sentence in text2sentences(text):
        words.extend(sentence2words(sentence))
    return words
def sentence2words(sentence, remove_stop_words=True, lower=True):
    '''sentence2words: split a sentence into whitespace-delimited words.

    Parameters
    ==========
    sentence: str/list
        the sentence to split; if a list, only the first element is used
    remove_stop_words: boolean (default True)
        drop English NLTK stopwords from the result
    lower: boolean (default True)
        lowercase the sentence before splitting

    Fix: the whitespace pattern is now a raw string; the bare "\\s"
    escape raises an invalid-escape warning on modern Python 3.
    '''
    if isinstance(sentence, list):
        sentence = sentence[0]
    re_white_space = re.compile(r"\s+")
    stop_words = set(stopwords.words("english"))
    if lower:
        sentence = sentence.lower()
    words = re_white_space.split(sentence.strip())
    if remove_stop_words:
        words = [w for w in words if w not in stop_words]
    return words
def do_stem(words, return_unique=True, remove_non_english_words=True):
    '''do_stem
    Parameters
    ==========
    words: str/list
        one or more words to be stemmed
    return_unique: boolean (default True)
        return unique terms
    remove_non_english_words: boolean (default True)
        replace non [a-zA-Z] characters with spaces before stemming
    '''
    stemmer = PorterStemmer()
    if isinstance(words, str):
        words = [words]
    stems = []
    for word in words:
        if remove_non_english_words:
            word = re.sub("[^a-zA-Z]", " ", word)
        stems.append(stemmer.stem(word))
    if not return_unique:
        return stems
    # Lowercase, deduplicate, and sort via numpy.unique.
    return numpy.unique([s.lower() for s in stems]).tolist()
def get_total_words(text):
    '''get_total_words:
    get total words in a text (dict, string, or list)

    Parameters
    ==========
    text: str,dict,list
        some text content to parse to count total words

    Returns
    =======
    totalwords: int
        total count of words

    Fix: ``dict.iteritems()`` was removed in Python 3; ``items()``
    behaves identically here on both interpreters.
    '''
    totalwords = 0
    # Dictionary
    if isinstance(text, dict):
        for label, sentences in text.items():
            if isinstance(sentences, str):
                sentences = [sentences]
            for sentence in sentences:
                blob = TextBlob(sentence)
                # Counting stemmed words (unique per sentence by default).
                words = do_stem(blob.words)
                totalwords += len(words)
        return totalwords
    # String or list
    elif isinstance(text, str):
        text = [text]
    for sentence in text:
        blob = TextBlob(sentence)
        words = do_stem(blob.words)
        totalwords += len(words)
    return totalwords
def get_term_counts(terms, text):
    '''get_term_counts:
    a wrapper for get_term_counts_dict and get_term_counts_list
    will return a pandas data frame of counts for a list of terms of interest

    Parameters
    ==========
    text: dict,list,str
        some text content to parse to count a number of terms
    terms: str,list
        one or more terms to be stemmed and counted in the text

    Returns
    =======
    counts: pandas.DataFrame
        a "count" column indexed by stemmed term

    Fix: a plain-string ``text`` previously fell through after being
    wrapped in a list and the function returned None; it is now
    dispatched to get_term_counts_list like any other list.
    '''
    if isinstance(text, dict):
        return get_term_counts_dict(terms, text)
    if isinstance(text, str):
        text = [text]
    return get_term_counts_list(terms, text)
def get_term_counts_list(terms,text):
    """Count occurrences of stemmed terms across a list of sentences.

    NOTE(review): do_stem() returns unique stems per sentence by default,
    so each matching term is incremented at most once per sentence --
    duplicate hits within one sentence are not double counted.
    """
    # Convert words into stems
    stems = do_stem(terms)
    # data frame hold counts
    counts = pandas.DataFrame(0,columns=["count"],index=stems)
    for sentence in text:
        blob = TextBlob(sentence)
        words = do_stem(blob.words)
        # Keep only the stems we are tracking.
        words = [w for w in words if w in stems]
        counts.loc[words] = counts.loc[words] + 1
    return counts
def get_term_counts_dict(terms, text):
    """Count occurrences of stemmed terms across a dict of label -> sentences.

    Parameters
    ==========
    terms: str,list
        one or more terms to be stemmed and counted
    text: dict
        mapping of label to a sentence or list of sentences

    Returns
    =======
    counts: pandas.DataFrame
        a "count" column indexed by stemmed term

    Fix: ``dict.iteritems()`` was removed in Python 3; ``items()``
    behaves identically here on both interpreters.
    """
    # Convert words into stems
    stems = do_stem(terms)
    # data frame to hold counts
    counts = pandas.DataFrame(0, columns=["count"], index=stems)
    for label, sentences in text.items():
        if isinstance(sentences, str):
            sentences = [sentences]
        for sentence in sentences:
            blob = TextBlob(sentence)
            words = do_stem(blob.words)
            # Keep only the stems we are tracking.
            words = [w for w in words if w in stems]
            counts.loc[words] = counts.loc[words] + 1
    return counts
# Return list of stemmed phrases
def stem_phrases(phrases):
    """Stem each space-separated phrase and return the utf-8 encoded results.

    NOTE(review): do_stem() uniques and sorts stems by default, so word
    order within a phrase is not preserved -- confirm that is intended.

    Fix: removed a dead ``isinstance(phrase, str)`` check that followed
    ``phrase.split(" ")``; str.split always returns a list, so the branch
    could never fire.
    """
    stemmed = []
    for phrase in phrases:
        words = phrase.split(" ")
        single_stemmed = do_stem(words)
        stemmed.append(" ".join(single_stemmed).encode("utf-8"))
    return stemmed
def get_match(phrasematch,entirephrase,found_indices):
    '''
    get_match:
    Function to get a match: start, length, text, from a sentence
    Returns dictionary with:
        start_index
        length
        text
        found_indices: updated binary [0,1] list of found indices in entirephrase
    NOTE(review): entirephrase is mutated in place -- positions already
    claimed in found_indices are overwritten with a sentinel so they
    cannot match again.  Callers that reuse the list should pass a copy.
    '''
    full_concept = phrasematch.split(" ")
    foundmatch = True
    # We should not find words that have already been found :)
    findices = [i for i in range(0,len(found_indices)) if found_indices[i] == 1]
    for found_index in findices:
        # Sentinel value blanks out already-claimed positions.
        entirephrase[found_index] = "XXXXXXXXXXXXXXXX"
    indices = []
    for word in full_concept:
        if word in entirephrase:
            # list.index returns the FIRST occurrence of each word.
            indices.append(entirephrase.index(word))
        # Missing any one word, not a match
        else:
            foundmatch = False
    if len(numpy.unique(indices)) == len(full_concept):
        for i in range(0,len(indices)-1):
            # Not in chronological order +1, not a match
            if indices[i]+1 != indices[i+1]:
                foundmatch=False
    # Missing any one word, not a match
    else:
        foundmatch = False
    if foundmatch == True:
        start_index = entirephrase.index(full_concept[0])
        length = len(full_concept)
        text = entirephrase[start_index:start_index+length]
        # Update found indices
        found_indices[start_index:start_index+length]=1
    else:
        start_index = 0
        length = 0
        text = ""
    result = {"start_index":start_index,
              "length":length,
              "text":text,
              "found_indices":found_indices}
    return result
def find_phrases(words,vocabulary,repeat=1):
    '''
    words: a list of words
    vocabulary: a list of words / phrases to find in the words
    repeat: the number of times to run over the phrase
    (in case of repeats of same in one sentence)
    Returns:
    (words_index,vocab_index,word,vocab)
    NOTE(review): encode("utf-8") on str assumes Python 2 semantics; on
    Python 3 it yields bytes and the string joins below would fail --
    verify the intended interpreter before reuse.
    '''
    vocabulary = numpy.unique(vocabulary).tolist()
    vocabulary = [v.encode("utf-8") for v in vocabulary]
    # We will stem phrases, and search for them across the stemmed words
    vocab_stemmed = stem_phrases(vocabulary)
    stemmed = [s.encode("utf-8") for s in do_stem(words,return_unique=False)]
    # Make a long regular expression
    # NOTE(review): '*' quantifies the preceding character rather than
    # acting as a wildcard, so this pattern is looser than it looks.
    regexp = "*|".join(vocab_stemmed) + "*"
    phrases = []
    # Make lookups to return to original vocab and terms
    vocab_lookup = make_lookup(vocabulary,vocab_stemmed)
    words_lookup = make_lookup(words,stemmed)
    # We run it twice in case of repeats in a sentence
    for r in range(0,repeat):
        # Search the sentence for any concepts:
        if re.search(regexp," ".join(stemmed)):
            for c in range(0,len(stemmed)):
                for v in range(len(vocab_stemmed)):
                    single_stemmed = vocab_stemmed[v]
                    # re.match anchors at the start of single_stemmed only.
                    if re.match("%s" %(stemmed[c]),single_stemmed):
                        phrases.append((c, v, words_lookup[stemmed[c]],vocab_lookup[vocab_stemmed[v]]))
    return phrases
def make_lookup(original_list, new_list):
    """Map each element of new_list to the original_list element at the
    same position (later duplicates in new_list win)."""
    lookup = dict()
    for position, key in enumerate(new_list):
        lookup[key] = original_list[position]
    return lookup
| |
#!/usr/bin/python
import re
class CameraFrame(object):
    """Encode and decode printf-style framed camera protocol commands.

    Each instance holds a confirmation, control, and reply format string;
    the formats are also compiled to regular expressions for decoding.
    Subclasses override STX/ETX to add start/end framing characters.

    Fix: checksum bytes were built with ``('%02X' % cksum).decode('hex')``,
    which only works on Python 2.  ``chr(cksum)`` produces the identical
    single character for values < 0x100 on both Python 2 and 3.  The
    duplicated checksum logic is factored into _AppendChecksum.
    """
    STX = ''
    ETX = ''
    _PCT_C_RE = re.compile(r'%(\d*)c')
    _PCT_D_RE = re.compile(r'%(\d*)d')
    _PCT_S_RE = re.compile(r'%s')
    def __init__(self, desc, confirm_format, control_format, reply_format,
                 checksum=False):
        self.desc = desc
        self.confirm_format = confirm_format
        self.control_format = control_format
        self.reply_format = reply_format
        self.checksum = checksum
        self.confirm_re = self._FormatToRE(confirm_format, checksum)
        self.control_re = self._FormatToRE(control_format, checksum)
        self.reply_re = self._FormatToRE(reply_format, checksum)
    def _Checksum(self, cmd):
        """Return the modulo-256 byte sum of cmd, avoiding 0 and CR (0x0D)."""
        cksum = sum([ord(x) for x in cmd]) % 0x100
        if cksum == 0:
            cksum = 1
        if cksum == 0x0D:
            cksum = 0x0E
        return cksum
    def _AppendChecksum(self, cmd):
        """Append the checksum character when this frame type uses one."""
        if self.checksum:
            cmd = cmd + chr(self._Checksum(cmd))
        return cmd
    def EncodeConfirmation(self, args):
        """Format args with the confirmation format and frame the result."""
        cmd = self._AppendChecksum(self.confirm_format % args)
        return self.STX + cmd + self.ETX
    def EncodeControl(self, args):
        """Format args with the control format and frame the result."""
        cmd = self._AppendChecksum(self.control_format % args)
        return self.STX + cmd + self.ETX
    def DecodeReply(self, cmd):
        """Attempt to decode cmd and return values, failure raises ValueError."""
        if self.STX:
            if cmd[0] != self.STX:
                raise ValueError('Incorrect framing')
            cmd = cmd[1:]
        if self.ETX:
            if cmd[-1] != self.ETX:
                raise ValueError('Incorrect framing')
            cmd = cmd[:-1]
        result = self.reply_re.match(cmd)
        if result is None:
            raise ValueError('No match')
        # Replies don't have checksums.
        #if self.checksum:
        #    cksum = self._Checksum(cmd[:-1])
        #    if cksum != ord(cmd[-1]):
        #        raise ValueError('Checksum mismatch (%02X/%02X)' %
        #                         (cksum, ord(cmd[-1])))
        return result
    def _FormatToRE(self, fmt, checksum):
        """Convert a printf style format string into a regular expression."""
        if not fmt:
            # None/empty format: no regex (e.g. commands with no reply form).
            return
        if checksum:
            # The trailing checksum character is captured like any %c.
            fmt = fmt + '%c'
        def percent_c_repl(match):
            count = match.group(1)
            if count:
                return r'(.{%s})' % count
            return r'(.)'
        fmt = self._PCT_C_RE.sub(percent_c_repl, fmt)
        def percent_d_repl(match):
            count = match.group(1)
            if count:
                # Strip leading zeros: %02d still means "two digits".
                return r'(\d{%s})' % count.lstrip('0')
            return r'(\d)'
        fmt = self._PCT_D_RE.sub(percent_d_repl, fmt)
        def percent_s_repl(match):
            return r'(.*)'
        fmt = self._PCT_S_RE.sub(percent_s_repl, fmt)
        return re.compile(fmt)
class CCP(CameraFrame):
    # Camera Control Protocol frames: wrapped in STX/ETX control bytes.
    STX = '\x02'
    ETX = '\x03'
class PT(CameraFrame):
    # Pan/tilt head frames: no start byte, terminated by carriage return.
    STX=''
    ETX='\r'
class HE100(object):
    """Command table for the HE100 camera head.

    Each entry pairs a human-readable description with the confirm /
    control / reply frame templates: CCP entries use the camera control
    protocol, PT entries use the pan/tilt protocol.
    """
    commands = (CCP('model number', 'QID', None, 'OID:%2c'),
                CCP('software version', 'QSV', None, 'OSV:%s'), # Unsure of rlen
                CCP('AWC/AWB', None, 'OWS', 'OWS'),
                CCP('ABC/ABB', None, 'OAS', 'OAS'),
                CCP('AWC mode', 'QAW', 'OAW:%c', 'OAW:%c'),
                CCP('detail', 'QDT', 'ODT:%c', 'ODT:%c'),
                CCP('gain up', 'QGU', 'OGU:%c', 'OGU:%c'),
                CCP('shutter', 'QSH', 'OSH:%c', 'OSH:%c'),
                CCP('synchro scan', 'QMS', 'OMS:%3c', 'OMS:%3c'), # Unsure of fmt
                CCP('field/frame', 'QFF', None, 'OFF:%c'),
                CCP('v.resolution', None, 'OFR:%c', 'OFR:%c'),
                CCP('iris auto/manual', 'QRS', 'ORS:%c', 'ORS:%c'),
                CCP('manual iris volume', 'QRV', 'ORV:%3c', 'ORV:%3c'), # fmt?
                CCP('picture level', 'QSD:48', 'OSD:48:%2c', 'OSD:48:%2c'), # fmt?
                CCP('light peak/avg', 'QPA', None, 'OPA:%2c'), # fmt?
                CCP('light peak/avg', None, 'OPV:%2c', 'OPV:%2c'), # fmt?
                CCP('light area', 'QAR', None, 'OAR:%c'),
                CCP('light area', None, 'ORA:%c', 'ORA:%c'),
                CCP('nega/posi', 'QNP', 'ONP:%c', 'ONP:%c'),
                CCP('r pedestal', 'QRD', 'ORD:%2c', 'ORD:%2c'), # fmt?
                CCP('b pedestal', 'QBD', 'OBD:%2c', 'OBD:%2c'), # fmt?
                CCP('r gain', 'QGR', 'OGR:%2c', 'OGR:%2c'), # typo? fmt?
                CCP('b gain', 'QGB', 'OGB:%2c', 'OGB:%2c'), # typo? fmt?
                CCP('t pedestal', 'QTD', 'OTD:%2c', 'OTD:%2c'), # fmt?
                CCP('h phase', 'QHP', 'OHP:%3c', 'OHP:%3c'), # fmt?
                CCP('sc coarse', 'QSC', 'OSC:%c', 'OSC:%c'),
                CCP('sc fine', 'QSN', 'OSN:%3c', 'OSN:%3c'), # fmt?
                CCP('chroma level', 'QCG', 'OCG:%2c', 'OCG:%2c'),
                CCP('scene file', 'QSF', 'OSF:%c', 'OSF:%c'),
                CCP('scene file', None, 'XSF:%c', 'XSF:%c'),
                CCP('gamma', 'QSD:00', 'OSD:00:%2c', 'OSD:00:%2c'), # fmt?...
                CCP('knee point', 'QSD:08', 'OSD:08:%2c', 'OSD:08:%2c'),
                CCP('white clip', 'QSD:09', 'OSD:09:%2c', 'OSD:09:%2c'),
                CCP('h.dtl level h', 'QSD:0A', 'OSD:0A:%2c', 'OSD:0A:%2c'),
                CCP('v.dtl level h', 'QSD:0E', 'OSD:0E:%2c', 'OSD:0E:%2c'),
                CCP('h.dtl level l', 'QSD:12', 'OSD:12:%2c', 'OSD:12:%2c'),
                CCP('v.dtl level l', 'QSD:16', 'OSD:16:%2c', 'OSD:16:%2c'),
                CCP('detail band', 'QSD:1E', 'OSD:1E:%2c', 'OSD:1E:%2c'),
                CCP('noise suppress', 'QSD:22', 'OSD:22:%2c', 'OSD:22:%2c'),
                CCP('level dependent', 'QSD:26', 'OSD:26:%2c', 'OSD:26:%2c'),
                CCP('chroma detail', 'QSD:2A', 'OSD:2A:%2c', 'OSD:2A:%2c'),
                CCP('dark detail', 'QSD:2E', 'OSD:2E:%2c', 'OSD:2E:%2c'),
                CCP('matrix r-g', 'QSD:2F', 'OSD:2F:%2c', 'OSD:2F:%2c'),
                CCP('matrix r-b', 'QSD:30', 'OSD:30:%2c', 'OSD:30:%2c'),
                CCP('matrix g-r', 'QSD:31', 'OSD:31:%2c', 'OSD:31:%2c'),
                CCP('matrix g-b', 'QSD:32', 'OSD:32:%2c', 'OSD:32:%2c'),
                CCP('matrix b-r', 'QSD:33', 'OSD:33:%2c', 'OSD:33:%2c'),
                CCP('matrix b-g', 'QSD:34', 'OSD:34:%2c', 'OSD:34:%2c'),
                CCP('flare r', 'QSD:35', 'OSD:35:%2c', 'OSD:35:%2c'),
                CCP('flare g', 'QSD:36', 'OSD:36:%2c', 'OSD:36:%2c'),
                CCP('flare b', 'QSD:37', 'OSD:37:%2c', 'OSD:37:%2c'),
                CCP('flare sw', 'QSA:11', 'OSA:11:%2c', 'OSA:11:%2c'),
                CCP('clean dnr', 'QSD:3A', 'OSD:3A:%2c', 'OSD:3A:%2c'),
                CCP('2d lpf', 'QSD:3F', 'OSD:3F:%2c', 'OSD:3F:%2c'),
                CCP('corner detail', 'QSD:43', 'OSD:43:%2c', 'OSD:43:%2c'),
                CCP('precision detail', 'QSD:44', 'OSD:44:%2c', 'OSD:44:%2c'),
                CCP('black stretch', 'QSD:46', 'OSD:46:%2c', 'OSD:46:%2c'),
                CCP('high light chroma', 'QSD:49', 'OSD:49:%2c', 'OSD:49:%2c'),
                CCP('flesh detail', 'QSD:4B', 'OSD:4B:%2c', 'OSD:4B:%2c'),
                CCP('iris follow', 'QSD:4F', None, 'OSD:4F:%2c'),
                CCP('contrast/gamma', 'QSD:50', 'OSD:50:%2c', 'OSD:50:%2c'),
                CCP('flesh tone', 'QSD:52', 'OSD:52:%2c', 'OSD:52:%2c'),
                CCP('detail select', 'QSD:54', 'OSD:54:%2c', 'OSD:54:%2c'),
                CCP('noise suppress', 'QSD:55', 'OSD:55:%2c', 'OSD:55:%2c'),
                CCP('flesh noise suppress', 'QSD:56', 'OSD:56:%2c', 'OSD:56:%2c'),
                CCP('zebra indicator', 'QSD:60', 'OSD:60:%2c', 'OSD:60:%2c'),
                CCP('zebra 1 level', 'QSD:61', 'OSD:61:%2c', 'OSD:61:%2c'),
                CCP('zebra 2 level', 'QSD:62', 'OSD:62:%2c', 'OSD:62:%2c'),
                CCP('safety zone', 'QSD:63', 'OSD:63:%2c', 'OSD:63:%2c'),
                CCP('evf output', 'QSD:64', 'OSD:64:%2c', 'OSD:64:%2c'),
                CCP('output select', 'QSD:65', 'OSD:65:%2c', 'OSD:65:%2c'),
                CCP('charge time', 'QSD:68', 'OSD:68:%2c', 'OSD:68:%2c'),
                CCP('agc max', 'QSD:69', 'OSD:69:%2c', 'OSD:69:%2c'),
                CCP('aspect ratio', 'QSD:70', 'OSD:70:%2c', 'OSD:70:%2c'),
                CCP('fan', 'QSD:71', 'OSD:71:%2c', 'OSD:71:%2c'),
                CCP('atw speed', 'QSD:72', 'OSD:72:%2c', 'OSD:72:%2c'),
                CCP('error 3', None, None, 'ER3:%3c'),
                # More CCP to follow...
                # Many of the following confirm patterns are guesses.
                PT('power', '#O', '#O%c', 'p%c'),
                PT('pan speed', '#P', '#P%2c', None),
                PT('tilt speed', '#T', '#T%2c', None),
                PT('pan/tilt position', '#U', '#U%4c%4c', 'u%4c%4c',
                   checksum=True),
                PT('zoom speed', '#Z', '#Z%2c', None),
                PT('zoom position x', '#AXZ', '#AXZ%3c', 'axz%3c'),
                PT('zoom position y', '#AYZ', '#AYZ%4c%4c', 'ayz%3c',
                   checksum=True),
                PT('focus speed', '#F', '#F%2c', None),
                PT('focus position x', '#AXF', '#AXF%3c', 'axf%3c'),
                PT('focus position y', '#AYF', '#AYF%3c', 'ayf%3c'),
                PT('roll speed', '#RO', '#RO%2c', None),
                PT('iris', '#I', '#I%2c', None),
                PT('iris x', '#AXI', '#AXI%3c', 'axi%3c'),
                PT('iris y', '#AYI', '#AYI%3c', 'ayi%3c'), # typo?
                PT('extender/af', '#D1', '#D1%c', 'd1%c'),
                PT('nd', '#D2', '#D2%c', 'd2%c'),
                PT('iris auto/manual', '#D3', '#D3%c', 'd3%c'),
                PT('lamp control', '#D4', '#D4%c', 'd4%c'),
                PT('lamp alarm', '#D5', None, 'd5%c'),
                PT('option sw', '#D6', '#D6%c', 'd6%c'),
                PT('defroster', '#D7', '#D7%c', 'd7%c'),
                PT('wiper', '#D8', '#D8%c', 'd8%c'),
                PT('heater/fan', '#D9', '#D9%c', 'd9%c'),
                PT('tally', '#DA', '#DA%c', 'dA%c'),
                PT('save preset memory', '#S', '#M%2c', 's%2c'),
                PT('recall preset memory', None, '#R%2c', 's%2c'),
                PT('preset complete notification', None, None, 'q%2c'),
                PT('preset mode', None, '#RT%c', 'rt%c'),
                PT('limit', None, '#L%c', 'l%c'),
                PT('landing', None, '#N%c', None),
                PT('request zoom position', '#GZ', None, 'gz%3c'),
                PT('request focus position', '#GF', None, 'gf%3c'),
                PT('request iris position', '#GI', None, 'gi%3c%c'),
                PT('tilt range', None, '#AGL%c', 'aGL%c'),
                PT('software version', '#V?', None, '%s'),
                )
def main():
    """Build the HE100 command table, exercising all frame compilation."""
    _ = HE100()
# Allow the module to be run directly as a script.
if __name__ == '__main__':
    main()
| |
"""
The Flask frontend for the GA4GH API.
TODO Document properly.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import datetime
import socket
import urlparse
import functools
import flask
import flask.ext.cors as cors
import humanize
import werkzeug
import oic
import oic.oauth2
import oic.oic.message as message
import requests
import ga4gh
import ga4gh.backend as backend
import ga4gh.datamodel as datamodel
import ga4gh.protocol as protocol
import ga4gh.exceptions as exceptions
# MIME type used for every API response.
MIMETYPE = "application/json"
# HTTP methods accepted by the .../search endpoints.
SEARCH_ENDPOINT_METHODS = ['POST', 'OPTIONS']
# Number of random bytes used for the Flask session secret key.
SECRET_KEY_LENGTH = 24
app = flask.Flask(__name__)
# The app carries its own registry of displayed (method, URL) pairs;
# guard against double-initialisation.
assert not hasattr(app, 'urls')
app.urls = []
class NoConverter(werkzeug.routing.BaseConverter):
    """
    A converter that allows the routing matching algorithm to not
    match on certain literal terms

    This is needed because if there are e.g. two routes:

    /<version>/callsets/search
    /<version>/callsets/<id>

    A request for /someVersion/callsets/search will get routed to
    the second, which is not what we want.
    """
    def __init__(self, map, *items):
        # items: the literal path segments this converter must refuse.
        werkzeug.routing.BaseConverter.__init__(self, map)
        self.items = items

    def to_python(self, value):
        # Refuse the excluded literals so another rule (e.g. the /search
        # route) can claim the URL instead.
        if value in self.items:
            raise werkzeug.routing.ValidationError()
        return value
# Make the converter available in routes as <no(term1,term2):name>.
app.url_map.converters['no'] = NoConverter
class Version(object):
    """
    A major/minor/revision version tag
    """
    # Sentinel string that always refers to the server's own version.
    currentString = "current"

    @classmethod
    def isCurrentVersion(cls, versionString):
        # True when versionString is the literal "current" or parses to
        # the same (major, minor, revision) as the protocol version.
        if versionString == cls.currentString:
            return True
        return (Version.parseString(versionString) ==
                Version.parseString(protocol.version))

    @classmethod
    def parseString(cls, versionString):
        # NOTE(review): the components stay strings, so e.g. "0.10" and
        # "0.1" never compare numerically; a version string with other
        # than three dot-separated parts raises TypeError from __init__.
        versions = versionString.strip('vV').split('.')
        return Version(*versions)

    def __init__(self, major, minor, revision):
        self.version = (major, minor, revision)

    def __cmp__(self, other):
        # NOTE(review): __cmp__ and the cmp builtin are Python 2 only;
        # ordering comparisons will not work under Python 3.
        return cmp(self.version, other.version)

    def __hash__(self):
        return hash(self.version)

    def __eq__(self, other):
        return self.version == other.version

    def __ne__(self, other):
        return not self.__eq__(other)

    @classmethod
    def getVersionForUrl(cls, versionString):
        """
        Returns the specified version string in a form suitable for use
        within a URL, prefixing it with 'v' when necessary.
        """
        ret = versionString
        if not ret.startswith("v"):
            ret = "v{}".format(versionString)
        return ret
class ServerStatus(object):
    """
    Generates information about the status of the server for display
    """
    def __init__(self):
        # Recorded once at construction; all uptime values derive from it.
        self.startupTime = datetime.datetime.now()

    def getConfiguration(self):
        """
        Returns a list of configuration (key, value) tuples
        that are useful for users to view on an information page.

        Note that we should be careful here not to leak sensitive
        information. For example, keys and paths of data files should
        not be returned.
        """
        # TODO what other config keys are appropriate to export here?
        keys = [
            'DEBUG', 'REQUEST_VALIDATION', 'RESPONSE_VALIDATION',
            'DEFAULT_PAGE_SIZE', 'MAX_RESPONSE_LENGTH',
        ]
        return [(k, app.config[k]) for k in keys]

    def getPreciseUptime(self):
        """
        Returns the server's startup time, precisely formatted for
        display. (Despite the name, this is the start timestamp, not an
        elapsed duration.)
        """
        return self.startupTime.strftime("%H:%M:%S %d %b %Y")

    def getNaturalUptime(self):
        """
        Returns the time since startup in a human-readable format.
        """
        return humanize.naturaltime(self.startupTime)

    def getProtocolVersion(self):
        """
        Returns the GA4GH protocol version we support.
        """
        return protocol.version

    def getServerVersion(self):
        """
        Returns the software version of this server.
        """
        return ga4gh.__version__

    def getUrls(self):
        """
        Returns the list of (httpMethod, URL) tuples that this server
        supports.
        """
        app.urls.sort()
        return app.urls

    def getDatasetIds(self):
        """
        Returns the list of datasetIds for this backend
        """
        return app.backend.getDatasetIds()

    def getVariantSets(self, datasetId):
        """
        Returns the list of variant sets for the dataset
        """
        return app.backend.getDataset(datasetId).getVariantSets()

    def getReadGroupSets(self, datasetId):
        """
        Returns the list of ReadGroupSets for the dataset
        """
        return app.backend.getDataset(datasetId).getReadGroupSets()

    def getReferenceSets(self):
        """
        Returns the list of ReferenceSets for this server.
        """
        return app.backend.getReferenceSets()
def reset():
    """
    Resets the flask app; used in testing
    """
    app.config.clear()
    app.config.from_object('ga4gh.serverconfig:FlaskDefaultConfig')
def configure(configFile=None, baseConfig="ProductionConfig",
              port=8000, extraConfig=None):
    """
    Configure the shared Flask application.

    Loads configuration in increasing priority (base config object, then
    the file named by the GA4GH_CONFIGURATION environment variable, then
    configFile, then extraConfig overrides), sets up CORS and the server
    status object, allocates the backend named by DATA_SOURCE, and
    initialises the OIDC client when OIDC_PROVIDER is configured.

    :param configFile: optional path to a python configuration file.
    :param baseConfig: name of the config class in ga4gh.serverconfig.
    :param port: the port this server listens on (used when building the
        automatic OIDC redirect URI during testing).
    :param extraConfig: optional dict of configuration overrides.
    :raises exceptions.ConfigurationException: if OIDC is enabled but no
        redirect URI can be determined.
    """
    configStr = 'ga4gh.serverconfig:{0}'.format(baseConfig)
    app.config.from_object(configStr)
    if os.environ.get('GA4GH_CONFIGURATION') is not None:
        app.config.from_envvar('GA4GH_CONFIGURATION')
    if configFile is not None:
        app.config.from_pyfile(configFile)
    # BUG FIX: the original declared a mutable default argument
    # (extraConfig={}), which is shared between calls and can leak state;
    # default to None and substitute a fresh dict here instead.
    if extraConfig is None:
        extraConfig = {}
    app.config.update(extraConfig.items())
    # Setup file handle cache max size
    datamodel.fileHandleCache.setMaxCacheSize(
        app.config["FILE_HANDLE_CACHE_MAX_SIZE"])
    # Setup CORS
    cors.CORS(app, allow_headers='Content-Type')
    app.serverStatus = ServerStatus()
    # Allocate the backend
    # TODO is this a good way to determine what type of backend we should
    # instantiate? We should think carefully about this. The approach of
    # using the special strings __SIMULATED__ and __EMPTY__ seems OK for
    # now, but is certainly not ideal.
    dataSource = app.config["DATA_SOURCE"]
    if dataSource == "__SIMULATED__":
        randomSeed = app.config["SIMULATED_BACKEND_RANDOM_SEED"]
        numCalls = app.config["SIMULATED_BACKEND_NUM_CALLS"]
        variantDensity = app.config["SIMULATED_BACKEND_VARIANT_DENSITY"]
        numVariantSets = app.config["SIMULATED_BACKEND_NUM_VARIANT_SETS"]
        numReferenceSets = app.config[
            "SIMULATED_BACKEND_NUM_REFERENCE_SETS"]
        numReferencesPerReferenceSet = app.config[
            "SIMULATED_BACKEND_NUM_REFERENCES_PER_REFERENCE_SET"]
        numAlignments = app.config[
            "SIMULATED_BACKEND_NUM_ALIGNMENTS_PER_READ_GROUP"]
        theBackend = backend.SimulatedBackend(
            randomSeed, numCalls, variantDensity, numVariantSets,
            numReferenceSets, numReferencesPerReferenceSet, numAlignments)
    elif dataSource == "__EMPTY__":
        theBackend = backend.EmptyBackend()
    else:
        theBackend = backend.FileSystemBackend(dataSource)
    theBackend.setRequestValidation(app.config["REQUEST_VALIDATION"])
    theBackend.setResponseValidation(app.config["RESPONSE_VALIDATION"])
    theBackend.setDefaultPageSize(app.config["DEFAULT_PAGE_SIZE"])
    theBackend.setMaxResponseLength(app.config["MAX_RESPONSE_LENGTH"])
    app.backend = theBackend
    app.secret_key = os.urandom(SECRET_KEY_LENGTH)
    app.oidcClient = None
    app.tokenMap = None
    app.myPort = port
    if "OIDC_PROVIDER" in app.config:
        # The oic client. If we're testing, we don't want to verify
        # SSL certificates
        app.oidcClient = oic.oic.Client(
            verify_ssl=('TESTING' not in app.config))
        app.tokenMap = {}
        try:
            app.oidcClient.provider_config(app.config['OIDC_PROVIDER'])
        except requests.exceptions.ConnectionError:
            # Provider is unreachable: build its configuration locally
            # from the individually configured endpoints.
            configResponse = message.ProviderConfigurationResponse(
                issuer=app.config['OIDC_PROVIDER'],
                authorization_endpoint=app.config['OIDC_AUTHZ_ENDPOINT'],
                token_endpoint=app.config['OIDC_TOKEN_ENDPOINT'],
                revocation_endpoint=app.config['OIDC_TOKEN_REV_ENDPOINT'])
            app.oidcClient.handle_provider_config(configResponse,
                                                  app.config['OIDC_PROVIDER'])
        # The redirect URI comes from the configuration.
        # If we are testing, then we allow the automatic creation of a
        # redirect uri if none is configured
        redirectUri = app.config.get('OIDC_REDIRECT_URI')
        if redirectUri is None and 'TESTING' in app.config:
            redirectUri = 'https://{0}:{1}/oauth2callback'.format(
                socket.gethostname(), app.myPort)
        # BUG FIX: the original guard was `if redirectUri is []`, an
        # identity test against a fresh list that can never be true, so a
        # missing redirect URI was silently accepted. Check for None, and
        # do it before installing the URI on the client.
        if redirectUri is None:
            raise exceptions.ConfigurationException(
                'OIDC configuration requires a redirect uri')
        app.oidcClient.redirect_uris = [redirectUri]
        # We only support dynamic registration while testing.
        if ('registration_endpoint' in app.oidcClient.provider_info and
                'TESTING' in app.config):
            app.oidcClient.register(
                app.oidcClient.provider_info["registration_endpoint"],
                redirect_uris=[redirectUri])
        else:
            response = message.RegistrationResponse(
                client_id=app.config['OIDC_CLIENT_ID'],
                client_secret=app.config['OIDC_CLIENT_SECRET'],
                redirect_uris=[redirectUri],
                verify_ssl=False)
            app.oidcClient.store_registration_info(response)
def getFlaskResponse(responseString, httpStatus=200):
    """
    Returns a Flask response object for the specified data and HTTP status.
    """
    return flask.Response(
        responseString, status=httpStatus, mimetype=MIMETYPE)
def handleHttpPost(request, endpoint):
    """
    Handles the specified HTTP POST request: rejects non-JSON payloads,
    otherwise passes the raw body to the protocol endpoint and wraps the
    result in a Flask response.
    """
    if request.mimetype != MIMETYPE:
        raise exceptions.UnsupportedMediaTypeException()
    return getFlaskResponse(endpoint(request.get_data()))
def handleList(id_, endpoint, request):
    """
    Handles an HTTP GET list request by forwarding the id and query
    arguments to the endpoint.
    """
    return getFlaskResponse(endpoint(id_, request.args))
def handleHttpGet(id_, endpoint):
    """
    Handles an HTTP GET request by invoking the protocol endpoint for the
    given object id and wrapping the result in a Flask response.
    """
    return getFlaskResponse(endpoint(id_))
def handleHttpOptions():
    """
    Handles an HTTP OPTIONS (CORS preflight) request by returning an
    empty response advertising the allowed methods.
    """
    response = getFlaskResponse("")
    # BUG FIX: the CORS *response* header is Access-Control-Allow-Methods;
    # Access-Control-Request-Methods is a header sent by the browser in
    # the preflight request, so setting it here had no effect.
    response.headers.add("Access-Control-Allow-Methods", "GET,POST,OPTIONS")
    return response
@app.errorhandler(Exception)
def handleException(exception):
    """
    Top-level handler: converts any exception raised while serving a
    request into a JSON protocol error response.
    """
    if app.config['DEBUG']:
        app.log_exception(exception)
    if isinstance(exception, exceptions.BaseServerException):
        serverException = exception
    else:
        # Wrap unexpected errors in the generic server-error type.
        serverException = exceptions.getServerError(exception)
    responseStr = serverException.toProtocolElement().toJsonString()
    return getFlaskResponse(responseStr, serverException.httpStatus)
def assertCorrectVersion(version):
    """Reject requests addressed to a protocol version we do not serve."""
    if Version.isCurrentVersion(version):
        return
    raise exceptions.VersionNotSupportedException()
def startLogin():
    """
    If we are not logged in, this generates the redirect URL to the OIDC
    provider and returns the redirect response
    :return: A redirect response to the OIDC provider
    """
    # Fresh state and nonce per login attempt, remembered in the session
    # so the callback can verify the round trip.
    state = oic.oauth2.rndstr(SECRET_KEY_LENGTH)
    nonce = oic.oauth2.rndstr(SECRET_KEY_LENGTH)
    flask.session["state"] = state
    flask.session["nonce"] = nonce
    requestArgs = {
        "client_id": app.oidcClient.client_id,
        "response_type": "code",
        "scope": ["openid", "profile"],
        "nonce": nonce,
        "redirect_uri": app.oidcClient.redirect_uris[0],
        "state": state,
    }
    result = app.oidcClient.do_authorization_request(
        request_args=requestArgs, state=state)
    return flask.redirect(result.url)
@app.before_request
def checkAuthentication():
    """
    The request will have a parameter 'key' if it came from the command line
    client, or have a session key of 'key' if it's the browser.
    If the token is not found, start the login process.

    If there is no oidcClient, we are running naked and we don't check.
    If we're being redirected to the oidcCallback we don't check.

    :returns None if all is ok (and the request handler continues as usual).
    Otherwise if the key was in the session (therefore we're in a browser)
    then startLogin() will redirect to the OIDC provider. If the key was in
    the request arguments, we're using the command line and just raise an
    exception.
    """
    if app.oidcClient is None:
        return
    if flask.request.endpoint == 'oidcCallback':
        return
    key = flask.session.get('key') or flask.request.args.get('key')
    if app.tokenMap.get(key) is None:
        if 'key' in flask.request.args:
            # Command-line client: nothing to redirect, so fail hard.
            raise exceptions.NotAuthenticatedException()
        else:
            # Browser session: bounce through the OIDC login flow.
            return startLogin()
def handleFlaskGetRequest(version, id_, flaskRequest, endpoint):
    """
    Handles a flask GET request at the specified version by delegating to
    the given endpoint; any other HTTP method is rejected.
    """
    assertCorrectVersion(version)
    if flaskRequest.method != "GET":
        raise exceptions.MethodNotAllowedException()
    return handleHttpGet(id_, endpoint)
def handleFlaskListRequest(version, id_, flaskRequest, endpoint):
    """
    Handles a flask GET list request at the specified version by
    delegating to the given endpoint; any other HTTP method is rejected.
    """
    assertCorrectVersion(version)
    if flaskRequest.method != "GET":
        raise exceptions.MethodNotAllowedException()
    return handleList(id_, endpoint, flaskRequest)
def handleFlaskPostRequest(version, flaskRequest, endpoint):
    """
    Handles a flask POST (or CORS preflight OPTIONS) request at the
    specified version by delegating to the given endpoint; any other
    HTTP method is rejected.
    """
    assertCorrectVersion(version)
    method = flaskRequest.method
    if method == "POST":
        return handleHttpPost(flaskRequest, endpoint)
    if method == "OPTIONS":
        return handleHttpOptions()
    raise exceptions.MethodNotAllowedException()
class DisplayedRoute(object):
    """
    Registers that a route should be displayed on the html page
    """
    def __init__(
            self, path, postMethod=False, pathDisplay=None):
        # path: the flask rule; postMethod selects the search-endpoint
        # method set; pathDisplay overrides what is listed on the index.
        self.path = path
        self.methods = None
        if postMethod:
            methodDisplay = 'POST'
            self.methods = SEARCH_ENDPOINT_METHODS
        else:
            methodDisplay = 'GET'
        if pathDisplay is None:
            pathDisplay = path
        # Show the concrete protocol version rather than the placeholder.
        pathDisplay = pathDisplay.replace(
            '<version>', protocol.version)
        app.urls.append((methodDisplay, pathDisplay))

    def __call__(self, func):
        # NOTE(review): func.func_name is Python 2 only (__name__ on 3).
        if self.methods is None:
            app.add_url_rule(self.path, func.func_name, func)
        else:
            app.add_url_rule(
                self.path, func.func_name, func, methods=self.methods)

        # Flask dispatches to the original func registered above; the
        # wrapper only affects direct calls to the decorated name.
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            return result
        return wrapper
@app.route('/')
def index():
    # Landing page: renders the server status dashboard.
    return flask.render_template('index.html', info=app.serverStatus)
@app.route('/<version>')
def indexRedirect(version):
    """Serve the landing page when the requested version is current; any
    other (or malformed) version string is a 404."""
    try:
        matchesCurrent = Version.isCurrentVersion(version)
    except TypeError:  # malformed "version string"
        raise exceptions.PathNotFoundException()
    if not matchesCurrent:
        raise exceptions.PathNotFoundException()
    return index()
@DisplayedRoute('/<version>/references/<id>')
def getReference(version, id):
    """Fetch a single reference by id (GET)."""
    handler = app.backend.runGetReference
    return handleFlaskGetRequest(version, id, flask.request, handler)
@DisplayedRoute('/<version>/referencesets/<id>')
def getReferenceSet(version, id):
    """Fetch a single reference set by id (GET)."""
    handler = app.backend.runGetReferenceSet
    return handleFlaskGetRequest(version, id, flask.request, handler)
@DisplayedRoute('/<version>/references/<id>/bases')
def listReferenceBases(version, id):
    """List the bases of a reference (GET, paged via query args)."""
    handler = app.backend.runListReferenceBases
    return handleFlaskListRequest(version, id, flask.request, handler)
@DisplayedRoute('/<version>/callsets/search', postMethod=True)
def searchCallSets(version):
    """Search call sets (POST)."""
    handler = app.backend.runSearchCallSets
    return handleFlaskPostRequest(version, flask.request, handler)
@DisplayedRoute('/<version>/readgroupsets/search', postMethod=True)
def searchReadGroupSets(version):
    """Search read group sets (POST)."""
    handler = app.backend.runSearchReadGroupSets
    return handleFlaskPostRequest(version, flask.request, handler)
@DisplayedRoute('/<version>/reads/search', postMethod=True)
def searchReads(version):
    """Search reads (POST)."""
    handler = app.backend.runSearchReads
    return handleFlaskPostRequest(version, flask.request, handler)
@DisplayedRoute('/<version>/referencesets/search', postMethod=True)
def searchReferenceSets(version):
    """Search reference sets (POST)."""
    handler = app.backend.runSearchReferenceSets
    return handleFlaskPostRequest(version, flask.request, handler)
@DisplayedRoute('/<version>/references/search', postMethod=True)
def searchReferences(version):
    """Search references (POST)."""
    handler = app.backend.runSearchReferences
    return handleFlaskPostRequest(version, flask.request, handler)
@DisplayedRoute('/<version>/variantsets/search', postMethod=True)
def searchVariantSets(version):
    """Search variant sets (POST)."""
    handler = app.backend.runSearchVariantSets
    return handleFlaskPostRequest(version, flask.request, handler)
@DisplayedRoute('/<version>/variants/search', postMethod=True)
def searchVariants(version):
    """Search variants (POST)."""
    handler = app.backend.runSearchVariants
    return handleFlaskPostRequest(version, flask.request, handler)
@DisplayedRoute('/<version>/datasets/search', postMethod=True)
def searchDatasets(version):
    """Search datasets (POST)."""
    handler = app.backend.runSearchDatasets
    return handleFlaskPostRequest(version, flask.request, handler)
@DisplayedRoute('/<version>/variantsets/<no(search):id>')
def getVariantSet(version, id):
    """Fetch a single variant set by id (GET)."""
    handler = app.backend.runGetVariantSet
    return handleFlaskGetRequest(version, id, flask.request, handler)
@DisplayedRoute(
    '/<version>/readgroupsets/<no(search):id>',
    pathDisplay='/<version>/readgroupsets/<id>')
def getReadGroupSet(version, id):
    """Fetch a single read group set by id (GET)."""
    handler = app.backend.runGetReadGroupSet
    return handleFlaskGetRequest(version, id, flask.request, handler)
@DisplayedRoute('/<version>/readgroups/<id>')
def getReadGroup(version, id):
    """Fetch a single read group by id (GET)."""
    handler = app.backend.runGetReadGroup
    return handleFlaskGetRequest(version, id, flask.request, handler)
@DisplayedRoute(
    '/<version>/callsets/<no(search):id>',
    pathDisplay='/<version>/callsets/<id>')
def getCallset(version, id):
    """Fetch a single call set by id (GET)."""
    handler = app.backend.runGetCallset
    return handleFlaskGetRequest(version, id, flask.request, handler)
@app.route('/oauth2callback', methods=['GET'])
def oidcCallback():
    """
    Once the authorization provider has cleared the user, the browser
    is returned here with a code. This function takes that code and
    checks it with the authorization provider to prove that it is valid,
    and get a bit more information about the user (which we don't use).

    A token is generated and given to the user, and the authorization info
    retrieved above is stored against this token. Later, when a client
    connects with this token, it is assumed to be a valid user.

    :return: A display of the authentication token to use in the client. If
    OIDC is not configured, raises a NotImplementedException.
    """
    if app.oidcClient is None:
        raise exceptions.NotImplementedException()
    # NOTE(review): iteritems() is Python 2 only.
    response = dict(flask.request.args.iteritems(multi=True))
    aresp = app.oidcClient.parse_response(
        message.AuthorizationResponse,
        info=response,
        sformat='dict')
    sessState = flask.session.get('state')
    respState = aresp['state']
    # The state must round-trip unchanged or this is a forged response.
    if (not isinstance(aresp, message.AuthorizationResponse) or
            respState != sessState):
        raise exceptions.NotAuthenticatedException()
    args = {
        "code": aresp['code'],
        "redirect_uri": app.oidcClient.redirect_uris[0],
        "client_id": app.oidcClient.client_id,
        "client_secret": app.oidcClient.client_secret
    }
    atr = app.oidcClient.do_access_token_request(
        scope="openid",
        state=respState,
        request_args=args)
    if not isinstance(atr, message.AccessTokenResponse):
        raise exceptions.NotAuthenticatedException()
    atrDict = atr.to_dict()
    # The nonce in the id token must match the one issued at login.
    if flask.session.get('nonce') != atrDict['id_token']['nonce']:
        raise exceptions.NotAuthenticatedException()
    key = oic.oauth2.rndstr(SECRET_KEY_LENGTH)
    flask.session['key'] = key
    app.tokenMap[key] = aresp["code"], respState, atrDict
    # flask.url_for is broken. It relies on SERVER_NAME for both name
    # and port, and defaults to 'localhost' if not found. Therefore
    # we need to fix the returned url
    indexUrl = flask.url_for('index', _external=True)
    indexParts = list(urlparse.urlparse(indexUrl))
    if ':' not in indexParts[1]:
        indexParts[1] = '{}:{}'.format(socket.gethostname(), app.myPort)
        indexUrl = urlparse.urlunparse(indexParts)
    response = flask.redirect(indexUrl)
    return response
# The below paths have not yet been implemented
@app.route('/<version>/variants/<no(search):id>')
def getVariant(version, id):
    # Placeholder: fetching a single variant is not implemented yet.
    raise exceptions.NotImplementedException()
@app.route('/<version>/datasets/<no(search):id>')
def getDataset(version, id):
    # Placeholder: fetching a single dataset is not implemented yet.
    raise exceptions.NotImplementedException()
# The below methods ensure that JSON is returned for various errors
# instead of the default, html
@app.errorhandler(404)
def pathNotFoundHandler(errorString):
    # Return the JSON protocol error instead of Flask's default HTML 404.
    return handleException(exceptions.PathNotFoundException())
@app.errorhandler(405)
def methodNotAllowedHandler(errorString):
    # Return the JSON protocol error instead of Flask's default HTML 405.
    return handleException(exceptions.MethodNotAllowedException())
@app.errorhandler(403)
def notAuthenticatedHandler(errorString):
    # Return the JSON protocol error instead of Flask's default HTML 403.
    return handleException(exceptions.NotAuthenticatedException())
| |
import time, collections, uuid, struct, re, socket, weblist, twisted.web.server, twisted.web.static
from twisted.internet.protocol import Factory, ClientFactory, Protocol, DatagramProtocol
from twisted.internet import reactor
from expirationset import expirationset
class GameServer:
    """State advertised by a single registered game server."""

    def __init__(self, server_id, lobby_id):
        self.server_id = server_id
        self.lobby_id = lobby_id
        self.protocol = 0  # 0 = TCP, 1 = UDP
        self.ipv4_endpoint = None  # Tuple: (ipv4, port), as binary string and int
        self.ipv6_endpoint = None  # Tuple: (ipv6, port), as binary string and int
        self.name = ""
        self.slots = 0
        self.players = 0
        self.bots = 0
        self.passworded = False
        self.infos = {}

    def __repr__(self):
        # The last byte(s) of each address are zeroed so logs don't
        # expose the full endpoint.
        parts = ["<GameServer, name=", self.name,
                 ", lobby_id=", str(self.lobby_id)]
        if self.ipv4_endpoint is not None:
            anonip = self.ipv4_endpoint[0][:-1] + "\0"
            parts.append(", ipv4_endpoint=" + socket.inet_ntoa(anonip) +
                         ":" + str(self.ipv4_endpoint[1]))
        if self.ipv6_endpoint is not None:
            anonip = (self.ipv6_endpoint[0][:-10] + "\0" * 10,
                      self.ipv6_endpoint[1])
            parts.append(", ipv6_endpoint=" + str(anonip))
        return "".join(parts) + ">"
class GameServerList:
    """Registry of live game servers, indexed by server id, endpoint and
    lobby, with automatic expiry of entries that stop refreshing."""

    def __init__(self, duration=70):
        # Entries expire `duration` seconds after their last refresh.
        self._expirationset = expirationset(duration, self._remove_callback)
        self._server_id_dict = {}  # server_id -> GameServer
        self._endpoint_dict = {}   # (address, port) -> server_id
        self._lobby_dict = {}      # lobby_id -> set of GameServer

    def _remove_callback(self, server_id, expired):
        # Invoked by the expiration set: drop the server from every index.
        server = self._server_id_dict.pop(server_id)
        if(server.ipv4_endpoint is not None):
            del self._endpoint_dict[server.ipv4_endpoint]
        if(server.ipv6_endpoint is not None):
            del self._endpoint_dict[server.ipv6_endpoint]
        lobbyset = self._lobby_dict[server.lobby_id]
        lobbyset.remove(server)
        # Drop the lobby entirely once its last server is gone.
        if(not lobbyset):
            del self._lobby_dict[server.lobby_id]

    def put(self, server):
        """ Register a server in the lobby list.

        This server will replace any existing entries for this server ID.
        If an entry for this server ID is already present, its endpoint
        information will be used to complement the known endpoint(s) of the
        new entry, but the old entry itself will be discarded.
        The new server will be rejected if a server with a different ID is
        already known for the same endpoint.

        Warning: Do not modify the server's uuid, lobby or endpoint information
        after registering the server. Make a new server instead and register that."""
        self._expirationset.cleanup_stale()
        # Abort if there is a server with the same endpoint and different ID
        if(server.ipv4_endpoint in self._endpoint_dict and self._endpoint_dict[server.ipv4_endpoint] != server.server_id
           or server.ipv6_endpoint in self._endpoint_dict and self._endpoint_dict[server.ipv6_endpoint] != server.server_id):
            print "Server " + str(server) + " rejected - wrong ID for existing endpoint."
            return
        # If we already know an alternative endpoint for the server, copy it over.
        try:
            oldserver = self._server_id_dict[server.server_id]
            if(server.ipv4_endpoint is None):
                server.ipv4_endpoint = oldserver.ipv4_endpoint
            if(server.ipv6_endpoint is None):
                server.ipv6_endpoint = oldserver.ipv6_endpoint
        except KeyError:
            pass
        # Remove old entry for the server, if present.
        self._expirationset.discard(server.server_id)
        # Add the new entry
        self._server_id_dict[server.server_id] = server
        if(server.ipv4_endpoint):
            self._endpoint_dict[server.ipv4_endpoint] = server.server_id
        if(server.ipv6_endpoint):
            self._endpoint_dict[server.ipv6_endpoint] = server.server_id
        self._lobby_dict.setdefault(server.lobby_id, set()).add(server)
        self._expirationset.add(server.server_id)

    def remove(self, server_id):
        # The expiration set triggers _remove_callback to clean the indexes.
        self._expirationset.discard(server_id)

    def get_servers_in_lobby(self, lobby_id):
        # Returns a copy so callers can't mutate the internal set.
        self._expirationset.cleanup_stale()
        try:
            return self._lobby_dict[lobby_id].copy()
        except KeyError:
            return set()

    def get_lobbies(self):
        return self._lobby_dict.keys()
# Base UUID that legacy one-byte GG2 version numbers are offset against.
GG2_BASE_UUID = uuid.UUID("dea41970-4cea-a588-df40-62faef6f1738")
# Well-known lobby id used for GG2 servers in the v1 query protocol.
GG2_LOBBY_ID = uuid.UUID("1ccf16b1-436d-856f-504d-cc1af306aaa7")
def gg2_version_to_uuid(data):
    """Translate a GG2 query packet's version field into a UUID.

    A leading byte of 128 means a full 16-byte UUID follows; any other
    value is a legacy one-byte version, mapped to GG2_BASE_UUID + value.
    """
    marker = ord(data[0])
    if marker != 128:
        return uuid.UUID(int=GG2_BASE_UUID.int + marker)
    return uuid.UUID(bytes=data[1:17])
class GG2LobbyQueryV1(Protocol):
    """Legacy (v1) TCP lobby query: replies with a packed list of GG2
    servers matching the requested protocol version."""

    def formatServerData(self, server):
        # Build the legacy display string: password marker, [map], name
        # and player counts, truncated to 255 characters, followed by the
        # raw IPv4 endpoint.
        infostr = ""
        if(server.passworded): infostr += "!private!"
        if("map" in server.infos): infostr += "["+server.infos["map"]+"] "
        infostr += server.name
        if(server.bots == 0):
            infostr += " [%u/%u]" % (server.players, server.slots)
        else:
            infostr += " [%u+%u/%u]" % (server.players, server.bots, server.slots)
        infostr = infostr[:255]
        result = chr(len(infostr))+infostr
        result += server.ipv4_endpoint[0]
        result += struct.pack("<H",server.ipv4_endpoint[1])
        return result

    def sendReply(self, protocol_id):
        # Serialize up to 255 matching servers, send, and hang up.
        servers = self.factory.serverList.get_servers_in_lobby(GG2_LOBBY_ID)
        servers = [self.formatServerData(server) for server in servers if server.infos.get("protocol_id")==protocol_id.bytes][:255]
        self.transport.write(chr(len(servers))+"".join(servers))
        self.transport.loseConnection()
        print "Received query for version %s, returned %u Servers." % (protocol_id.hex, len(servers))

    def dataReceived(self, data):
        self.buffered += data
        # Anything longer than a full 17-byte versioned query is invalid.
        if(len(self.buffered) > 17):
            self.transport.loseConnection()
            return
        # Reply once we have either a one-byte legacy version or the
        # complete 128-prefixed 16-byte UUID form.
        if(ord(self.buffered[0]) != 128 or len(self.buffered)==17):
            self.sendReply(gg2_version_to_uuid(self.buffered))

    def connectionMade(self):
        self.buffered = ""
        # Drop clients that never complete their query.
        self.timeout = reactor.callLater(5, self.transport.loseConnection)

    def connectionLost(self, reason):
        if(self.timeout.active()): self.timeout.cancel()
class NewStyleList(Protocol):
    """Newer TCP list protocol: length-prefixed key/value records per server."""
    # Magic UUID a client must send to identify the list protocol itself.
    LIST_PROTOCOL_ID = uuid.UUID("297d0df4-430c-bf61-640a-640897eaef57")
    def formatKeyValue(self, k, v):
        # One entry: 1-byte key length, key, 2-byte big-endian value
        # length, value. Oversized fields are silently truncated.
        k = k[:255]
        v = v[:65535]
        return chr(len(k)) + k + struct.pack(">H", len(v)) + v
    def formatServerData(self, server):
        # Missing endpoints are encoded as empty address + port 0.
        ipv4_endpoint = server.ipv4_endpoint or ("", 0)
        ipv6_endpoint = server.ipv6_endpoint or ("", 0)
        flags = (1 if server.passworded else 0)
        # The server name travels as an ordinary key/value entry.
        infos = server.infos.copy()
        infos["name"] = server.name
        result = struct.pack(">BH4sH16sHHHHH", server.protocol, ipv4_endpoint[1], ipv4_endpoint[0], ipv6_endpoint[1], ipv6_endpoint[0], server.slots, server.players, server.bots, flags, len(infos))
        result += "".join([self.formatKeyValue(k, v) for (k, v) in infos.iteritems()])
        # Each server blob is itself prefixed with a 4-byte length.
        return struct.pack(">L", len(result))+result
    def sendReply(self, lobby_id):
        servers = [self.formatServerData(server) for server in self.factory.serverList.get_servers_in_lobby(lobby_id)]
        self.transport.write(struct.pack(">L",len(servers))+"".join(servers))
        print "Received newstyle query for Lobby %s, returned %u Servers." % (lobby_id.hex, len(servers))
    def dataReceived(self, data):
        self.buffered += data
        # A request is exactly 32 bytes: list-protocol UUID + lobby UUID.
        if(len(self.buffered) == 32):
            if(uuid.UUID(bytes=self.buffered[:16]) == NewStyleList.LIST_PROTOCOL_ID):
                self.sendReply(uuid.UUID(bytes=self.buffered[16:32]))
        if(len(self.buffered) >= 32):
            self.transport.loseConnection()
    def connectionMade(self):
        self.buffered = ""
        self.list_protocol = None
        # Kill idle connections after 5 seconds.
        self.timeout = reactor.callLater(5, self.transport.loseConnection)
    def connectionLost(self, reason):
        if(self.timeout.active()): self.timeout.cancel()
class SimpleTCPReachabilityCheck(Protocol):
    """Probe protocol: a successful TCP connect registers the server."""
    def __init__(self, server, host, port, serverList):
        self.__server = server
        self.__host = host
        self.__port = port
        self.__serverList = serverList
    def connectionMade(self):
        # The game port is reachable from the outside; only now is the
        # server actually added to the public list.
        print "Connection check successful for %s" % (self.__server)
        self.__serverList.put(self.__server)
        self.transport.loseConnection()
class SimpleTCPReachabilityCheckFactory(ClientFactory):
    """Builds reachability probes; unreachable servers are never listed."""
    def __init__(self, server, host, port, serverList):
        self.__server = server
        self.__host = host
        self.__port = port
        self.__serverList = serverList
    def buildProtocol(self, addr):
        return SimpleTCPReachabilityCheck(self.__server, self.__host, self.__port, self.__serverList)
    def clientConnectionFailed(self, connector, reason):
        # Failure is only logged -- the server simply never gets added.
        print "Connection check failed for %s" % (self.__server)
# TODO: Better flood control using a leaky bucket counter
# Endpoints that registered recently are ignored; expirationset(10) is
# presumably a 10-second window -- confirm against expirationset's ctor.
RECENT_ENDPOINTS = expirationset(10)
class GG2LobbyRegV1(DatagramProtocol):
    """Legacy UDP registration: parses the old GG2 announce datagram."""
    # Every legacy registration datagram starts with these six magic bytes.
    MAGIC_NUMBERS = chr(4)+chr(8)+chr(15)+chr(16)+chr(23)+chr(42)
    # Splits the legacy info line into: optional "!private!" marker,
    # optional "[map]", the server name, optional "[players/slots]" and
    # an optional " - mod" suffix.
    INFO_PATTERN = re.compile(r"\A(!private!)?(?:\[([^\]]*)\])?\s*(.*?)\s*(?:\[(\d+)/(\d+)\])?(?: - (.*))?\Z", re.DOTALL)
    CONN_CHECK_FACTORY = Factory()
    CONN_CHECK_FACTORY.protocol = SimpleTCPReachabilityCheck
    def __init__(self, serverList):
        self.serverList = serverList
    def datagramReceived(self, data, (host, origport)):
        # Rudimentary flood control: drop datagrams from endpoints seen recently.
        if((host, origport) in RECENT_ENDPOINTS): return
        RECENT_ENDPOINTS.add((host, origport))
        if(not data.startswith(GG2LobbyRegV1.MAGIC_NUMBERS)): return
        data = data[6:]
        # Version byte 128 means a full 16-byte protocol UUID follows.
        if((len(data) < 1) or (ord(data[0])==128 and len(data) < 17)): return
        protocol_id = gg2_version_to_uuid(data)
        if(ord(data[0])==128): data = data[17:]
        else: data = data[1:]
        # Remainder: little-endian port, info length byte, info string.
        if((len(data) < 3)): return
        port = struct.unpack("<H", data[:2])[0]
        infolen = ord(data[2])
        infostr = data[3:]
        if(len(infostr) != infolen): return
        ip = socket.inet_aton(host)
        # Legacy servers carry no UUID of their own: derive a stable id
        # from the base UUID plus the packed IPv4 address and port.
        server_id = uuid.UUID(int=GG2_BASE_UUID.int+(struct.unpack("!L",ip)[0]<<16)+port)
        server = GameServer(server_id, GG2_LOBBY_ID)
        server.infos["protocol_id"] = protocol_id.bytes
        server.ipv4_endpoint = (ip, port)
        server.infos["game"] = "Legacy Gang Garrison 2 version or mod";
        server.infos["game_short"] = "old";
        matcher = GG2LobbyRegV1.INFO_PATTERN.match(infostr)
        if(matcher):
            if(matcher.group(1) is not None): server.passworded = True
            if(matcher.group(2) is not None): server.infos["map"] = matcher.group(2)
            server.name = matcher.group(3)
            if(matcher.group(4) is not None): server.players = int(matcher.group(4))
            if(matcher.group(5) is not None): server.slots = int(matcher.group(5))
            if(matcher.group(6) is not None):
                mod = matcher.group(6)
                if(mod=="OHU"):
                    server.infos["game"] = "Orpheon's Hosting Utilities"
                    server.infos["game_short"] = "ohu"
                    server.infos["game_url"] = "http://www.ganggarrison.com/forums/index.php?topic=28839.0"
                else:
                    server.infos["game"] = mod
                    # Short mod names drop the "old" abbreviation;
                    # presumably the full name is then shown -- confirm.
                    if(len(mod)<=10): del server.infos["game_short"]
        else:
            # Info line did not parse; use it verbatim as the name.
            server.name = infostr
        # The server is only listed once the TCP reachability probe succeeds.
        conn = reactor.connectTCP(host, port, SimpleTCPReachabilityCheckFactory(server, host, port, self.serverList), timeout=5)
class GG2LobbyQueryV1Factory(Factory):
    """Builds GG2LobbyQueryV1 protocols bound to the shared server list."""
    protocol = GG2LobbyQueryV1
    def __init__(self, serverList):
        # NOTE(review): duplicates the module-level GG2_LOBBY_ID and is not
        # read by GG2LobbyQueryV1 (which uses the global) -- confirm unused.
        self.gg2_lobby_id = uuid.UUID("1ccf16b1-436d-856f-504d-cc1af306aaa7")
        self.serverList = serverList
class NewStyleListFactory(Factory):
    """Builds NewStyleList protocols bound to the shared server list."""
    protocol = NewStyleList
    def __init__(self, serverList):
        self.serverList = serverList
class NewStyleReg(DatagramProtocol):
    """UDP dispatcher for new-style datagrams, keyed by leading protocol UUID."""
    # Maps the datagram's first 16 bytes (a protocol UUID) to a handler.
    REG_PROTOCOLS = {}
    def __init__(self, serverList):
        self.serverList = serverList
    def datagramReceived(self, data, (host, origport)):
        if(len(data) < 16): return
        try:
            reg_protocol = NewStyleReg.REG_PROTOCOLS[uuid.UUID(bytes=data[0:16])]
        except KeyError:
            # Unknown protocol UUID: silently drop the datagram.
            return
        reg_protocol.handle(data, (host, origport), self.serverList)
class GG2RegHandler(object):
    """Parses a new-style registration datagram into a GameServer entry."""
    def handle(self, data, (host, origport), serverList):
        # Rudimentary flood control: ignore endpoints seen very recently.
        if((host, origport) in RECENT_ENDPOINTS): return
        RECENT_ENDPOINTS.add((host, origport))
        # Fixed header is 61 bytes: reg UUID, server id, lobby id,
        # protocol byte, port, slots/players/bots, flags, kv count.
        if(len(data) < 61): return
        server_id = uuid.UUID(bytes=data[16:32])
        lobby_id = uuid.UUID(bytes=data[32:48])
        server = GameServer(server_id, lobby_id)
        server.protocol = ord(data[48])
        if(server.protocol not in (0,1)): return
        port = struct.unpack(">H", data[49:51])[0]
        if(port == 0): return
        ip = socket.inet_aton(host)
        server.ipv4_endpoint = (ip, port)
        server.slots, server.players, server.bots = struct.unpack(">HHH", data[51:57])
        # data[57:59] appears to be a 16-bit flags field (NewStyleList packs
        # flags as ">H"); only bit 0 of the low byte (passworded) is read.
        server.passworded = ((ord(data[58]) & 1) != 0)
        kventries = struct.unpack(">H", data[59:61])[0]
        kvtable = data[61:]
        # Walk the key/value table: 1-byte key length, key, 2-byte
        # big-endian value length, value. Bail out on any truncation.
        for i in xrange(kventries):
            if(len(kvtable) < 1): return
            keylen = ord(kvtable[0])
            valueoffset = keylen+3
            if(len(kvtable) < valueoffset): return
            key = kvtable[1:keylen+1]
            valuelen = struct.unpack(">H", kvtable[keylen+1:valueoffset])[0]
            if(len(kvtable) < valueoffset+valuelen): return
            value = kvtable[valueoffset:valueoffset+valuelen]
            server.infos[key] = value
            kvtable = kvtable[valueoffset+valuelen:]
        # A "name" entry is mandatory and is promoted to the name attribute.
        try:
            server.name = server.infos.pop("name")
        except KeyError:
            return
        if(server.protocol == 0):
            # Protocol 0 servers must pass a TCP reachability probe before
            # being listed; protocol 1 servers are listed immediately.
            conn = reactor.connectTCP(host, port, SimpleTCPReachabilityCheckFactory(server, host, port, serverList), timeout=5)
        else:
            serverList.put(server)
    # TODO: Prevent datagram reordering from re-registering a server (e.g. block the server ID for a few seconds)
class GG2UnregHandler(object):
    """Handles unregistration datagrams: drops the server by its UUID."""
    def handle(self, data, (host, origport), serverList):
        # Exactly 16 bytes of protocol UUID + 16 bytes of server id.
        if(len(data) != 32): return
        serverList.remove(uuid.UUID(bytes=data[16:32]))
# Wire up the registration/unregistration handlers by their protocol UUIDs.
NewStyleReg.REG_PROTOCOLS[uuid.UUID("b5dae2e8-424f-9ed0-0fcb-8c21c7ca1352")] = GG2RegHandler()
NewStyleReg.REG_PROTOCOLS[uuid.UUID("488984ac-45dc-86e1-9901-98dd1c01c064")] = GG2UnregHandler()
serverList = GameServerList()
# UDP ports accept server registrations; the same-numbered TCP ports
# serve the corresponding client list queries (legacy and new-style).
reactor.listenUDP(29942, GG2LobbyRegV1(serverList))
reactor.listenUDP(29944, NewStyleReg(serverList))
reactor.listenTCP(29942, GG2LobbyQueryV1Factory(serverList))
reactor.listenTCP(29944, NewStyleListFactory(serverList))
# HTTP status site on 29950: static ./httpdocs plus a /status resource.
webres = twisted.web.static.File("httpdocs")
webres.putChild("status", weblist.LobbyStatusResource(serverList))
reactor.listenTCP(29950, twisted.web.server.Site(webres))
reactor.run()
| |
import urllib
import os
import uuid
import requests
import stripe
import json
import flask
from flask import Flask, render_template, request, redirect, session, url_for, g
from flask.ext.sqlalchemy import SQLAlchemy
from flask_socketio import SocketIO
from flask_failsafe import failsafe
import calendar
import time
import jinja2
app = Flask(__name__, static_url_path='/static')
app.config['PROPAGATE_EXCEPTIONS'] = True
app.jinja_loader = jinja2.FileSystemLoader(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates'))
socketio = SocketIO(app)
from threading import Thread
thread = None
from flask import send_from_directory
app.config.from_pyfile('_config.py')
db = SQLAlchemy(app)
import models
from sqlalchemy import and_
import calendar
import time
from flask_oauth import OAuth
import ast
oauth = OAuth()
from sqlalchemy.orm.attributes import flag_modified
import uuid
from flask import jsonify
import requests
from flask_login import LoginManager, UserMixin, login_user, logout_user,\
current_user
from datadotworld.config import DefaultConfig
from datadotworld.datadotworld import DataDotWorld
import datadotworld
class InlineConfig(DefaultConfig):
    """datadotworld config that takes the auth token directly (no config file)."""
    def __init__(self, token):
        super(InlineConfig, self).__init__()
        # Overrides the token loaded by DefaultConfig; presumably read by
        # the datadotworld client for API auth -- confirm.
        self._auth_token = token
login_manager = LoginManager()
login_manager.init_app(app)
@login_manager.user_loader
def load_user(session_token):
    """Flask-Login user loader: look up a User row by its session token."""
    return models.User.query.filter_by(session_token=session_token).first()
@app.route('/login', strict_slashes=False)
def login():
    """Start the data.world OAuth flow by redirecting to the authorize URL."""
    location = 'https://data.world/oauth/authorize?client_id=%s&redirect_uri=https://dw_experiments_dev.hardworkingcoder.com/dwoauth&response_type=code' % app.config['DATADOTWORLD_CLIENT_ID']
    return flask.redirect(location, code=302)
def get_access_info(code):
    """Exchange an OAuth authorization code for a data.world access token.

    @param code: the authorization code passed back on the OAuth redirect
    @return: decoded JSON token response (contains 'access_token',
        'expires_in', ...)
    """
    params = {
        'code': code,
        'client_id': app.config['DATADOTWORLD_CLIENT_ID'],
        'client_secret': app.config['DATADOTWORLD_CLIENT_SECRET'],
        'grant_type': 'authorization_code'
    }
    # Let requests URL-encode the query string. The previous hand-rolled
    # '&'.join/'='.join built the URL without escaping, which is why '#'
    # in the client secret needed a manual '%23' workaround -- any other
    # reserved character would still have broken the request.
    response = requests.post('https://data.world/oauth/access_token', params=params)
    return response.json()
def get_user_info(access_token):
    """Fetch the authenticated user's profile from the data.world API.

    @param access_token: OAuth bearer token for the user
    @return: decoded JSON profile (id, displayName, avatarUrl, ...)
    """
    url = "https://api.data.world/v0/user"
    payload = "{}"
    # RFC 6750 bearer credentials are 'Bearer <token>'. The previous
    # format string produced 'Bearer <<token>>', keeping the angle-bracket
    # placeholder markers from the API docs in the actual header.
    headers = {'authorization': 'Bearer %s' % (access_token)}
    response = requests.request("GET", url, data=payload, headers=headers)
    return response.json()
def update_db_with_access_and_user_info(access_info, user_info):
    """Create or refresh the local User row from OAuth + profile payloads.

    @param access_info: token response from get_access_info
    @param user_info: profile response from get_user_info
    @return: the new or updated models.User instance
    """
    # EXISTS-style probe on social_id decides between update and insert.
    user_exists = db.session.query(models.User.social_id).filter_by(social_id=user_info['id']).scalar() is not None
    if user_exists:
        user = models.User.query.filter_by(social_id=user_info['id']).first()
        user.ddw_access_token = access_info['access_token']
        user.ddw_token_expires_in = access_info['expires_in']
        user.ddw_avatar_url = user_info['avatarUrl']
        user.nickname = user_info['displayName']
        user.ddw_user_updated = user_info['updated']
        db.session.commit()
    else:
        user = models.User(ddw_access_token=access_info['access_token'], ddw_token_expires_in=access_info['expires_in'], ddw_avatar_url=user_info['avatarUrl'], nickname=user_info['displayName'], social_id=user_info['id'], ddw_user_created=user_info['created'], ddw_user_updated=user_info['updated'])
        db.session.add(user)
        db.session.commit()
    return user
def row2dict(r):
    """Convert a SQLAlchemy model row into a dict of column name -> str value.

    (A def instead of an assigned lambda, per PEP 8 E731.)
    """
    return {c.name: str(getattr(r, c.name)) for c in r.__table__.columns}
@app.route('/api/get_users_info')
def get_users_info():
    """Return the logged-in user's DB row as JSON (all columns stringified)."""
    return jsonify(row2dict(load_user(session['user_id'])))
@app.route('/dwoauth', strict_slashes=False)
def dwoauth():
    """OAuth redirect target: trade the code for a token, upsert the user, log in."""
    access_info = get_access_info(request.args.get('code'))
    user_info = get_user_info(access_info['access_token'])
    user = update_db_with_access_and_user_info(access_info, user_info)
    # Drop any stale session state before establishing the new login.
    session.clear()
    login_user(user, True)
    return flask.redirect('/', code=302)
@app.route('/logout', strict_slashes=False)
def logout():
    """Log the current user out and send them back to the landing page."""
    logout_user()
    return redirect(url_for('index'))
@app.route('/api/list_users_datasets')
def list_users_datasets():
    """Return the JSON list of datasets owned by the logged-in user."""
    url = "https://api.data.world/v0/user/datasets/own"
    payload = "{}"
    # 'Bearer %s' per RFC 6750 -- the previous '<<%s>>' kept the
    # placeholder markers from the API docs in the real header. The
    # shadowing local 'import requests' was also dropped (it is already
    # imported at module level).
    headers = {'authorization': 'Bearer %s' % (load_user(session['user_id']).ddw_access_token)}
    response = requests.request("GET", url, data=payload, headers=headers)
    # requests already decodes JSON; no need to re-parse response.text.
    return jsonify(response.json())
@app.route('/', strict_slashes=False)
def index():
    """Serve the single-page app shell."""
    return send_from_directory('static', 'index.html')
def get_ddw(user_id):
    """Build a (DataDotWorld, api_client) pair for the given session user.

    The client is configured inline with the user's stored access token.
    """
    user = load_user(user_id)
    ddw = DataDotWorld(config=InlineConfig(user.ddw_access_token))
    return ddw, ddw.api_client
@app.route('/api/create_dataset', strict_slashes=False, methods=['POST'])
def create_dataset():
    """Create a dataset for the logged-in user from 'title'/'license'/'visibility' form fields."""
    ddw, ddw_client = get_ddw(session['user_id'])
    owner = load_user(session['user_id']).social_id
    return ddw_client.create_dataset(owner, title=request.form['title'], license=request.form['license'], visibility=request.form['visibility'])
@app.route('/api/delete_file', strict_slashes=False, methods=['POST'])
def delete_file():
    """Delete one file ('filename') from the dataset named by 'owner'/'id'."""
    ddw, ddw_client = get_ddw(session['user_id'])
    ddw_client.delete_files('%s/%s' % (request.form['owner'], request.form['id']), [request.form['filename']])
    return jsonify({'success': True})
def change_to_csv(filename):
    """Return *filename* lowercased with its extension replaced by '.csv'.

    Uses os.path.splitext so only the final extension is replaced (the
    previous filename.index('.') truncated at the FIRST dot, mangling
    names like 'data.v2.txt'), and filenames without any extension no
    longer raise ValueError.
    """
    filename = filename.lower()
    base, _ext = os.path.splitext(filename)
    return base + '.csv'
@app.route('/api/rename_file', strict_slashes=False, methods=['POST'])
def rename_file():
    """Rename a dataset file via the local ~/.dw cache.

    The dataset is synced locally, the cached CSV is renamed on disk,
    the old remote file is deleted, and the renamed copy is re-uploaded.
    """
    ddw, ddw_client = get_ddw(session['user_id'])
    from os.path import expanduser, join
    home = expanduser("~")
    local_ddw_data = join(home, '.dw/cache/%s/%s/latest/data/' % (request.form['owner'], request.form['id']))
    ddw.load_dataset('%s/%s' % (request.form['owner'], request.form['id']), force_update=True)
    os.rename(join(local_ddw_data, change_to_csv(request.form['filename'])), join(local_ddw_data, change_to_csv(request.form['new_filename'])))
    ddw_client.delete_files('%s/%s' % (request.form['owner'], request.form['id']), [request.form['filename']])
    ddw_client.upload_files('%s/%s' % (request.form['owner'], request.form['id']), [join(local_ddw_data, change_to_csv(request.form['new_filename']))])
    return jsonify({'success': True})
@app.route('/api/move_file', strict_slashes=False, methods=['POST'])
def move_file():
    """Move a file between datasets via the local ~/.dw cache.

    The source dataset is synced locally, the cached CSV is uploaded to
    the target dataset, then removed from the source.
    """
    ddw, ddw_client = get_ddw(session['user_id'])
    from os.path import expanduser, join
    home = expanduser("~")
    local_ddw_data = join(home, '.dw/cache/%s/%s/latest/data/' % (request.form['owner'], request.form['current_id']))
    ddw.load_dataset('%s/%s' % (request.form['owner'], request.form['current_id']), force_update=True)
    ddw_client.upload_files('%s/%s' % (request.form['owner'], request.form['target_id']), [join(local_ddw_data, change_to_csv(request.form['filename']))])
    ddw_client.delete_files('%s/%s' % (request.form['owner'], request.form['current_id']), [request.form['filename']])
    return jsonify({'success': True})
@app.route('/api/upload_file', strict_slashes=False, methods=['POST'])
def upload_file():
    """Receive a multipart upload ('file_0') and push it into a dataset.

    Form fields 'owner' and 'id' name the target data.world dataset.
    Returns JSON {'success': bool}.
    """
    ddw, ddw_client = get_ddw(session['user_id'])
    upload = request.files['file_0']
    # If the user does not select a file, the browser may submit an empty
    # part without a filename. The original built this failure response
    # but never returned it, falling through to the success path.
    if upload.filename == '':
        return jsonify({'success': False})
    if upload:
        filename = upload.filename
        local_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        upload.save(local_path)
        ddw_client.upload_files('%s/%s' % (request.form['owner'], request.form['id']), [local_path])
    return jsonify({'success': True})
@failsafe
def create_app():
    """App factory wrapper; @failsafe (flask_failsafe) guards app creation
    so errors during reload do not kill the dev server -- see its docs."""
    return app
import eventlet
eventlet.monkey_patch()
# NOTE(review): monkey_patch() usually must run before other modules are
# imported to patch their socket/threading references reliably; here it
# runs at the bottom of the module -- confirm this is intentional.
if __name__ == '__main__':
    socketio.run(create_app(), debug=True, port=5000)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Program to (re)categorize images at commons.
The program uses commonshelper for category suggestions.
It takes the suggestions and the current categories. Put the categories through
some filters and adds the result.
The following command line parameters are supported:
-onlyfilter Don't use Commonsense to get categories, just filter the
current categories
-onlyuncat Only work on uncategorized images. Will prevent the bot from
working on an image multiple times.
-hint Give Commonsense a hint.
For example -hint:li.wikipedia.org
-onlyhint Give Commonsense a hint. And only work on this hint.
Syntax is the same as -hint. Some special hints are possible:
_20 : Work on the top 20 wikipedia's
_80 : Work on the top 80 wikipedia's
wps : Work on all wikipedia's
"""
#
# (C) Multichill, 2008-2011
# (C) Pywikibot team, 2008-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
import re
import socket
import sys
import time
import xml.etree.ElementTree
import pywikibot
from pywikibot import pagegenerators, textlib
from pywikibot.comms.http import fetch
if sys.version_info[0] > 2:
from urllib.parse import urlencode
else:
from urllib import urlencode
# Categories that must never be suggested (populated by initLists).
category_blacklist = []
# Country names used by filterCountries (populated by initLists).
countries = []
# Default CommonSense search scope: '_20' = the top-20 wikipedias.
search_wikis = u'_20'
# Optional hint wiki passed through to CommonSense ('-hint:' option).
hint_wiki = u''
def initLists():
    """Populate the category blacklist and country list from Commons pages."""
    global category_blacklist
    global countries
    blacklistPage = pywikibot.Page(pywikibot.Site(u'commons', u'commons'),
                                   u'User:Multichill/Category_blacklist')
    category_blacklist.extend(cat.title(withNamespace=False)
                              for cat in blacklistPage.linkedPages())
    countryPage = pywikibot.Page(pywikibot.Site(u'commons', u'commons'),
                                 u'User:Multichill/Countries')
    countries.extend(country.title(withNamespace=False)
                     for country in countryPage.linkedPages())
    return
def categorizeImages(generator, onlyFilter, onlyUncat):
    """Loop over all images in generator and try to categorize them.

    Get category suggestions from CommonSense.

    @param generator: pages to process; only existing, non-redirect
        File-namespace pages are considered
    @param onlyFilter: skip CommonSense and only filter current categories
    @param onlyUncat: only work on images carrying the Uncategorized template
    """
    for page in generator:
        if page.exists() and (page.namespace() == 6) and \
           (not page.isRedirectPage()):
            imagepage = pywikibot.FilePage(page.site, page.title())
            pywikibot.output(u'Working on ' + imagepage.title())
            if onlyUncat and not(u'Uncategorized' in imagepage.templates()):
                pywikibot.output(u'No Uncategorized template found')
            else:
                currentCats = getCurrentCats(imagepage)
                if onlyFilter:
                    commonshelperCats = []
                    usage = []
                    galleries = []
                else:
                    (commonshelperCats, usage, galleries) = getCommonshelperCats(imagepage)
                newcats = applyAllFilters(commonshelperCats + currentCats)
                # Only save when filtering actually changed the category set.
                if len(newcats) > 0 and not(set(currentCats) == set(newcats)):
                    for cat in newcats:
                        pywikibot.output(u' Found new cat: ' + cat)
                    saveImagePage(imagepage, newcats, usage, galleries,
                                  onlyFilter)
def getCurrentCats(imagepage):
    """Return the image's current categories, namespace-stripped and deduplicated."""
    titles = {cat.title(withNamespace=False) for cat in imagepage.categories()}
    return list(titles)
def getCommonshelperCats(imagepage):
    """Get category suggestions from CommonSense.

    Queries the toolserver CommonSense service and parses its plain-text
    reply into suggested categories, wiki usage and galleries.

    @rtype: tuple of (list of unicode, list of tuple, list of unicode)
    """
    commonshelperCats = []
    usage = []
    galleries = []
    global search_wikis
    global hint_wiki
    site = imagepage.site
    lang = site.code
    family = site.family.name
    if lang == u'commons' and family == u'commons':
        parameters = urlencode(
            {'i': imagepage.title(withNamespace=False).encode('utf-8'),
             'r': 'on',
             'go-clean': 'Find+Categories',
             'p': search_wikis,
             'cl': hint_wiki})
    elif family == u'wikipedia':
        parameters = urlencode(
            {'i': imagepage.title(withNamespace=False).encode('utf-8'),
             'r': 'on',
             'go-move': 'Find+Categories',
             'p': search_wikis,
             'cl': hint_wiki,
             'w': lang})
    else:
        # Can't handle other sites atm
        return [], [], []
    commonsenseRe = re.compile(
        r'^#COMMONSENSE(.*)#USAGE(\s)+\((?P<usagenum>(\d)+)\)\s(?P<usage>(.*))\s'
        r'#KEYWORDS(\s)+\((?P<keywords>(\d)+)\)(.*)'
        r'#CATEGORIES(\s)+\((?P<catnum>(\d)+)\)\s(?P<cats>(.*))\s'
        r'#GALLERIES(\s)+\((?P<galnum>(\d)+)\)\s(?P<gals>(.*))\s(.*)#EOF$',
        re.MULTILINE + re.DOTALL)
    gotInfo = False
    matches = None
    maxtries = 10
    tries = 0
    # Retry transient network errors, but give up after maxtries attempts.
    while not gotInfo:
        try:
            if tries < maxtries:
                tries += 1
                commonsHelperPage = fetch(
                    "https://toolserver.org/~daniel/WikiSense/CommonSense.php?%s" % parameters)
                matches = commonsenseRe.search(
                    commonsHelperPage.content)
                gotInfo = True
            else:
                break
        except IOError:
            pywikibot.output(u'Got an IOError, let\'s try again')
        except socket.timeout:
            pywikibot.output(u'Got a timeout, let\'s try again')
    if matches and gotInfo:
        # Regex groups are strings: cast before comparing. The bare
        # str > int comparison raised TypeError on Python 3 (and compared
        # by type rather than value on Python 2).
        if int(matches.group('usagenum')) > 0:
            used = matches.group('usage').splitlines()
            for use in used:
                usage = usage + getUsage(use)
        if int(matches.group('catnum')) > 0:
            cats = matches.group('cats').splitlines()
            for cat in cats:
                commonshelperCats.append(cat.replace('_', ' '))
                pywikibot.output(u'category : ' + cat)
        if int(matches.group('galnum')) > 0:
            gals = matches.group('gals').splitlines()
            for gal in gals:
                galleries.append(gal.replace('_', ' '))
                pywikibot.output(u'gallery : ' + gal)
    commonshelperCats = list(set(commonshelperCats))
    galleries = list(set(galleries))
    for (lang, project, article) in usage:
        pywikibot.output(lang + project + article)
    return commonshelperCats, usage, galleries
def getOpenStreetMapCats(latitude, longitude):
    """Get a list of location categories based on the OSM nominatim tool."""
    result = []
    locationList = getOpenStreetMap(latitude, longitude)
    for i, name in enumerate(locationList):
        pywikibot.log(u'Working on %r' % name)
        # Up to two following entries serve as parent and grandparent hints.
        ancestors = locationList[i + 1:i + 3]
        if len(ancestors) == 2:
            category = getCategoryByName(name=name,
                                         parent=ancestors[0],
                                         grandparent=ancestors[1])
        elif len(ancestors) == 1:
            category = getCategoryByName(name=name, parent=ancestors[0])
        else:
            category = getCategoryByName(name=name)
        if category and not category == u'':
            result.append(category)
    return result
def getOpenStreetMap(latitude, longitude):
    """
    Get the result from https://nominatim.openstreetmap.org/reverse .

    @rtype: list of unicode address-part names (the original docstring
        claimed tuples, but only the element text is collected)
    """
    result = []
    gotInfo = False
    parameters = urlencode({'lat': latitude, 'lon': longitude, 'accept-language': 'en'})
    # NOTE(review): retries forever on persistent failure (unlike
    # getCommonshelperCats, which bounds retries) -- consider a maxtries cap.
    while not gotInfo:
        try:
            page = fetch('https://nominatim.openstreetmap.org/reverse?format=xml&%s' % parameters)
            et = xml.etree.ElementTree.fromstring(page.content)
            gotInfo = True
        except IOError:
            pywikibot.output(u'Got an IOError, let\'s try again')
            time.sleep(30)
        except socket.timeout:
            pywikibot.output(u'Got a timeout, let\'s try again')
            time.sleep(30)
    validParts = [u'hamlet', u'village', u'city', u'county', u'country']
    invalidParts = [u'path', u'road', u'suburb', u'state', u'country_code']
    addressparts = et.find('addressparts')
    if addressparts is None:
        # Nominatim returned no <addressparts> element; nothing to extract.
        return result
    # Iterate the element directly: Element.getchildren() was deprecated
    # since Python 3.2 and removed in Python 3.9.
    for addresspart in addressparts:
        if addresspart.tag in validParts:
            result.append(addresspart.text)
        elif addresspart.tag in invalidParts:
            pywikibot.output(u'Dropping %s, %s' % (addresspart.tag, addresspart.text))
        else:
            pywikibot.warning('%s, %s is not in addressparts lists'
                              % (addresspart.tag, addresspart.text))
    return result
def getCategoryByName(name, parent=u'', grandparent=u''):
    """Find an existing Commons category for *name*.

    Tries 'Name,_Parent', then 'Name,_Grandparent', then plain 'Name';
    returns the first candidate whose category page exists, else u''.
    """
    commons = pywikibot.Site(u'commons', u'commons')
    candidates = []
    if parent != u'':
        candidates.append(name.strip() + u',_' + parent.strip())
    if grandparent != u'':
        candidates.append(name.strip() + u',_' + grandparent.strip())
    candidates.append(name.strip())
    for workname in candidates:
        if pywikibot.Category(commons, workname).exists():
            return workname
    return u''
def getUsage(use):
    """Parse one CommonSense usage line into (lang, project, article) tuples.

    Lines look like 'en.wikipedia.org: Article1 Article2'; a line that
    does not match yields an empty list.
    """
    pattern = re.compile(
        r'^(?P<lang>([\w-]+))\.(?P<project>([\w]+))\.org:(?P<articles>\s(.*))')
    found = pattern.search(use)
    if not found:
        return []
    lang = found.group('lang') or ''
    project = found.group('project') or ''
    articles = found.group('articles') or ''
    return [(lang, project, article) for article in articles.split()]
def applyAllFilters(categories):
    """Run every category filter in order and return the surviving set."""
    filtered = filterDisambiguation(categories)
    filtered = followRedirects(filtered)
    filtered = filterBlacklist(filtered)
    filtered = filterCountries(filtered)
    return filterParents(filtered)
def filterBlacklist(categories):
    """Filter out categories which are on the blacklist (deduplicated)."""
    kept = {cat.replace('_', ' ') for cat in categories
            if cat.replace('_', ' ') not in category_blacklist}
    return list(kept)
def filterDisambiguation(categories):
    """Filter out disambiguation categories."""
    return [cat for cat in categories
            if not pywikibot.Page(pywikibot.Site(u'commons', u'commons'),
                                  cat, ns=14).isDisambig()]
def followRedirects(categories):
    """Replace each category that is a redirect with its redirect target."""
    resolved = []
    for cat in categories:
        page = pywikibot.Page(pywikibot.Site(u'commons', u'commons'),
                              cat, ns=14)
        if not page.isCategoryRedirect():
            resolved.append(cat)
        else:
            target = page.getCategoryRedirectTarget()
            resolved.append(target.title(withNamespace=False))
    return resolved
def filterCountries(categories):
    """Try to filter out ...by country categories.

    First make a list of any ...by country categories and try to find some
    countries. If a by country category has a subcategory containing one of
    the countries found, add it. The ...by country categories remain in the
    set and should be filtered out by filterParents.
    """
    # Work on a copy: the original did `result = categories` and then
    # appended, mutating the caller's list as a hidden side effect.
    result = list(categories)
    listByCountry = []
    listCountries = []
    for cat in categories:
        # If cat ends with 'by country' remember it for subcategory scanning.
        if cat.endswith(u'by country'):
            listByCountry.append(cat)
        # Otherwise, if cat contains the name of a country, remember it.
        else:
            for country in countries:
                if country in cat:
                    listCountries.append(country)
    if len(listByCountry) > 0:
        for bc in listByCountry:
            category = pywikibot.Category(
                pywikibot.Site(u'commons', u'commons'), u'Category:' + bc)
            for subcategory in category.subcategories():
                for country in listCountries:
                    if subcategory.title(withNamespace=False).endswith(country):
                        result.append(subcategory.title(withNamespace=False))
    return list(set(result))
def filterParents(categories):
    """Remove all parent categories from the set to prevent overcategorization.

    Delegates the computation to the toolserver filtercats service; on
    failure or an empty reply the input is returned unchanged (fail open).
    """
    result = []
    toFilter = u''
    # Serialize the candidate categories as wikitext category links.
    for cat in categories:
        cat = cat.replace('_', ' ')
        toFilter = toFilter + "[[Category:" + cat + "]]\n"
    parameters = urlencode({'source': toFilter.encode('utf-8'),
                            'bot': '1'})
    filterCategoriesRe = re.compile(r'\[\[Category:([^\]]*)\]\]')
    try:
        filterCategoriesPage = fetch(
            "https://toolserver.org/~multichill/filtercats.php?%s" % parameters)
        result = filterCategoriesRe.findall(
            filterCategoriesPage.content)
    except IOError:
        # Something is wrong, forget about this filter, and return the input
        return categories
    if not result:
        # Is empty, dont want to remove all categories
        return categories
    return result
def saveImagePage(imagepage, newcats, usage, galleries, onlyFilter):
    """Remove the old categories and add the new categories to the image.

    @param imagepage: the file page to rewrite
    @param newcats: categories to apply
    @param usage: wiki usage tuples for the check-categories template
    @param galleries: galleries for the check-categories template
    @param onlyFilter: if True only swap categories; otherwise also strip
        maintenance templates and append the check-categories template
    """
    newtext = textlib.removeCategoryLinks(imagepage.text, imagepage.site)
    if not onlyFilter:
        newtext = removeTemplates(newtext)
        newtext = newtext + getCheckCategoriesTemplate(usage, galleries,
                                                       len(newcats))
    newtext += u'\n'
    for category in newcats:
        newtext = newtext + u'[[Category:' + category + u']]\n'
    if onlyFilter:
        comment = u'Filtering categories'
    else:
        comment = ('Image is categorized by a bot using data from '
                   '[[Commons:Tools#CommonSense|CommonSense]]')
    pywikibot.showDiff(imagepage.text, newtext)
    imagepage.text = newtext
    imagepage.save(comment)
    return
def removeTemplates(oldtext=u''):
    """Remove {{Uncategorized}} and {{Check categories}} templates."""
    # Patterns are applied in the same order as the original chained subs.
    patterns = [
        r'{{\s*([Uu]ncat(egori[sz]ed( image)?)?|[Nn]ocat|[Nn]eedscategory)[^}]*}}',
        u'<!-- Remove this line once you have added categories -->',
        r'\{\{\s*[Cc]heck categories[^}]*\}\}',
    ]
    cleaned = oldtext
    for pattern in patterns:
        cleaned = re.sub(pattern, u'', cleaned)
    return cleaned
def getCheckCategoriesTemplate(usage, galleries, ncats):
    """Build the check categories template with all parameters."""
    parts = ['{{Check categories|year={{subst:CURRENTYEAR}}|month={{subst:'
             'CURRENTMONTHNAME}}|day={{subst:CURRENTDAY}}\n']
    for counter, (lang, project, article) in enumerate(usage, 1):
        parts.append(u'|lang%d=%s|wiki%d=%s|article%d=%s\n'
                     % (counter, lang, counter, project, counter, article))
    for counter, gallery in enumerate(galleries, 1):
        parts.append(u'|gallery%d=%s\n'
                     % (counter, gallery.replace('_', ' ')))
    parts.append(u'|ncats=%d\n' % ncats)
    parts.append(u'}}\n')
    return u''.join(parts)
def main(*args):
    """
    Process command line arguments and invoke bot.

    If args is an empty list, sys.argv is used.

    @param args: command line arguments
    @type args: list of unicode
    """
    generator = None
    onlyFilter = False
    onlyUncat = False
    # Process global args and prepare generator args parser
    local_args = pywikibot.handle_args(args)
    genFactory = pagegenerators.GeneratorFactory()
    global search_wikis
    global hint_wiki
    for arg in local_args:
        if arg == '-onlyfilter':
            onlyFilter = True
        elif arg == '-onlyuncat':
            onlyUncat = True
        elif arg.startswith('-hint:'):
            hint_wiki = arg[len('-hint:'):]
        elif arg.startswith('-onlyhint'):
            # NOTE(review): overwrites the search scope with the hint; a
            # bare '-onlyhint' (no colon) yields '' -- confirm intended.
            search_wikis = arg[len('-onlyhint:'):]
        else:
            genFactory.handleArg(arg)
    generator = genFactory.getCombinedGenerator()
    if not generator:
        # Default: walk the media-needing-categories tree on Commons.
        site = pywikibot.Site(u'commons', u'commons')
        generator = pagegenerators.CategorizedPageGenerator(
            pywikibot.Category(site, u'Category:Media needing categories'),
            recurse=True)
    initLists()
    categorizeImages(generator, onlyFilter, onlyUncat)
    pywikibot.output(u'All done')
# Script entry point.
if __name__ == "__main__":
    main()
| |
# -*- coding: utf-8 -*-
"""
Class and methods to handle Job submission.
"""
import os as _os
import sys as _sys
from uuid import uuid4 as _uuid
from time import sleep as _sleep
from datetime import datetime as _dt
from subprocess import CalledProcessError as _CalledProcessError
# Try to use dill, revert to pickle if not found
import dill as _pickle
from six import reraise as _reraise
###############################################################################
# Our functions #
###############################################################################
from . import run as _run
from . import conf as _conf
from . import queue as _queue
from . import logme as _logme
from . import local as _local
from . import options as _options
from . import script_runners as _scrpts
from . import ClusterError as _ClusterError
from .submission_scripts import Script as _Script
from .submission_scripts import Function as _Function
__all__ = ['Job']
###############################################################################
# The Job Class #
###############################################################################
class Job(object):
"""Information about a single job on the cluster.
Holds information about submit time, number of cores, the job script,
and more.
Below are the core attributes and methods required to use this class.
Attributes:
out (str): The output of the function or a copy of stdout
for a script
stdout (str): Any output to STDOUT
stderr (str): Any output to STDERR
exitcode (int): The exitcode of the running processes (the script
runner if the Job is a function.
start (datetime): A datetime object containing time execution
started on the remote node.
end (datetime): Like start but when execution ended.
runtime (timedelta): A timedelta object containing runtime.
files (list): A list of script files associated with this class
done (bool): True if the job has completed
Methods:
submit(): submit the job if it is ready
wait(): block until the job is done
get(): block until the job is done and then return the output
(stdout if job is a script), by default saves all outputs to
self (i.e. .out, .stdout, .stderr) and deletes all
intermediate files before returning. If `save` argument is
`False`, does not delete the output files by default.
clean(): delete any files created by this object
Printing or reproducing the class will display detailed job information.
Both `wait()` and `get()` will update the queue every few seconds
(defined by the queue_update item in the config) and add queue information
to the job as they go.
If the job disappears from the queue with no information, it will be listed
as 'completed'.
All jobs have a .submission attribute, which is a Script object containing
the submission script for the job and the file name, plus a 'written' bool
that checks if the file exists.
In addition, SLURM jobs have a .exec_script attribute, which is a Script
object containing the shell command to _run. This difference is due to the
fact that some SLURM systems execute multiple lines of the submission file
at the same time.
Finally, if the job command is a function, this object will also contain a
`.function` attribute, which contains the script to run the function.
"""
id = None
submitted = False
written = False
found = False
submit_time = None
# Holds a pool object if we are in local mode
pool_job = None
# Scripts
submission = None
exec_script = None
function = None
imports = None
# Dependencies
dependencies = None
# Pickled output file for functions
poutfile = None
# Holds queue information in torque and slurm
queue_info = None
# Output tracking
_got_out = False
_got_stdout = False
_got_stderr = False
_got_exitcode = False
_out = None
_stdout = None
_stderr = None
_exitcode = None
# Time tracking
_got_times = False
start = None
end = None
# Track update status
_updating = False
# Auto Cleaning
clean_files = _conf.get_option('jobs', 'clean_files')
clean_outputs = _conf.get_option('jobs', 'clean_outputs')
    def __init__(self, command, args=None, kwargs=None, name=None, qtype=None,
                 profile=None, **kwds):
        """Build a Job: resolve options, pick a queue, and render scripts.

        Args:
            command (function/str): The command or function to execute.
            args (tuple/dict): Optional arguments to add to command,
                               particularly useful for functions.
            kwargs (dict): Optional keyword arguments to pass to the
                           command, only used for functions.
            name (str): Optional name of the job. If not defined,
                        guessed. If a job of the same name is
                        already queued, an integer job number (not
                        the queue number) will be added, ie.
                        <name>.1
            qtype (str): Override the default queue type
            profile (str): The name of a profile saved in the
                           conf

        *All other keywords are parsed into cluster keywords by the
        options system. For available keywords see `fyrd.option_help()`*
        """
        ########################
        #  Sanitize arguments  #
        ########################
        _logme.log('Args pre-check: {}'.format(kwds), 'debug')
        kwds = _options.check_arguments(kwds)
        _logme.log('Args post-check: {}'.format(kwds), 'debug')
        # Override autoclean state (set in config file)
        if 'clean_files' in kwds:
            self.clean_files = kwds.pop('clean_files')
        if 'clean_outputs' in kwds:
            self.clean_outputs = kwds.pop('clean_outputs')
        # Path handling: split run/output/script directories out of kwds
        [kwds, self.runpath,
         self.outpath, self.scriptpath] = _conf.get_job_paths(kwds)
        # Save command
        self.command = command
        self.args = args
        # Merge in profile, this includes all args from the DEFAULT profile
        # as well, ensuring that those are always set at a minimum.
        profile = profile if profile else 'DEFAULT'
        prof = _conf.get_profile(profile)
        if not prof:
            raise _ClusterError('No profile found for {}'.format(profile))
        for k,v in prof.args.items():
            if k not in kwds:
                kwds[k] = v
        # Use the default profile as a backup if any arguments missing
        default_args = _conf.DEFAULT_PROFILES['DEFAULT']
        default_args.update(_conf.get_profile('DEFAULT').args)
        for opt, arg in default_args.items():
            if opt not in kwds:
                _logme.log('{} not in kwds, adding from default: {}:{}'
                           .format(opt, opt, arg), 'debug')
                kwds[opt] = arg
        # Get environment: detect slurm/torque/local once and cache it
        if not _queue.MODE:
            _queue.MODE = _queue.get_cluster_environment()
        self.qtype = qtype if qtype else _queue.MODE
        self.queue = _queue.Queue(user='self', qtype=self.qtype)
        self.state = 'Not_Submitted'
        # Set name: guess one from the function/command if not provided
        if not name:
            if callable(command):
                # str(func) looks like '<function foo at 0x...>' or
                # '<bound method Cls.foo of ...>'; parse a name out of it
                strcmd = str(command).strip('<>')
                parts = strcmd.split(' ')
                if parts[0] == 'bound':
                    # NOTE(review): parts[2:3] is a single-element slice, so
                    # the '_'.join is a no-op wrapper -- presumably intended
                    # to be safe when parts is short; confirm before changing
                    name = '_'.join(parts[2:3])
                else:
                    parts.remove('function')
                    try:
                        parts.remove('built-in')
                    except ValueError:
                        pass
                    name = parts[0]
            else:
                # For shell commands use the executable's basename
                name = command.split(' ')[0].split('/')[-1]
        # Make sure name not in queue: append count and a short uuid
        self.uuid = str(_uuid()).split('-')[0]
        names = [i.name.split('.')[0] for i in self.queue]
        namecnt = len([i for i in names if i == name])
        name = '{}.{}.{}'.format(name, namecnt, self.uuid)
        self.name = name
        # Set modules (environment modules to `module load` before running)
        self.modules = kwds.pop('modules') if 'modules' in kwds else None
        if self.modules:
            self.modules = _run.opt_split(self.modules, (',', ';'))
        # Make sure args are a tuple or dictionary
        # NOTE(review): tuple('abc') yields ('a', 'b', 'c') -- a bare string
        # arg is split into characters here; confirm this is intended
        if args:
            if isinstance(args, str):
                args = tuple(args)
            if not isinstance(args, (tuple, dict)):
                try:
                    args = tuple(args)
                except TypeError:
                    args = (args,)
        # In case cores are passed as None
        if 'nodes' not in kwds:
            kwds['nodes'] = default_args['nodes']
        if 'cores' not in kwds:
            kwds['cores'] = default_args['cores']
        self.nodes = kwds['nodes']
        self.cores = kwds['cores']
        # Set output files; relative paths are placed in self.outpath
        suffix = kwds.pop('suffix') if 'suffix' in kwds \
            else _conf.get_option('jobs', 'suffix')
        if 'outfile' in kwds:
            pth, fle = _os.path.split(kwds['outfile'])
            if not pth:
                pth = self.outpath
            kwds['outfile'] = _os.path.join(pth, fle)
        else:
            kwds['outfile'] = _os.path.join(
                self.outpath, '.'.join([name, suffix, 'out']))
        if 'errfile' in kwds:
            pth, fle = _os.path.split(kwds['errfile'])
            if not pth:
                pth = self.outpath
            kwds['errfile'] = _os.path.join(pth, fle)
        else:
            kwds['errfile'] = _os.path.join(
                self.outpath, '.'.join([name, suffix, 'err']))
        self.outfile = kwds['outfile']
        self.errfile = kwds['errfile']
        # Check and set dependencies: ints, digit-strings, or Job objects
        if 'depends' in kwds:
            dependencies = _run.listify(kwds.pop('depends'))
            self.dependencies = []
            errmsg = 'Dependencies must be number or list'
            for dependency in dependencies:
                if isinstance(dependency, str):
                    if not dependency.isdigit():
                        raise _ClusterError(errmsg)
                    dependency = int(dependency)
                if not isinstance(dependency, (int, Job)):
                    raise _ClusterError(errmsg)
                self.dependencies.append(dependency)
        ######################################
        #  Command and Function Preparation  #
        ######################################
        # Get imports
        imports = kwds.pop('imports') if 'imports' in kwds else None
        # Get syspaths
        syspaths = kwds.pop('syspaths') if 'syspaths' in kwds else None
        # Split out sys.paths from imports and set imports in self
        if imports:
            self.imports = []
            syspaths = syspaths if syspaths else []
            for i in imports:
                if i.startswith('sys.path.append')\
                        or i.startswith('sys.path.insert'):
                    syspaths.append(i)
                else:
                    self.imports.append(i)
        # Function specific initialization: wrap the function in a runner
        # script and turn `command` into a python invocation of that script
        if callable(command):
            self.kind = 'function'
            script_file = _os.path.join(
                self.scriptpath, '{}_func.{}.py'.format(name, suffix)
            )
            self.poutfile = self.outfile + '.func.pickle'
            self.function = _Function(
                file_name=script_file, function=command, args=args,
                kwargs=kwargs, imports=self.imports, syspaths=syspaths,
                outfile=self.poutfile
            )
            # Collapse the _command into a python call to the function script
            executable = '#!/usr/bin/env python{}'.format(
                _sys.version_info.major) if _conf.get_option(
                    'jobs', 'generic_python') else _sys.executable
            command = '{} {}'.format(executable, self.function.file_name)
            args = None
        else:
            self.kind = 'script'
            self.poutfile = None
            # Collapse args into command
            # NOTE(review): there is no space between `command` and the first
            # arg, and dict-typed args would break ' '.join -- confirm
            command = command + ' '.join(args) if args else command
        #####################
        #  Script Creation  #
        #####################
        # Build execution wrapper with modules
        precmd = ''
        if self.modules:
            for module in self.modules:
                precmd += 'module load {}\n'.format(module)
        # Create queue-dependent scripts
        sub_script = ''
        if self.qtype == 'slurm':
            scrpt = _os.path.join(
                self.scriptpath, '{}.{}.sbatch'.format(name, suffix)
            )
            # We use a separate script and a single srun command to avoid
            # issues with multiple threads running at once
            exec_script = _os.path.join(self.scriptpath,
                                        '{}.{}.script'.format(name, suffix))
            exe_script = _scrpts.CMND_RUNNER_TRACK.format(
                precmd=precmd, usedir=self.runpath, name=name, command=command)
            # Create the exec_script Script object
            self.exec_script = _Script(script=exe_script,
                                       file_name=exec_script)
            # Add all of the keyword arguments at once
            precmd = _options.options_to_string(kwds, self.qtype) + precmd
            ecmnd = 'srun bash {}'.format(exec_script)
            sub_script = _scrpts.SCRP_RUNNER.format(precmd=precmd,
                                                    script=exec_script,
                                                    command=ecmnd)
        elif self.qtype == 'torque':
            scrpt = _os.path.join(self.scriptpath,
                                  '{}.cluster.qsub'.format(name))
            # Add all of the keyword arguments at once
            precmd = _options.options_to_string(kwds, self.qtype) + precmd
            sub_script = _scrpts.CMND_RUNNER_TRACK.format(
                precmd=precmd, usedir=self.runpath, name=name, command=command)
        elif self.qtype == 'local':
            # Create the pool lazily if it does not exist or its runner died
            if not _local.JQUEUE or not _local.JQUEUE.runner.is_alive():
                threads = kwds['threads'] if 'threads' in kwds \
                    else _local.THREADS
                _local.JQUEUE = _local.JobQueue(cores=threads)
            scrpt = _os.path.join(self.scriptpath, '{}.cluster'.format(name))
            sub_script = _scrpts.CMND_RUNNER_TRACK.format(
                precmd=precmd, usedir=self.runpath, name=name, command=command)
        else:
            raise _ClusterError('Invalid queue type')
        # Create the submission Script object
        self.submission = _Script(script=sub_script,
                                  file_name=scrpt)
        # Save the keyword arguments for posterity
        self.kwargs = kwds
####################
# Public Methods #
####################
def write(self, overwrite=True):
"""Write all scripts.
Args:
overwrite (bool): Overwrite existing files, defaults to True.
"""
_logme.log('Writing files, overwrite={}'.format(overwrite), 'debug')
self.submission.write(overwrite)
if self.exec_script:
self.exec_script.write(overwrite)
if self.function:
self.function.write(overwrite)
self.written = True
def clean(self, delete_outputs=None, get_outputs=True):
"""Delete all scripts created by this module, if they were written.
Args:
delete_outputs (bool): also delete all output and err files,
but get their contents first.
get_outputs (bool): if delete_outputs, save outputs before
deleting.
"""
_logme.log('Cleaning outputs, delete_outputs={}'
.format(delete_outputs), 'debug')
if delete_outputs is None:
delete_outputs = self.clean_outputs
assert isinstance(delete_outputs, bool)
for jobfile in [self.submission, self.exec_script, self.function]:
if jobfile:
jobfile.clean()
if delete_outputs:
_logme.log('Deleting output files.', 'debug')
if get_outputs:
self.fetch_outputs(delete_files=True)
for f in self.outfiles:
if _os.path.isfile(f):
_logme.log('Deleteing {}'.format(f), 'debug')
_os.remove(f)
def submit(self, wait_on_max_queue=True):
"""Submit this job.
Args:
wait_on_max_queue (bool): Block until queue limit is below the
maximum before submitting.
To disable max_queue_len, set it to 0. None will allow override by
the default settings in the config file, and any positive integer will
be interpretted to be the maximum queue length.
Returns:
self
"""
if self.submitted:
_logme.log('Not submitting, already submitted.', 'warn')
return self
if not self.written:
self.write()
# Check dependencies
dependencies = []
if self.dependencies:
for depend in self.dependencies:
if isinstance(depend, Job):
if not depend.id:
_logme.log(
'Cannot submit job as dependency {} '
.format(depend) + 'has not been submitted',
'error'
)
return self
dependencies.append(int(depend.id))
else:
dependencies.append(int(depend))
# Wait on the queue if necessary
if wait_on_max_queue:
self.update()
self.queue.wait_to_submit()
# Only include queued or running dependencies
self.queue._update() # Force update
depends = []
for depend in dependencies:
dep_check = self.queue.check_dependencies(depend)
if dep_check == 'absent':
_logme.log(
'Cannot submit job as dependency {} '
.format(depend) + 'is not in the queue',
'error'
)
return self
elif dep_check == 'good':
_logme.log(
'Dependency {} is complete, skipping'
.format(depend), 'debug'
)
elif dep_check == 'bad':
_logme.log(
'Cannot submit job as dependency {} '
.format(depend) + 'has failed',
'error'
)
return self
elif dep_check == 'active':
if self.queue.jobs[depend].state == 'completeing':
continue
_logme.log('Dependency {} is {}, adding to deps'
.format(depend, self.queue.jobs[depend].state),
'debug')
depends.append(depend)
else:
# This shouldn't happen ever
raise _ClusterError('fyrd.queue.Queue.check_dependencies() ' +
'returned an unrecognized value {}'
.format(dep_check))
if self.qtype == 'local':
# Normal mode dependency tracking uses only integer job numbers
_logme.log('Submitting to local', 'debug')
command = 'bash {}'.format(self.submission.file_name)
fileargs = dict(stdout=self.outfile,
stderr=self.errfile)
# Make sure the global job pool exists
if not _local.JQUEUE or not _local.JQUEUE.runner.is_alive():
_local.JQUEUE = _local.JobQueue(cores=_local.THREADS)
self.id = _local.JQUEUE.add(_run.cmd, args=(command,),
kwargs=fileargs,
dependencies=depends,
cores=self.cores)
self.submitted = True
self.submit_time = _dt.now()
self.state = 'submitted'
elif self.qtype == 'slurm':
_logme.log('Submitting to slurm', 'debug')
if self.depends:
deps = '--dependency=afterok:{}'.format(
':'.join([str(d) for d in depends]))
args = ['sbatch', deps, self.submission.file_name]
else:
args = ['sbatch', self.submission.file_name]
# Try to submit job 5 times
code, stdout, stderr = _run.cmd(args, tries=5)
if code == 0:
self.id = int(stdout.split(' ')[-1])
else:
_logme.log('sbatch failed with code {}\n'.format(code) +
'stdout: {}\nstderr: {}'.format(stdout, stderr),
'critical')
raise _CalledProcessError(code, args, stdout, stderr)
self.submitted = True
self.submit_time = _dt.now()
self.state = 'submitted'
elif self.qtype == 'torque':
_logme.log('Submitting to torque', 'debug')
if self.depends:
deps = '-W depend={}'.format(
','.join(['afterok:' + str(d) for d in depends]))
args = ['qsub', deps, self.submission.file_name]
else:
args = ['qsub', self.submission.file_name]
# Try to submit job 5 times
code, stdout, stderr = _run.cmd(args, tries=5)
if code == 0:
self.id = int(stdout.split('.')[0])
elif code == 17 and 'Unable to open script file' in stderr:
_logme.log('qsub submission failed due to an already existing '
'script file, attempting to rename file and try '
'again.\nstderr: {}, stdout: {}, cmnd: {}'
.format(stderr, stdout, args), 'error')
new_name = args[1] + '.resub'
_os.rename(args[1], new_name)
_logme.log('renamed script {} to {}, resubmitting'
.format(args[1], new_name), 'info')
args[1] = new_name
code, stdout, stderr = _run.cmd(args, tries=5)
if code == 0:
self.id = int(stdout.split('.')[0])
else:
_logme.log('Resubmission still failed, aborting',
'critical')
raise _CalledProcessError(code, args, stdout, stderr)
else:
if stderr.startswith('qsub: submit error ('):
raise _ClusterError('qsub submission failed with error: ' +
'{}, command: {}'.format(stderr, args))
else:
_logme.log(
'qsub failed with code {}\nstdout: {}\nstderr: {}'
.format(code, stdout, stderr), 'critical'
)
raise _CalledProcessError(code, args, stdout, stderr)
self.submitted = True
self.submit_time = _dt.now()
self.state = 'submitted'
else:
raise _ClusterError("Invalid queue type {}".format(self.qtype))
if not self.submitted:
raise _ClusterError('Submission appears to have failed, this '
"shouldn't happen")
return self
def resubmit(self):
"""Attempt to auto resubmit, deletes prior files."""
self.clean(delete_outputs=True)
self.state = 'Not_Submitted'
self.write()
return self.submit()
def wait(self):
"""Block until job completes."""
if not self.submitted:
if _conf.get_option('jobs', 'auto_submit'):
_logme.log('Auto-submitting as not submitted yet', 'debug')
self.submit()
_sleep(0.5)
else:
_logme.log('Cannot wait for result as job has not been ' +
'submitted', 'warn')
return False
_sleep(0.1)
self.update(False)
if not self.done:
_logme.log('Waiting for self {}'.format(self.name), 'debug')
status = self.queue.wait(self.id)
if status == 'disappeared':
self.state = status
elif status is not True:
return False
else:
self.update()
if self.get_exitcode(update=False) != 0:
_logme.log('Job failed with exitcode {}'
.format(self.exitcode), 'debug')
return False
if self.wait_for_files(caution_message=False):
self.update()
if self.state == 'disappeared':
_logme.log('Job files found for disappered job, assuming '
'success', 'info')
return 'disappeared'
return True
else:
if self.state == 'disappeared':
_logme.log('Disappeared job has no output files, assuming '
'failure', 'error')
return False
    def wait_for_files(self, btme=None, caution_message=False):
        """Block until files appear up to 'file_block_time' in config file.

        Aborts after 2 seconds if job exit code is not 0.

        Args:
            btme (int): Number of seconds to try for before giving
                        up, default set in config file.
            caution_message (bool): Display a message if this is taking
                                    a while.

        Returns:
            bool: True if files found
        """
        if not self.done:
            _logme.log("Cannot wait for files if we aren't complete",
                       'warn')
            return False
        wait_time = 0.1  # seconds between file checks
        # An explicit btme lowers the timeout log level -- the caller has
        # opted into a short wait and expects it may fail.
        if btme:
            lvl = 'debug'
        else:
            lvl = 'warn'
            btme = _conf.get_option('jobs', 'file_block_time')
        start = _dt.now()
        dsp = False
        _logme.log('Checking for output files', 'debug')
        while True:
            runtime = (_dt.now() - start).seconds
            # One-shot user-facing progress messages
            if caution_message and runtime > 1:
                _logme.log('Job complete.', 'info')
                _logme.log('Waiting for output files to appear.', 'info')
                caution_message = False
            if not dsp and runtime > 20:
                _logme.log('Still waiting for output files to appear',
                           'info')
                dsp = True
            count = 0
            outfiles = self.incomplete_outfiles
            tlen = len(outfiles)
            # Everything already fetched: nothing left to wait for
            if not outfiles:
                _logme.log('No incomplete outfiles, assuming all found in ' +
                           '{} seconds'.format(runtime), 'debug')
                break
            for i in outfiles:
                if _os.path.isfile(i):
                    count += 1
            if count == tlen:
                _logme.log('All output files found in {} seconds'
                           .format(runtime), 'debug')
                break
            _sleep(wait_time)
            if runtime > btme:
                _logme.log('Job completed but files have not appeared for ' +
                           '>{} seconds'.format(btme), lvl)
                return False
            self.update()
            # Bail out early if the job itself failed -- its files will
            # never appear.
            if runtime > 2 and self.get_exitcode(update=False) != 0:
                _logme.log('Job failed with exit code {}.'
                           .format(self.exitcode) + ' Cannot find files.',
                           'error')
                return False
            # Local-mode files are written by the time the job finishes; if
            # they are not here now they never will be.
            if _queue.MODE == 'local':
                _logme.log('Job output files were not found.', 'error')
                _logme.log('Expected files: {}'.format(self.outfiles))
                return False
        return True
    def get(self, save=True, cleanup=None, delete_outfiles=None,
            del_no_save=None, raise_on_error=True):
        """Block until job completed and return output of script/function.

        By default saves all outputs to this class and deletes all
        intermediate files.

        Args:
            save (bool): Save all outputs to the class also (advised)
            cleanup (bool): Clean all intermediate files after job
                            completes.
            delete_outfiles (bool): Clean output files after job completes.
            del_no_save (bool): Delete output files even if `save` is
                                `False`
            raise_on_error (bool): If the returned output is an Exception,
                                   raise it.

        Returns:
            str: Function output if Function, else STDOUT. Returns None if
            the wait failed (see NOTE below).
        """
        _logme.log(('Getting outputs, cleanup={}, autoclean={}, '
                    'delete_outfiles={}').format(
                        cleanup, self.clean_files, delete_outfiles
                    ), 'debug')
        # Wait for queue
        status = self.wait()
        if status == 'disappeared':
            # Queue lost the job, but wait() confirmed output files exist
            _logme.log('Job disappeared from queue, attempting to get outputs',
                       'debug')
            try:
                self.fetch_outputs(save=save, delete_files=False,
                                   get_stats=False)
            except IOError:
                _logme.log('Job disappeared from the queue and files could not'
                           ' be found, job must have died and been deleted '
                           'from the queue', 'critical')
                raise IOError('Job {} disappeared, output files missing'
                              .format(self))
        elif status is not True:
            _logme.log('Wait failed, cannot get outputs, aborting', 'error')
            self.update()
            # Dump the job's STDERR to the console to aid debugging
            if _os.path.isfile(self.errfile):
                if _logme.MIN_LEVEL in ['debug', 'verbose']:
                    _sys.stderr.write('STDERR of Job:\n')
                    _sys.stderr.write(self.get_stderr(delete_file=False,
                                                      update=False))
            # For function jobs, unpickle the result anyway so a stored
            # exception can be re-raised for the caller
            if self.poutfile and _os.path.isfile(self.poutfile):
                _logme.log('Getting pickled output', 'debug')
                self.get_output(delete_file=False, update=False,
                                raise_on_error=raise_on_error)
            else:
                _logme.log('Pickled out file does not exist, cannot get error',
                           'debug')
            # NOTE(review): failure path returns None rather than raising
            return
        else:
            # Get output
            _logme.log('Wait complete, fetching outputs', 'debug')
            self.fetch_outputs(save=save, delete_files=False)
        out = self.out if save else self.get_output(save=save)
        # Cleanup
        if cleanup is None:
            cleanup = self.clean_files
        else:
            assert isinstance(cleanup, bool)
        if delete_outfiles is None:
            delete_outfiles = self.clean_outputs
        if save is False:
            delete_outfiles = del_no_save if del_no_save is not None else False
        if cleanup:
            self.clean(delete_outputs=delete_outfiles)
        return out
    def get_output(self, save=True, delete_file=None, update=True,
                   raise_on_error=True):
        """Get output of function or script.

        This is the same as stdout for a script, or the function output for
        a function.

        By default, output file is kept unless delete_file is True or
        self.clean_files is True.

        Args:
            save (bool): Save the output to self.out, default True.
                         Would be a good idea to set to False if the
                         output is huge.
            delete_file (bool): Delete the output file when getting
            update (bool): Update job info from queue first.
            raise_on_error (bool): If the returned output is an Exception,
                                   raise it.

        Returns:
            The output of the script or function. Always a string if script.

        Raises:
            IOError: If the pickled output file is missing for a completed
                function job.
        """
        _logme.log(('Getting output, save={}, clean_files={}, '
                    'delete_file={}').format(
                        save, self.clean_files, delete_file
                    ), 'debug')
        if delete_file is None:
            delete_file = self.clean_outputs
        # Script jobs have no pickled output; their 'output' is STDOUT
        if self.kind == 'script':
            return self.get_stdout(save=save, delete_file=delete_file,
                                   update=update)
        # Serve the cached output if it was already fetched
        if self.done and self._got_out:
            _logme.log('Getting output from _out', 'debug')
            return self._out
        if update and not self._updating and not self.done:
            self.update()
        if self.done:
            if update:
                self.wait_for_files()
        else:
            _logme.log('Cannot get pickled output before job completes',
                       'warn')
            return None
        _logme.log('Getting output from {}'.format(self.poutfile), 'debug')
        if _os.path.isfile(self.poutfile):
            with open(self.poutfile, 'rb') as fin:
                # NOTE(review): unpickling executes arbitrary code; this is
                # only safe because the file is written by our own runner
                out = _pickle.load(fin)
            if delete_file is True or self.clean_files is True:
                _logme.log('Deleting {}'.format(self.poutfile),
                           'debug')
                _os.remove(self.poutfile)
            if save:
                self._out = out
                self._got_out = True
            # A captured exception tuple is logged and optionally re-raised
            if _run.is_exc(out):
                _logme.log('{} failed with exception {}'.format(self, out[1]),
                           'error')
                if raise_on_error:
                    _reraise(*out)
            return out
        else:
            _logme.log('No file at {} even though job has completed!'
                       .format(self.poutfile), 'critical')
            raise IOError('File not found: {}'.format(self.poutfile))
    def get_stdout(self, save=True, delete_file=None, update=True):
        """Get stdout of function or script, same for both.

        By default, output file is kept unless delete_file is True or
        self.clean_files is True.

        Args:
            save (bool): Save the output to self.stdout, default True.
                         Would be a good idea to set to False if the
                         output is huge.
            delete_file (bool): Delete the stdout file when getting
            update (bool): Update job info from queue first.

        Returns:
            str: The contents of STDOUT, with runtime info and trailing
                 newline removed.

        Also sets self.start and self.end from the contents of STDOUT if
        possible.
        """
        if delete_file is None:
            delete_file = self.clean_outputs
        _logme.log(('Getting stdout, save={}, clean_files={}, '
                    'delete_file={}').format(
                        save, self.clean_files, delete_file
                    ), 'debug')
        # Serve the cached copy when available
        if self.done and self._got_stdout:
            _logme.log('Getting stdout from _stdout', 'debug')
            return self._stdout
        if update and not self._updating and not self.done:
            self.update()
        if self.done:
            if update:
                self.wait_for_files()
        else:
            _logme.log('Job not done, attempting to get current STDOUT ' +
                       'anyway', 'info')
        _logme.log('Getting stdout from {}'.format(self.kwargs['outfile']),
                   'debug')
        if _os.path.isfile(self.kwargs['outfile']):
            # Parse start/end timestamps out of the file while we have it
            self.get_times(update=False)
            stdout = open(self.kwargs['outfile']).read()
            if stdout:
                stdouts = stdout.split('\n')
                # Strip the runner's timestamp header/footer lines
                # (assumes the tracking runner template wrote them -- the
                # [2:-3] slice drops the first 2 and last 3 lines)
                stdout = '\n'.join(stdouts[2:-3]) + '\n'
            if delete_file is True or self.clean_files is True:
                _logme.log('Deleting {}'.format(self.kwargs['outfile']),
                           'debug')
                _os.remove(self.kwargs['outfile'])
            if save:
                self._stdout = stdout
                # Only mark as cached when the job is finished, so partial
                # output of a running job is re-read next time
                if self.done:
                    self._got_stdout = True
            return stdout
        else:
            _logme.log('No file at {}, cannot get stdout'
                       .format(self.kwargs['outfile']), 'warn')
            return None
def get_stderr(self, save=True, delete_file=None, update=True):
"""Get stderr of function or script, same for both.
By default, output file is kept unless delete_file is True or
self.clean_files is True.
Args:
save (bool): Save the output to self.stdout, default True.
Would be a good idea to set to False if the
output is huge.
delete_file (bool): Delete the stdout file when getting
update (bool): Update job info from queue first.
Returns:
str: The contents of STDERR, with trailing newline removed.
"""
if delete_file is None:
delete_file = self.clean_outputs
_logme.log(('Getting stderr, save={}, clean_files={}, '
'delete_file={}').format(
save, self.clean_files, delete_file
), 'debug')
if self.done and self._got_stderr:
_logme.log('Getting stderr from _stderr', 'debug')
return self._stderr
if update and not self._updating and not self.done:
self.update()
if self.done:
if update:
self.wait_for_files()
else:
_logme.log('Job not done, attempting to get current STDERR ' +
'anyway', 'info')
_logme.log('Getting stderr from {}'.format(self.kwargs['errfile']),
'debug')
if _os.path.isfile(self.kwargs['errfile']):
stderr = open(self.kwargs['errfile']).read()
if delete_file is True or self.clean_files is True:
_logme.log('Deleting {}'.format(self.kwargs['errfile']),
'debug')
_os.remove(self.kwargs['errfile'])
if save:
self._stderr = stderr
if self.done:
self._got_stderr = True
return stderr
else:
_logme.log('No file at {}, cannot get stderr'
.format(self.kwargs['errfile']), 'warn')
return None
    def get_times(self, update=True):
        """Get the job's start and end times, parsed from its STDOUT file.

        Args:
            update (bool): Update job info from queue first.

        Returns:
            tuple: start, end as two datetime objects.

        Also sets self.start and self.end from the contents of STDOUT if
        possible.

        NOTE(review): the missing-outfile branch returns a bare None rather
        than a (None, None) tuple like the not-done branch does -- confirm no
        caller tuple-unpacks the result before normalizing this.
        """
        _logme.log('Getting times', 'debug')
        # Serve cached times if already parsed
        if self.done and self._got_times:
            _logme.log('Getting times from self.start, self.end', 'debug')
            return self.start, self.end
        if update and not self._updating and not self.done:
            self.update()
        if self.done:
            if update:
                self.wait_for_files()
        else:
            _logme.log('Cannot get times until job is complete.', 'warn')
            return None, None
        _logme.log('Getting times from {}'.format(self.kwargs['outfile']),
                   'debug')
        if _os.path.isfile(self.kwargs['outfile']):
            stdout = open(self.kwargs['outfile']).read()
            if stdout:
                stdouts = stdout.split('\n')
                # Get times: the tracking runner writes a timestamp as the
                # first line and another as the second-to-last line
                timefmt = '%y-%m-%d-%H:%M:%S'
                try:
                    self.start = _dt.strptime(stdouts[0], timefmt)
                    self.end = _dt.strptime(stdouts[-2], timefmt)
                except ValueError as err:
                    # Non-tracking runner templates write no timestamps;
                    # leave start/end as-is in that case
                    _logme.log('Time parsing failed with value error; ' +
                               '{}. '.format(err) + 'This may be because you ' +
                               'are using the script running that does not ' +
                               'include time tracking', 'debug')
            self._got_times = True
            return self.start, self.end
        else:
            _logme.log('No file at {}, cannot get times'
                       .format(self.kwargs['outfile']), 'warn')
            return None
def get_exitcode(self, update=True):
"""Try to get the exitcode.
Args:
update (bool): Update job info from queue first.
Returns:
int: The exitcode of the running process.
"""
_logme.log('Getting exitcode', 'debug')
if self.done and self._got_exitcode:
_logme.log('Getting exitcode from _exitcode', 'debug')
return self._exitcode
if self.status == 'disappeared':
_logme.log('Cannot get exitcode for disappeared job', 'debug')
return 0
if update and not self._updating and not self.done:
self.update()
if self.done:
if update:
self.wait_for_files()
else:
_logme.log('Job is not complete, no exit code yet', 'info')
return None
_logme.log('Getting exitcode from queue', 'debug')
if not self.queue_info:
self.queue_info = self.queue[self.id]
if hasattr(self.queue_info, 'exitcode'):
code = self.queue_info.exitcode
else:
code = None
_logme.log('No exitcode even though the job is done, this ' +
"shouldn't happen.", 'warn')
self._exitcode = code
self._got_exitcode = True
if code != 0:
self.state = 'failed'
_logme.log('Job {} failed with exitcode {}'
.format(self.name, code), 'error')
return code
def update(self, fetch_info=True):
"""Update status from the queue.
Args:
fetch_info (bool): Fetch basic job info if complete.
"""
if not self._updating:
self._update(fetch_info)
else:
_logme.log('Already updating, aborting.', 'debug')
def update_queue_info(self):
"""Set queue_info from the queue even if done."""
_logme.log('Updating queue_info', 'debug')
queue_info1 = self.queue[self.id]
self.queue.update()
queue_info2 = self.queue[self.id]
if queue_info2:
self.queue_info = queue_info2
elif queue_info1:
self.queue_info = queue_info1
elif self.queue_info is None and self.submitted:
_logme.log('Cannot find self in the queue and queue_info is empty',
'warn')
return self.queue_info
def fetch_outputs(self, save=True, delete_files=None, get_stats=True):
"""Save all outputs in their current state. No return value.
This method does not wait for job completion, but merely gets the
outputs. To wait for job completion, use `get()` instead.
Args:
save (bool): Save all outputs to the class also (advised)
delete_files (bool): Delete the output files when getting, only
used if save is True
get_stats (bool): Try to get exitcode.
"""
_logme.log('Saving outputs to self, delete_files={}'
.format(delete_files), 'debug')
self.update()
if delete_files is None:
delete_files = self.clean_outputs
if not self._got_exitcode and get_stats:
self.get_exitcode(update=False)
if not self._got_times:
self.get_times(update=False)
if save:
self.get_output(save=True, delete_file=delete_files, update=False)
self.get_stdout(save=True, delete_file=delete_files, update=False)
self.get_stderr(save=True, delete_file=delete_files, update=False)
@property
def files(self):
"""Build a list of files associated with this class."""
files = [self.submission]
if self.kind == 'script':
files.append(self.exec_script)
if self.kind == 'function':
files.append(self.function)
return files
@property
def runtime(self):
"""Return the runtime."""
if not self.done:
_logme.log('Cannot get runtime as not yet complete.' 'warn')
return None
if not self.start:
self.get_times()
return self.end-self.start
@property
def done(self):
"""Check if completed or not.
Updates the Job and Queue.
Returns:
Bool: True if complete, False otherwise.
"""
# We have the same statement twice to try and avoid updating.
if self.state in _queue.DONE_STATES:
return True
if not self._updating:
self.update()
if self.state in _queue.DONE_STATES:
return True
return False
###############
# Internals #
###############
    def _update(self, fetch_info=True):
        """Update status from the queue.

        Sets ``self._updating`` while running so re-entrant updates (e.g.
        via the ``done`` property) are suppressed.

        Args:
            fetch_info (bool): Fetch exit code and timing info once the job
                is found to be complete.
        """
        _logme.log('Updating job.', 'debug')
        self._updating = True
        # Nothing to do if already finished or never submitted.
        if self.done or not self.submitted:
            self._updating = False
            return
        self.queue.update()
        if self.submitted and self.id:
            queue_info = self.queue[self.id]
            if queue_info:
                assert self.id == queue_info.id
                self.found = True
                self.queue_info = queue_info
                self.state = self.queue_info.state
            if self.done and fetch_info:
                # NOTE(review): 'btme' looks like a typo for a timeout/tries
                # keyword of wait_for_files() -- confirm against its
                # signature.
                if self.wait_for_files(btme=1, caution_message=False):
                    if not self._got_exitcode:
                        self.get_exitcode(update=False)
                    if not self._got_times:
                        self.get_times(update=False)
        elif self.found:
            # The job was in the queue earlier but is gone now; it may
            # reappear, so wait for it (or for its output files).
            _logme.log('Job appears to have disappeared, waiting for '
                       'reappearance, this may take a while', 'warn')
            status = self.wait()
            if status == 'disappeared':
                _logme.log('Job disappeared, but the output files are '
                           'present', 'info')
            elif not status:
                _logme.log('Job appears to have failed and disappeared',
                           'error')
        # If the job has not shown up in the queue after 1000 seconds of
        # searching, assume trouble and warn with the actual elapsed time.
        elif self.submitted and (_dt.now()-self.submit_time).seconds > 1000:
            s = (_dt.now()-self.submit_time).seconds
            _logme.log('Job not in queue after {} seconds of searching.'
                       .format(s), 'warn')
        self._updating = False
@property
def outfiles(self):
"""A list of all outfiles associated with this Job."""
outfiles = [self.outfile, self.errfile]
if self.poutfile:
outfiles.append(self.poutfile)
return outfiles
@property
def incomplete_outfiles(self):
"""A list of all outfiles that haven't already been fetched."""
outfiles = []
if self.outfile and not self._got_stdout:
outfiles.append(self.outfile)
if self.errfile and not self._got_stderr:
outfiles.append(self.errfile)
if self.poutfile and not self._got_out:
outfiles.append(self.poutfile)
return outfiles
def __getattr__(self, key):
"""Dynamically get out, stdout, stderr, and exitcode."""
if key == 'out':
return self.get_output()
elif key == 'stdout':
return self.get_stdout()
elif key == 'stderr':
return self.get_stderr()
elif key == 'exitcode':
return self.get_exitcode()
elif key == 'err':
return self.get_stderr()
def __repr__(self):
"""Return simple job information."""
outstr = "Job:{name}<{mode}:{qtype}".format(
name=self.name, mode=self.kind, qtype=self.qtype)
if self.submitted:
outstr += ':{}'.format(self.id)
outstr += "(command:{cmnd})".format(cmnd=self.command)
if self.submitted or self.done:
outstr += self.state.upper()
elif self.written:
outstr += "WRITTEN"
else:
outstr += "NOT_SUBMITTED"
outstr += ">"
return outstr
def __str__(self):
"""Print job name and ID + status."""
if self.done:
state = 'completed'
id1 = str(self.id)
elif self.written:
state = 'written'
id1 = str(self.id)
else:
state = 'not written'
id1 = 'NA'
return "Job: {name} ID: {id}, state: {state}".format(
name=self.name, id=id1, state=state)
| |
'''
MFEM example 11
See c++ version in the MFEM library for more detail
How to run:
mpirun -np 2 python <arguments>
Example of arguments:
ex11p.py -m square-disc.mesh
ex11p.py -m star.mesh
ex11p.py -m escher.mesh
ex11p.py -m fichera.mesh
ex11p.py -m square-disc-p2.vtk -o 2
ex11p.py -m square-disc-p3.mesh -o 3
ex11p.py -m square-disc-nurbs.mesh -o -1
ex11p.py -m disc-nurbs.mesh -o -1 -n 20
ex11p.py -m pipe-nurbs.mesh -o -1
ex11p.py -m ball-nurbs.mesh -o 2
ex11p.py -m star-surf.mesh
ex11p.py -m square-disc-surf.mesh
ex11p.py -m inline-segment.mesh
ex11p.py -m amr-quad.mesh
ex11p.py -m amr-hex.mesh
ex11p.py -m mobius-strip.mesh -n 8
ex11p.py -m klein-bottle.mesh -n 10
'''
import sys
from os.path import expanduser, join, dirname
import numpy as np
from mfem.common.arg_parser import ArgParser
import mfem.par as mfem
from mpi4py import MPI
# 1./2. MPI setup and option parsing; every rank parses the same argv.
num_procs = MPI.COMM_WORLD.size
myid = MPI.COMM_WORLD.rank
parser = ArgParser(description='Ex11 ')
parser.add_argument('-m', '--mesh',
                    default='star.mesh',
                    action='store', type=str,
                    help='Mesh file to use.')
parser.add_argument('-rs', '--refine-serial',
                    default=2,
                    action='store', type=int,
                    help="Number of times to refine the mesh uniformly in serial.")
parser.add_argument('-rp', '--refine-parallel',
                    default=1,
                    action='store', type=int,
                    help="Number of times to refine the mesh uniformly in parallel.")
parser.add_argument('-o', '--order',
                    action='store', default=1, type=int,
                    help=("Finite element order (polynomial degree) or -1 for isoparametric space."))
parser.add_argument("-n", "--num-eigs",
                    action='store', default=5, type=int,
                    help="Number of desired eigenmodes.")
parser.add_argument("-sp", "--strumpack",
                    action='store_true', default=False,
                    help="Use the STRUMPACK Solver.")
# NOTE(review): with action='store_true' and default=True the -vis flag can
# never disable visualization -- confirm whether a -no-vis flag was intended
# (the C++ ex11p has both -vis and -no-vis).
parser.add_argument('-vis', '--visualization',
                    action='store_true', default=True,
                    help='Enable GLVis visualization')
args = parser.parse_args()
ser_ref_levels = args.refine_serial
par_ref_levels = args.refine_parallel
order = args.order
nev = args.num_eigs
visualization = args.visualization
use_strumpack = args.strumpack
if (myid == 0):
    parser.print_options(args)
# 3. Read the mesh from the given mesh file on all processors. We can handle
#    triangular, quadrilateral, tetrahedral, hexahedral, surface and volume
#    meshes with the same code
meshfile = expanduser(join(dirname(__file__), '..', 'data', args.mesh))
mesh = mfem.Mesh(meshfile, 1, 1)
dim = mesh.Dimension()
# 4. Refine the serial mesh on all processors to increase the resolution. In
#    this example we do 'ref_levels' of uniform refinement (2 by default, or
#    specified on the command line with -rs).
for x in range(ser_ref_levels):
    mesh.UniformRefinement()
# 5. Define a parallel mesh by a partitioning of the serial mesh. Refine
#    this mesh further in parallel to increase the resolution (1 time by
#    default, or specified on the command line with -rp). Once the parallel
#    mesh is defined, the serial mesh can be deleted.
pmesh = mfem.ParMesh(MPI.COMM_WORLD, mesh)
del mesh
for l in range(par_ref_levels):
    pmesh.UniformRefinement()
# 6. Define a parallel finite element space on the parallel mesh. Here we
#    use continuous Lagrange finite elements of the specified order. If
#    order < 1, we instead use an isoparametric/isogeometric space
#    (reusing the FE collection of the mesh nodes when available).
if order > 0:
    fec = mfem.H1_FECollection(order, dim)
elif pmesh.GetNodes():
    fec = pmesh.GetNodes().OwnFEC()
else:
    fec = mfem.H1_FECollection(1, dim)
fespace = mfem.ParFiniteElementSpace(pmesh, fec)
fe_size = fespace.GlobalTrueVSize()
if (myid == 0):
    print('Number of unknowns: ' + str(fe_size))
# 7. Set up the parallel bilinear forms a(.,.) and m(.,.) on the finite
#    element space. The first corresponds to the Laplacian operator -Delta,
#    while the second is a simple mass matrix needed on the right hand side
#    of the generalized eigenvalue problem below. The boundary conditions
#    are implemented by elimination with special values on the diagonal to
#    shift the Dirichlet eigenvalues out of the computational range. After
#    serial and parallel assembly we extract the corresponding parallel
#    matrices A and M.
one = mfem.ConstantCoefficient(1.0)
ess_bdr = mfem.intArray()
if pmesh.bdr_attributes.Size() != 0:
    ess_bdr.SetSize(pmesh.bdr_attributes.Max())
    ess_bdr.Assign(1)
a = mfem.ParBilinearForm(fespace)
a.AddDomainIntegrator(mfem.DiffusionIntegrator(one))
if pmesh.bdr_attributes.Size() == 0:
    # Add a mass term if the mesh has no boundary, e.g. periodic mesh or
    # closed surface.
    a.AddDomainIntegrator(mfem.MassIntegrator(one))
a.Assemble()
# Diagonal value 1.0 in A together with the tiny diagonal in M below makes
# the eliminated (Dirichlet) eigenvalues 1.0/3.0e-300, i.e. huge, so they
# fall outside the computed spectrum.
a.EliminateEssentialBCDiag(ess_bdr, 1.0)
a.Finalize()
m = mfem.ParBilinearForm(fespace)
m.AddDomainIntegrator(mfem.MassIntegrator(one))
m.Assemble()
# shift the eigenvalue corresponding to eliminated dofs to a large value
m.EliminateEssentialBCDiag(ess_bdr, 3.0e-300)
m.Finalize()
A = a.ParallelAssemble()
M = m.ParallelAssemble()
if use_strumpack:
    import mfem.par.strumpack as strmpk
    Arow = strmpk.STRUMPACKRowLocMatrix(A)
# 8. Define and configure the LOBPCG eigensolver and the BoomerAMG
#    preconditioner for A to be used within the solver. Set the matrices
#    which define the generalized eigenproblem A x = lambda M x.
#    We don't support SuperLU
if use_strumpack:
    # NOTE(review): this rebinds the module-level name 'args' (previously
    # the parsed options); all option values were already copied to locals
    # above, but confirm nothing later relies on the argparse namespace.
    args = ["--sp_hss_min_sep_size", "128", "--sp_enable_hss"]
    strumpack = strmpk.STRUMPACKSolver(args, MPI.COMM_WORLD)
    strumpack.SetPrintFactorStatistics(True)
    strumpack.SetPrintSolveStatistics(False)
    strumpack.SetKrylovSolver(strmpk.KrylovSolver_DIRECT)
    strumpack.SetReorderingStrategy(strmpk.ReorderingStrategy_METIS)
    strumpack.SetMC64Job(strmpk.MC64Job_NONE)
    # strumpack.SetSymmetricPattern(True)
    strumpack.SetOperator(Arow)
    strumpack.SetFromCommandLine()
    precond = strumpack
else:
    amg = mfem.HypreBoomerAMG(A)
    amg.SetPrintLevel(0)
    precond = amg
lobpcg = mfem.HypreLOBPCG(MPI.COMM_WORLD)
lobpcg.SetNumModes(nev)
lobpcg.SetPreconditioner(precond)
lobpcg.SetMaxIter(200)
lobpcg.SetTol(1e-8)
lobpcg.SetPrecondUsageMode(1)
lobpcg.SetPrintLevel(1)
lobpcg.SetMassMatrix(M)
lobpcg.SetOperator(A)
# 9. Compute the eigenmodes and extract the array of eigenvalues. Define a
#    parallel grid function to represent each of the eigenmodes returned by
#    the solver.
eigenvalues = mfem.doubleArray()
lobpcg.Solve()
lobpcg.GetEigenvalues(eigenvalues)
x = mfem.ParGridFunction(fespace)
# 10. Save the refined mesh and the modes in parallel. This output can be
#     viewed later using GLVis: "glvis -np <np> -m mesh -g mode".
smyid = '{:0>6d}'.format(myid)
mesh_name = "mesh."+smyid
pmesh.Print(mesh_name, 8)
for i in range(nev):
    x.Assign(lobpcg.GetEigenvector(i))
    sol_name = "mode_"+str(i).zfill(2)+"."+smyid
    x.Save(sol_name, 8)
# 11. Send the solution by socket to a GLVis server.
if (visualization):
    mode_sock = mfem.socketstream("localhost", 19916)
    mode_sock.precision(8)
    for i in range(nev):
        if (myid == 0):
            print("Eigenmode " + str(i+1) + '/' + str(nev) +
                  ", Lambda = " + str(eigenvalues[i]))
        # convert eigenvector from HypreParVector to ParGridFunction
        # NOTE(review): the next line dumps the raw eigenvector object on
        # every rank -- looks like leftover debug output; confirm.
        print(lobpcg.GetEigenvector(i))
        x.Assign(lobpcg.GetEigenvector(i))
        mode_sock.send_text("parallel " + str(num_procs) + " " + str(myid))
        mode_sock.send_solution(pmesh, x)
        mode_sock.send_text("window_title 'Eigenmode " + str(i+1) + '/' +
                            str(nev) + ", Lambda = " + str(eigenvalues[i]) + "'")
        c = None
        if (myid == 0):
            from builtins import input
            c = input("press (q)uit or (c)ontinue --> ")
        # Rank 0 decides; broadcast so every rank takes the same branch.
        c = MPI.COMM_WORLD.bcast(c, root=0)
        if (c != 'c'):
            break
    mode_sock.close()
| |
#!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
V8 correctness fuzzer launcher script.
"""
# for py2/py3 compatibility
from __future__ import print_function
import argparse
import hashlib
import itertools
import json
import os
import random
import re
import sys
import traceback
import v8_commands
import v8_suppressions
# Named d8 flag sets; a fuzzing run compares two of these configurations.
CONFIGS = dict(
    default=[],
    ignition=[
        '--turbo-filter=~',
        '--noopt',
        '--liftoff',
        '--no-wasm-tier-up',
    ],
    ignition_asm=[
        '--turbo-filter=~',
        '--noopt',
        '--validate-asm',
        '--stress-validate-asm',
    ],
    ignition_eager=[
        '--turbo-filter=~',
        '--noopt',
        '--no-lazy',
        '--no-lazy-inner-functions',
    ],
    ignition_no_ic=[
        '--turbo-filter=~',
        '--noopt',
        '--liftoff',
        '--no-wasm-tier-up',
        '--no-use-ic',
        '--no-lazy-feedback-allocation',
    ],
    ignition_turbo=[],
    ignition_turbo_no_ic=[
        '--no-use-ic',
    ],
    ignition_turbo_opt=[
        '--always-opt',
        '--no-liftoff',
        '--no-wasm-tier-up',
        '--no-lazy-feedback-allocation'
    ],
    ignition_turbo_opt_eager=[
        '--always-opt',
        '--no-lazy',
        '--no-lazy-inner-functions',
        '--no-lazy-feedback-allocation',
    ],
    jitless=[
        '--jitless',
    ],
    slow_path=[
        '--force-slow-path',
    ],
    slow_path_opt=[
        '--always-opt',
        '--force-slow-path',
        '--no-lazy-feedback-allocation',
    ],
    trusted=[
        '--no-untrusted-code-mitigations',
    ],
    trusted_opt=[
        '--always-opt',
        '--no-untrusted-code-mitigations',
        '--no-lazy-feedback-allocation',
    ],
)
# Timeout in seconds for one d8 run.
TIMEOUT = 3
# Return codes.
RETURN_PASS = 0
RETURN_FAIL = 2
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
# Scripts prepended to every test case (mocks + suppressions).
PREAMBLE = [
  os.path.join(BASE_PATH, 'v8_mock.js'),
  os.path.join(BASE_PATH, 'v8_suppressions.js'),
]
# Extra mocks used only when the two runs target different architectures.
ARCH_MOCKS = os.path.join(BASE_PATH, 'v8_mock_archs.js')
SANITY_CHECKS = os.path.join(BASE_PATH, 'v8_sanity_checks.js')
# Flags common to both configurations of every comparison run.
FLAGS = ['--correctness-fuzzer-suppressions', '--expose-gc',
         '--allow-natives-syntax', '--invoke-weak-callbacks', '--omit-quit',
         '--es-staging', '--wasm-staging', '--no-wasm-async-compilation',
         '--suppress-asm-messages']
SUPPORTED_ARCHS = ['ia32', 'x64', 'arm', 'arm64']
# Output for suppressed failure case.
FAILURE_HEADER_TEMPLATE = """#
# V8 correctness failure
# V8 correctness configs: %(configs)s
# V8 correctness sources: %(source_key)s
# V8 correctness suppression: %(suppression)s
"""
# Extended output for failure case. The 'CHECK' is for the minimizer.
FAILURE_TEMPLATE = FAILURE_HEADER_TEMPLATE + """#
# CHECK
#
# Compared %(first_config_label)s with %(second_config_label)s
#
# Flags of %(first_config_label)s:
%(first_config_flags)s
# Flags of %(second_config_label)s:
%(second_config_flags)s
#
# Difference:
%(difference)s%(source_file_text)s
#
### Start of configuration %(first_config_label)s:
%(first_config_output)s
### End of configuration %(first_config_label)s
#
### Start of configuration %(second_config_label)s:
%(second_config_output)s
### End of configuration %(second_config_label)s
"""
SOURCE_FILE_TEMPLATE = """
#
# Source file:
%s"""
FUZZ_TEST_RE = re.compile(r'.*fuzz(-\d+\.js)')
# Matches the marker line fuzz tests use to announce their original source.
SOURCE_RE = re.compile(r'print\("v8-foozzie source: (.*)"\);')
# The number of hex digits used from the hash of the original source file path.
# Keep the number small to avoid duplicate explosion.
ORIGINAL_SOURCE_HASH_LENGTH = 3
# Placeholder string if no original source file could be determined.
ORIGINAL_SOURCE_DEFAULT = 'none'
def infer_arch(d8):
  """Infer the V8 architecture from the build config next to the executable.

  Reads v8_build_config.json in the directory of the given d8 binary and
  normalizes 'x86' to 'ia32'.
  """
  config_path = os.path.join(os.path.dirname(d8), 'v8_build_config.json')
  with open(config_path) as f:
    build_config = json.load(f)
  arch = build_config['v8_current_cpu']
  if arch == 'x86':
    return 'ia32'
  return arch
def parse_args():
  """Parse and validate the harness's command-line options.

  Validation failures raise AssertionError on purpose: the __main__ handler
  maps unexpected exceptions to the 'internal_error' clusterfuzz label.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--random-seed', type=int, required=True,
      help='random seed passed to both runs')
  parser.add_argument(
      '--first-config', help='first configuration', default='ignition')
  parser.add_argument(
      '--second-config', help='second configuration', default='ignition_turbo')
  parser.add_argument(
      '--first-config-extra-flags', action='append', default=[],
      help='Additional flags to pass to the run of the first configuration')
  parser.add_argument(
      '--second-config-extra-flags', action='append', default=[],
      help='Additional flags to pass to the run of the second configuration')
  parser.add_argument(
      '--first-d8', default='d8',
      help='optional path to first d8 executable, '
           'default: bundled in the same directory as this script')
  parser.add_argument(
      '--second-d8',
      help='optional path to second d8 executable, default: same as first')
  parser.add_argument(
      '--skip-sanity-checks', default=False, action='store_true',
      help='skip sanity checks for testing purposes')
  parser.add_argument('testcase', help='path to test case')
  options = parser.parse_args()
  # Ensure we have a test case.
  assert (os.path.exists(options.testcase) and
          os.path.isfile(options.testcase)), (
      'Test case %s doesn\'t exist' % options.testcase)
  # Use first d8 as default for second d8.
  options.second_d8 = options.second_d8 or options.first_d8
  # Ensure absolute paths.
  if not os.path.isabs(options.first_d8):
    options.first_d8 = os.path.join(BASE_PATH, options.first_d8)
  if not os.path.isabs(options.second_d8):
    options.second_d8 = os.path.join(BASE_PATH, options.second_d8)
  # Ensure executables exist.
  assert os.path.exists(options.first_d8)
  assert os.path.exists(options.second_d8)
  # Infer architecture from build artifacts.
  options.first_arch = infer_arch(options.first_d8)
  options.second_arch = infer_arch(options.second_d8)
  # Ensure we make a sane comparison: identical arch + config would compare
  # a run against itself.
  if (options.first_arch == options.second_arch and
      options.first_config == options.second_config):
    parser.error('Need either arch or config difference.')
  assert options.first_arch in SUPPORTED_ARCHS
  assert options.second_arch in SUPPORTED_ARCHS
  assert options.first_config in CONFIGS
  assert options.second_config in CONFIGS
  return options
def get_meta_data(content):
  """Extract original-source-file paths from test case content.

  Returns a dict with a single 'sources' key listing every path announced
  by a 'v8-foozzie source' marker line.
  """
  matches = (SOURCE_RE.match(line) for line in content.splitlines())
  return {'sources': [m.group(1) for m in matches if m]}
def content_bailout(content, ignore_fun):
  """Print failure state and return True if ignore_fun matches content."""
  bug = (ignore_fun(content) or '').strip()
  if not bug:
    return False
  print(FAILURE_HEADER_TEMPLATE % dict(
      configs='', source_key='', suppression=bug))
  return True
def pass_bailout(output, step_number):
  """Print info and return True when in a timeout or crash pass state."""
  # Dashed words so that no other clusterfuzz tool matches on the plain
  # words 'timeout' or 'crash'.
  checks = (
      (output.HasTimedOut, '# V8 correctness - T-I-M-E-O-U-T %d'),
      (output.HasCrashed, '# V8 correctness - C-R-A-S-H %d'),
  )
  for predicate, template in checks:
    if predicate():
      print(template % step_number)
      return True
  return False
def fail_bailout(output, ignore_by_output_fun):
  """Print failure state and return True if the run's stdout matches a
  known-ignorable bug."""
  bug = (ignore_by_output_fun(output.stdout) or '').strip()
  if not bug:
    return False
  print(FAILURE_HEADER_TEMPLATE % dict(
      configs='', source_key='', suppression=bug))
  return True
def print_difference(
    options, source_key, first_config_flags, second_config_flags,
    first_config_output, second_config_output, difference, source=None):
  """Print the full failure report for a detected output difference.

  The first three '# V8 correctness ...' entries are parsed by clusterfuzz.
  Format changes will require changes on the clusterfuzz side.
  """
  first_config_label = '%s,%s' % (options.first_arch, options.first_config)
  second_config_label = '%s,%s' % (options.second_arch, options.second_config)
  source_file_text = SOURCE_FILE_TEMPLATE % source if source else ''
  # NOTE(review): under Python 3, printing an encoded str prints the bytes
  # repr (b'...'); presumably a leftover of the py2/py3 compat goal --
  # confirm the intended output encoding.
  print((FAILURE_TEMPLATE % dict(
      configs='%s:%s' % (first_config_label, second_config_label),
      source_file_text=source_file_text,
      source_key=source_key,
      suppression='', # We can't tie bugs to differences.
      first_config_label=first_config_label,
      second_config_label=second_config_label,
      first_config_flags=' '.join(first_config_flags),
      second_config_flags=' '.join(second_config_flags),
      first_config_output=
          first_config_output.stdout.decode('utf-8', 'replace'),
      second_config_output=
          second_config_output.stdout.decode('utf-8', 'replace'),
      source=source,
      difference=difference.decode('utf-8', 'replace'),
  )).encode('utf-8', 'replace'))
def main():
  """Run the test case under two d8 configurations and compare outputs.

  Returns RETURN_PASS when outputs agree (or the case is suppressed before
  comparison), RETURN_FAIL when a real difference is found or a static
  bailout fires.
  """
  options = parse_args()
  # Suppressions are architecture and configuration specific.
  suppress = v8_suppressions.get_suppression(
      options.first_arch, options.first_config,
      options.second_arch, options.second_config,
  )
  # Static bailout based on test case content or metadata.
  with open(options.testcase) as f:
    content = f.read()
  if content_bailout(get_meta_data(content), suppress.ignore_by_metadata):
    return RETURN_FAIL
  if content_bailout(content, suppress.ignore_by_content):
    return RETURN_FAIL
  # Set up runtime arguments.
  common_flags = FLAGS + ['--random-seed', str(options.random_seed)]
  first_config_flags = (common_flags + CONFIGS[options.first_config] +
                        options.first_config_extra_flags)
  second_config_flags = (common_flags + CONFIGS[options.second_config] +
                         options.second_config_extra_flags)
  def run_d8(d8, config_flags, config_label=None, testcase=options.testcase):
    # Run one d8 binary with the given flags on `testcase`, prepending the
    # mock/suppression preamble (plus arch mocks for cross-arch runs).
    preamble = PREAMBLE[:]
    if options.first_arch != options.second_arch:
      preamble.append(ARCH_MOCKS)
    args = [d8] + config_flags + preamble + [testcase]
    if config_label:
      print('# Command line for %s comparison:' % config_label)
      print(' '.join(args))
    if d8.endswith('.py'):
      # Wrap with python in tests.
      args = [sys.executable] + args
    return v8_commands.Execute(
        args,
        cwd=os.path.dirname(os.path.abspath(testcase)),
        timeout=TIMEOUT,
    )
  # Sanity checks. Run both configurations with the sanity-checks file only
  # and bail out early if different.
  if not options.skip_sanity_checks:
    first_config_output = run_d8(
        options.first_d8, first_config_flags, testcase=SANITY_CHECKS)
    second_config_output = run_d8(
        options.second_d8, second_config_flags, testcase=SANITY_CHECKS)
    difference, _ = suppress.diff(
        first_config_output.stdout, second_config_output.stdout)
    if difference:
      # Special source key for sanity checks so that clusterfuzz dedupes all
      # cases on this in case it's hit.
      source_key = 'sanity check failed'
      print_difference(
          options, source_key, first_config_flags, second_config_flags,
          first_config_output, second_config_output, difference)
      return RETURN_FAIL
  first_config_output = run_d8(options.first_d8, first_config_flags, 'first')
  # Early bailout based on first run's output.
  if pass_bailout(first_config_output, 1):
    return RETURN_PASS
  second_config_output = run_d8(
      options.second_d8, second_config_flags, 'second')
  # Bailout based on second run's output.
  if pass_bailout(second_config_output, 2):
    return RETURN_PASS
  difference, source = suppress.diff(
      first_config_output.stdout, second_config_output.stdout)
  # Dedupe key: short hash of the original source path when known.
  if source:
    source_key = hashlib.sha1(source).hexdigest()[:ORIGINAL_SOURCE_HASH_LENGTH]
  else:
    source_key = ORIGINAL_SOURCE_DEFAULT
  if difference:
    # Only bail out due to suppressed output if there was a difference. If a
    # suppression doesn't show up anymore in the statistics, we might want to
    # remove it.
    if fail_bailout(first_config_output, suppress.ignore_by_output1):
      return RETURN_FAIL
    if fail_bailout(second_config_output, suppress.ignore_by_output2):
      return RETURN_FAIL
    print_difference(
        options, source_key, first_config_flags, second_config_flags,
        first_config_output, second_config_output, difference, source)
    return RETURN_FAIL
  # TODO(machenbach): Figure out if we could also return a bug in case there's
  # no difference, but one of the line suppressions has matched - and without
  # the match there would be a difference.
  print('# V8 correctness - pass')
  return RETURN_PASS
# Entry point: translate every escape path into a clusterfuzz-visible result.
if __name__ == "__main__":
  try:
    result = main()
  except SystemExit:
    # Make sure clusterfuzz reports internal errors and wrong usage.
    # Use one label for all internal and usage errors.
    print(FAILURE_HEADER_TEMPLATE % dict(
        configs='', source_key='', suppression='wrong_usage'))
    result = RETURN_FAIL
  except MemoryError:
    # Running out of memory happens occasionally but is not actionable.
    print('# V8 correctness - pass')
    result = RETURN_PASS
  except Exception as e:
    print(FAILURE_HEADER_TEMPLATE % dict(
        configs='', source_key='', suppression='internal_error'))
    print('# Internal error: %s' % e)
    traceback.print_exc(file=sys.stdout)
    result = RETURN_FAIL
  sys.exit(result)
| |
# $Id$
from dpkt import Packet
# header_type
QQ_HEADER_BASIC_FAMILY = 0x02
QQ_HEADER_P2P_FAMILY = 0x00
QQ_HEADER_03_FAMILY = 0x03
QQ_HEADER_04_FAMILY = 0x04
QQ_HEADER_05_FAMILY = 0x05
# Name lookup table indexed by the header_type value (0x00-0x05); index 1
# has no defined family, hence the "Unknown Type" placeholder.
# BUG FIX: "QQ_HEADER_BASIC_FAMILY" (0x02) was missing from the list, so
# every entry from index 2 upward was shifted by one relative to the
# constant it was supposed to name.
header_type_str = [
    "QQ_HEADER_P2P_FAMILY",
    "Unknown Type",
    "QQ_HEADER_BASIC_FAMILY",
    "QQ_HEADER_03_FAMILY",
    "QQ_HEADER_04_FAMILY",
    "QQ_HEADER_05_FAMILY",
]
# Command codes carried in the 16-bit 'command' field of basic-family packets.
QQ_CMD_LOGOUT = 0x0001
QQ_CMD_KEEP_ALIVE = 0x0002
QQ_CMD_MODIFY_INFO = 0x0004
QQ_CMD_SEARCH_USER = 0x0005
QQ_CMD_GET_USER_INFO = 0x0006
QQ_CMD_ADD_FRIEND = 0x0009
QQ_CMD_DELETE_FRIEND = 0x000A
QQ_CMD_ADD_FRIEND_AUTH = 0x000B
QQ_CMD_CHANGE_STATUS = 0x000D
QQ_CMD_ACK_SYS_MSG = 0x0012
QQ_CMD_SEND_IM = 0x0016
QQ_CMD_RECV_IM = 0x0017
QQ_CMD_REMOVE_SELF = 0x001C
QQ_CMD_REQUEST_KEY = 0x001D
QQ_CMD_LOGIN = 0x0022
QQ_CMD_GET_FRIEND_LIST = 0x0026
QQ_CMD_GET_ONLINE_OP = 0x0027
QQ_CMD_SEND_SMS = 0x002D
QQ_CMD_CLUSTER_CMD = 0x0030
QQ_CMD_TEST = 0x0031
QQ_CMD_GROUP_DATA_OP = 0x003C
QQ_CMD_UPLOAD_GROUP_FRIEND = 0x003D
QQ_CMD_FRIEND_DATA_OP = 0x003E
QQ_CMD_DOWNLOAD_GROUP_FRIEND = 0x0058
QQ_CMD_FRIEND_LEVEL_OP = 0x005C
QQ_CMD_PRIVACY_DATA_OP = 0x005E
QQ_CMD_CLUSTER_DATA_OP = 0x005F
QQ_CMD_ADVANCED_SEARCH = 0x0061
QQ_CMD_REQUEST_LOGIN_TOKEN = 0x0062
QQ_CMD_USER_PROPERTY_OP = 0x0065
QQ_CMD_TEMP_SESSION_OP = 0x0066
QQ_CMD_SIGNATURE_OP = 0x0067
QQ_CMD_RECV_MSG_SYS = 0x0080
QQ_CMD_RECV_MSG_FRIEND_CHANGE_STATUS = 0x0081
QQ_CMD_WEATHER_OP = 0x00A6
QQ_CMD_ADD_FRIEND_EX = 0x00A7
QQ_CMD_AUTHORIZE = 0X00A8
QQ_CMD_UNKNOWN = 0xFFFF
# Sub-command codes (interpretation depends on the enclosing command).
QQ_SUB_CMD_SEARCH_ME_BY_QQ_ONLY = 0x03
QQ_SUB_CMD_SHARE_GEOGRAPHY = 0x04
QQ_SUB_CMD_GET_FRIEND_LEVEL = 0x02
QQ_SUB_CMD_GET_CLUSTER_ONLINE_MEMBER = 0x01
# 05-family commands.
QQ_05_CMD_REQUEST_AGENT = 0x0021
QQ_05_CMD_REQUEST_FACE = 0x0022
QQ_05_CMD_TRANSFER = 0x0023
QQ_05_CMD_REQUEST_BEGIN = 0x0026
# Cluster (group) sub-commands used with QQ_CMD_CLUSTER_CMD.
QQ_CLUSTER_CMD_CREATE_CLUSTER= 0x01
QQ_CLUSTER_CMD_MODIFY_MEMBER= 0x02
QQ_CLUSTER_CMD_MODIFY_CLUSTER_INFO= 0x03
QQ_CLUSTER_CMD_GET_CLUSTER_INFO= 0x04
QQ_CLUSTER_CMD_ACTIVATE_CLUSTER= 0x05
QQ_CLUSTER_CMD_SEARCH_CLUSTER= 0x06
QQ_CLUSTER_CMD_JOIN_CLUSTER= 0x07
QQ_CLUSTER_CMD_JOIN_CLUSTER_AUTH= 0x08
QQ_CLUSTER_CMD_EXIT_CLUSTER= 0x09
QQ_CLUSTER_CMD_SEND_IM= 0x0A
QQ_CLUSTER_CMD_GET_ONLINE_MEMBER= 0x0B
QQ_CLUSTER_CMD_GET_MEMBER_INFO= 0x0C
QQ_CLUSTER_CMD_MODIFY_CARD = 0x0E
QQ_CLUSTER_CMD_GET_CARD_BATCH= 0x0F
QQ_CLUSTER_CMD_GET_CARD = 0x10
QQ_CLUSTER_CMD_COMMIT_ORGANIZATION = 0x11
QQ_CLUSTER_CMD_UPDATE_ORGANIZATION= 0x12
QQ_CLUSTER_CMD_COMMIT_MEMBER_ORGANIZATION = 0x13
QQ_CLUSTER_CMD_GET_VERSION_ID= 0x19
QQ_CLUSTER_CMD_SEND_IM_EX = 0x1A
QQ_CLUSTER_CMD_SET_ROLE = 0x1B
QQ_CLUSTER_CMD_TRANSFER_ROLE = 0x1C
QQ_CLUSTER_CMD_CREATE_TEMP = 0x30
QQ_CLUSTER_CMD_MODIFY_TEMP_MEMBER = 0x31
QQ_CLUSTER_CMD_EXIT_TEMP = 0x32
QQ_CLUSTER_CMD_GET_TEMP_INFO = 0x33
QQ_CLUSTER_CMD_MODIFY_TEMP_INFO = 0x34
QQ_CLUSTER_CMD_SEND_TEMP_IM = 0x35
QQ_CLUSTER_CMD_SUB_CLUSTER_OP = 0x36
QQ_CLUSTER_CMD_ACTIVATE_TEMP = 0x37
QQ_CLUSTER_SUB_CMD_ADD_MEMBER = 0x01
QQ_CLUSTER_SUB_CMD_REMOVE_MEMBER = 0x02
QQ_CLUSTER_SUB_CMD_GET_SUBJECT_LIST = 0x02
QQ_CLUSTER_SUB_CMD_GET_DIALOG_LIST = 0x01
QQ_SUB_CMD_GET_ONLINE_FRIEND = 0x2
QQ_SUB_CMD_GET_ONLINE_SERVICE = 0x3
QQ_SUB_CMD_UPLOAD_GROUP_NAME = 0x2
QQ_SUB_CMD_DOWNLOAD_GROUP_NAME = 0x1
QQ_SUB_CMD_SEND_TEMP_SESSION_IM = 0x01
QQ_SUB_CMD_BATCH_DOWNLOAD_FRIEND_REMARK = 0x0
QQ_SUB_CMD_UPLOAD_FRIEND_REMARK = 0x1
QQ_SUB_CMD_REMOVE_FRIEND_FROM_LIST = 0x2
QQ_SUB_CMD_DOWNLOAD_FRIEND_REMARK = 0x3
QQ_SUB_CMD_MODIFY_SIGNATURE = 0x01
QQ_SUB_CMD_DELETE_SIGNATURE = 0x02
QQ_SUB_CMD_GET_SIGNATURE = 0x03
QQ_SUB_CMD_GET_USER_PROPERTY = 0x01
QQ_SUB_CMD_GET_WEATHER = 0x01
# File-transfer commands. NOTE(review): two overlapping numbering schemes
# share values here (e.g. 0x0001 is both HEART_BEAT and REQUEST_AGENT);
# presumably they belong to different transfer phases -- confirm before
# using these for dispatch.
QQ_FILE_CMD_HEART_BEAT = 0x0001
QQ_FILE_CMD_HEART_BEAT_ACK = 0x0002
QQ_FILE_CMD_TRANSFER_FINISHED = 0x0003
QQ_FILE_CMD_FILE_OP = 0x0007
QQ_FILE_CMD_FILE_OP_ACK = 0x0008
QQ_FILE_CMD_SENDER_SAY_HELLO = 0x0031
QQ_FILE_CMD_SENDER_SAY_HELLO_ACK = 0x0032
QQ_FILE_CMD_RECEIVER_SAY_HELLO = 0x0033
QQ_FILE_CMD_RECEIVER_SAY_HELLO_ACK = 0x0034
QQ_FILE_CMD_NOTIFY_IP_ACK = 0x003C
QQ_FILE_CMD_PING = 0x003D
QQ_FILE_CMD_PONG = 0x003E
QQ_FILE_CMD_YES_I_AM_BEHIND_FIREWALL = 0x0040
QQ_FILE_CMD_REQUEST_AGENT = 0x0001
QQ_FILE_CMD_CHECK_IN = 0x0002
QQ_FILE_CMD_FORWARD = 0x0003
QQ_FILE_CMD_FORWARD_FINISHED = 0x0004
QQ_FILE_CMD_IT_IS_TIME = 0x0005
QQ_FILE_CMD_I_AM_READY = 0x0006
# Human-readable names for command codes.
# NOTE(review): the four 05-family entries at the bottom reuse codes already
# present above -- 0x0022 ("QQ_CMD_LOGIN") and 0x0026 ("QQ_CMD_GET_FRIEND_LIST")
# are silently overwritten by the later duplicates. Confirm whether the
# 05-family names belong in a separate map keyed per packet family.
command_str = {
    0x0001: "QQ_CMD_LOGOUT",
    0x0002: "QQ_CMD_KEEP_ALIVE",
    0x0004: "QQ_CMD_MODIFY_INFO",
    0x0005: "QQ_CMD_SEARCH_USER",
    0x0006: "QQ_CMD_GET_USER_INFO",
    0x0009: "QQ_CMD_ADD_FRIEND",
    0x000A: "QQ_CMD_DELETE_FRIEND",
    0x000B: "QQ_CMD_ADD_FRIEND_AUTH",
    0x000D: "QQ_CMD_CHANGE_STATUS",
    0x0012: "QQ_CMD_ACK_SYS_MSG",
    0x0016: "QQ_CMD_SEND_IM",
    0x0017: "QQ_CMD_RECV_IM",
    0x001C: "QQ_CMD_REMOVE_SELF",
    0x001D: "QQ_CMD_REQUEST_KEY",
    0x0022: "QQ_CMD_LOGIN",
    0x0026: "QQ_CMD_GET_FRIEND_LIST",
    0x0027: "QQ_CMD_GET_ONLINE_OP",
    0x002D: "QQ_CMD_SEND_SMS",
    0x0030: "QQ_CMD_CLUSTER_CMD",
    0x0031: "QQ_CMD_TEST",
    0x003C: "QQ_CMD_GROUP_DATA_OP",
    0x003D: "QQ_CMD_UPLOAD_GROUP_FRIEND",
    0x003E: "QQ_CMD_FRIEND_DATA_OP",
    0x0058: "QQ_CMD_DOWNLOAD_GROUP_FRIEND",
    0x005C: "QQ_CMD_FRIEND_LEVEL_OP",
    0x005E: "QQ_CMD_PRIVACY_DATA_OP",
    0x005F: "QQ_CMD_CLUSTER_DATA_OP",
    0x0061: "QQ_CMD_ADVANCED_SEARCH",
    0x0062: "QQ_CMD_REQUEST_LOGIN_TOKEN",
    0x0065: "QQ_CMD_USER_PROPERTY_OP",
    0x0066: "QQ_CMD_TEMP_SESSION_OP",
    0x0067: "QQ_CMD_SIGNATURE_OP",
    0x0080: "QQ_CMD_RECV_MSG_SYS",
    0x0081: "QQ_CMD_RECV_MSG_FRIEND_CHANGE_STATUS",
    0x00A6: "QQ_CMD_WEATHER_OP",
    0x00A7: "QQ_CMD_ADD_FRIEND_EX",
    0x00A8: "QQ_CMD_AUTHORIZE",
    0xFFFF: "QQ_CMD_UNKNOWN",
    # 05-family names; duplicate keys -- see NOTE above.
    0x0021: "_CMD_REQUEST_AGENT",
    0x0022: "_CMD_REQUEST_FACE",
    0x0023: "_CMD_TRANSFER",
    0x0026: "_CMD_REQUEST_BEGIN",
}
class QQBasicPacket(Packet):
    """QQ basic-family packet (header_type 0x02).

    ``__hdr__`` follows dpkt's (field_name, struct_format, default)
    convention; any trailing bytes land in ``self.data``.
    """
    __hdr__ = (
        ('header_type', 'B', 2),  # always 2 for the basic family
        ('source', 'H', 0),  # NOTE(review): presumably client source/version -- confirm
        ('command', 'H', 0),  # one of the QQ_CMD_* codes
        ('sequence', 'H', 0),
        ('qqNum', 'L', 0),  # QQ account number
    )
class QQ3Packet(Packet):
    """QQ 03-family packet (header_type 0x03).

    Most fields of this format are unreverse-engineered; the ``unknownN``
    names reflect that.
    """
    __hdr__ = (
        ('header_type', 'B', 3),  # always 3 for the 03 family
        ('command', 'B', 0),
        ('sequence', 'H', 0),
        ('unknown1', 'L', 0),
        ('unknown2', 'L', 0),
        ('unknown3', 'L', 0),
        ('unknown4', 'L', 0),
        ('unknown5', 'L', 0),
        ('unknown6', 'L', 0),
        ('unknown7', 'L', 0),
        ('unknown8', 'L', 0),
        ('unknown9', 'L', 0),
        ('unknown10', 'B', 1),
        ('unknown11', 'B', 0),
        ('unknown12', 'B', 0),
        ('source', 'H', 0),
        ('unknown13', 'B', 0),
    )
class QQ5Packet(Packet):
    """QQ 05-family packet (header_type 0x05).

    Same layout as the basic family plus an extra 16-bit field of unknown
    meaning between 'source' and 'command'.
    """
    __hdr__ = (
        ('header_type', 'B', 5),  # always 5 for the 05 family
        ('source', 'H', 0),
        ('unknown', 'H', 0),
        ('command', 'H', 0),  # one of the QQ_05_CMD_* codes
        ('sequence', 'H', 0),
        ('qqNum', 'L', 0),  # QQ account number
    )
| |
import base64
import cookielib
import re
import os
import traceback
import urllib
import sys
from urlparse import parse_qs
from saml2 import BINDING_HTTP_REDIRECT, class_name
from saml2 import BINDING_HTTP_POST
from saml2.request import SERVICE2REQUEST
from saml2.sigver import signed_instance_factory, pre_signature_part
from saml2.httputil import HttpParameters
from saml2test import CheckError, FatalError
from saml2test.check import Check
from saml2test.check import ExpectedError
from saml2test.check import INTERACTION
from saml2test.check import STATUSCODE
from saml2test.interaction import Action
from saml2test.interaction import Interaction
from saml2test.interaction import InteractionNeeded
import xmldsig as ds
from sp_test.tests import ErrorResponse
from sp_test.check import VerifyEchopageContents
__author__ = 'rolandh'
import logging
logger = logging.getLogger(__name__)
# Maps response content types to log-file extensions (see _log_response).
# BUG FIX: the plain-text key was misspelled "test/plain", so text/plain
# responses never matched and fell through to an empty extension.
FILE_EXT = {"text/html": "html", "text/plain": "txt", "application/json": "json",
            "text/xml": "xml", "application/xml": "xml", }
# Finds the uppercase letters where a CamelCase word can be split into
# under_score form.
camel2underscore = re.compile('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))')
class Conversation():
def __init__(self, instance, config, interaction, json_config,
check_factory, entity_id, msg_factory=None,
features=None, constraints=None, # verbose=False,
expect_exception=None, commandlineargs=None):
self.instance = instance
self._config = config
self.test_output = []
self.features = features
#self.verbose = verbose # removed (not used)
self.check_factory = check_factory
self.msg_factory = msg_factory
self.expect_exception = expect_exception
self.commandlineargs = commandlineargs
self.cjar = {"browser": cookielib.CookieJar(),
"rp": cookielib.CookieJar(),
"service": cookielib.CookieJar()}
self.protocol_response = []
self.last_response = None
self.last_content = None
self.response = None
self.interaction = Interaction(self.instance, interaction)
self.exception = None
self.entity_id = entity_id
self.cjar = {"rp": cookielib.CookieJar()}
self.args = {}
self.qargs = {}
self.response_args = {}
self.saml_response = []
self.destination = ""
self.request = None
self.position = ""
self.response = None
self.oper = None
self.msg_constraints = constraints
self.json_config = json_config
self.start_page = json_config["start_page"]
def check_severity(self, stat):
if stat["status"] >= 3:
logger.error("WHERE: %s" % stat["id"])
logger.error("STATUS:%s" % STATUSCODE[stat["status"]])
try:
logger.error("HTTP STATUS: %s" % stat["http_status"])
except KeyError:
pass
try:
logger.error("INFO: %s" % stat["message"])
except KeyError:
pass
raise CheckError
def do_check(self, test, **kwargs):
if isinstance(test, basestring):
chk = self.check_factory(test)(**kwargs)
else:
chk = test(**kwargs)
if not chk.call_on_redirect() and \
300 < self.last_response.status_code <= 303:
pass
else:
stat = chk(self, self.test_output)
self.check_severity(stat)
def err_check(self, test, err=None, bryt=True):
if err:
self.exception = err
chk = self.check_factory(test)()
chk(self, self.test_output)
if bryt:
e = FatalError("%s" % err)
e.trace = "".join(traceback.format_exception(*sys.exc_info()))
raise e
def test_sequence(self, sequence):
if sequence is None:
return True
for test in sequence:
if isinstance(test, tuple):
test, kwargs = test
else:
kwargs = {}
self.do_check(test, **kwargs)
if test == ExpectedError:
return False # TODO: return value is unused
return True
def my_endpoints(self):
for serv in ["aa", "aq", "idp"]:
endpoints = self._config.getattr("endpoints", serv)
if endpoints:
for typ, spec in endpoints.items():
for url, binding in spec:
yield url
def which_endpoint(self, url):
for serv in ["aa", "aq", "idp"]:
endpoints = self._config.getattr("endpoints", serv)
if endpoints:
for typ, spec in endpoints.items():
for endp, binding in spec:
if url.startswith(endp):
return typ, binding
return None
def _log_response(self, response):
"""Depending on -k argument write content to either logger or extra file
Create the <operation> directory; delete all possibly existing files
Write response content into response_x.<ext> (with x incrementing from 0)
"""
logger.info("<-- Status: %s" % response.status_code)
if response.status_code in [302, 301, 303]:
logger.info("<-- location: %s" %
response.headers._store['location'][1])
else:
if self.commandlineargs.content_log:
self._content_log_fileno = getattr(self, '_content_log_fileno', 0) + 1
if not getattr(self, 'logcontentpath', None):
try:
content_type_hdr = response.headers._store['content-type'][1]
l = content_type_hdr.split(';') + ['charset=ISO-8859-1',]
content_type = l[0]
encoding = l[1].split("=")
ext = "." + FILE_EXT[content_type]
except Exception as e:
ext = ""
self._logcontentpath = os.path.join(
self.commandlineargs.logpath,
self.commandlineargs.oper)
if not os.path.exists(self._logcontentpath):
os.makedirs(self._logcontentpath)
for fn in os.listdir(self._logcontentpath):
old_file = os.path.join(self._logcontentpath, fn)
if os.path.isfile(old_file):
os.unlink(old_file)
fn = os.path.join(self._logcontentpath, "response_%d%s"
% (self._content_log_fileno, ext ))
f = open(fn, "w")
f.write(response.content)
f.close()
logger.info("<-- Response content (encoding=%s) in file %s" %
(encoding, fn))
pass
else:
logger.info("<-- Content: %s" % response.content)
def wb_send_GET_startpage(self):
"""
The action that starts the whole sequence, a HTTP GET on a web page
"""
self.last_response = self.instance.send(self.start_page)
self._log_response(self.last_response)
def handle_result(self, check_response=None):
if check_response:
if isinstance(check_response(), Check):
if 300 < self.last_response.status_code <= 303:
self._redirect(self.last_response)
self.do_check(check_response)
else:
# A HTTP redirect or HTTP Post
if 300 < self.last_response.status_code <= 303:
self._redirect(self.last_response)
if self.last_response.status_code >= 400:
raise FatalError(self.last_response.reason)
_txt = self.last_response.content
assert _txt.startswith("<h2>")
else:
if 300 < self.last_response.status_code <= 303:
self._redirect(self.last_response)
_txt = self.last_response.content
if self.last_response.status_code >= 400:
raise FatalError("Did not expected error")
def parse_saml_message(self):
try:
url, query = self.last_response.headers["location"].split("?")
except KeyError:
return
_dict = parse_qs(query)
try:
self.relay_state = _dict["RelayState"][0]
except KeyError:
self.relay_state = ""
_str = _dict["SAMLRequest"][0]
self.saml_request = self.instance._parse_request(
_str, SERVICE2REQUEST[self._endpoint], self._endpoint,
self._binding)
if self._binding == BINDING_HTTP_REDIRECT:
self.http_parameters = HttpParameters(_dict)
def _redirect(self, _response):
rdseq = []
url = None
while _response.status_code in [302, 301, 303]:
url = _response.headers["location"]
if url in rdseq:
raise FatalError("Loop detected in redirects")
else:
rdseq.append(url)
if len(rdseq) > 8:
raise FatalError(
"Too long sequence of redirects: %s" % rdseq)
logger.info("--> REDIRECT TO: %s" % url)
# If back to me
for_me = False
try:
self._endpoint, self._binding = self.which_endpoint(url)
for_me = True
except TypeError:
pass
if for_me:
break
else:
try:
_response = self.instance.send(url, "GET")
except Exception, err:
raise FatalError("%s" % err)
self._log_response(_response)
self.last_response = _response
if _response.status_code >= 400:
break
return url
    def send_idp_response(self, req_flow, resp_flow):
        """Build, optionally sign, and send the IdP's SAML response.

        :param req_flow: The flow to check the request
        :param resp_flow: The flow to prepare the response
        :return: The SP's HTTP response on receiving the SAML response
        """
        # Pick information from the request that should be in the response
        args = self.instance.response_args(self.saml_request.message,
                                           [resp_flow._binding])
        # Walk the response flow's MRO from base to leaf so that more
        # specific classes in the JSON config override their bases.
        _mods = list(resp_flow.__mro__[:])
        _mods.reverse()
        for m in _mods:
            try:
                args.update(self.json_config["args"][m.__name__])
            except KeyError:
                pass
        args.update(resp_flow._response_args)
        # Identity/userid come straight from the test configuration.
        for param in ["identity", "userid"]:
            if param in self.json_config:
                args[param] = self.json_config[param]
        if resp_flow == ErrorResponse:
            func = getattr(self.instance, "create_error_response")
        else:
            # Derive the factory name (e.g. create_authn_response) from the
            # request's CamelCase c_tag.
            _op = camel2underscore.sub(r'_\1', req_flow._class.c_tag).lower()
            func = getattr(self.instance, "create_%s_response" % _op)
        # get from config which parts shall be signed
        sign = []
        for styp in ["sign_assertion", "sign_response"]:
            if styp in args:
                try:
                    if args[styp].lower() == "always":
                        sign.append(styp)
                    del args[styp]
                except (AttributeError, TypeError):
                    raise AssertionError('config parameters "sign_assertion", '
                                         '"sign_response" must be of type string')
        response = func(**args)
        response = resp_flow(self).pre_processing(response)
        # and now for signing
        if sign:
            to_sign = []
            try:
                _digest_alg=args["sign_digest_alg"]
            except KeyError:
                _digest_alg=None
            try:
                _sign_alg=args["sign_signature_alg"]
            except KeyError:
                _sign_alg=None
            # Order is important, first assertion and then response if both
            if "sign_assertion" in sign:
                to_sign = [(class_name(response.assertion),
                            response.assertion.id)]
                response.assertion.signature = pre_signature_part(
                    response.assertion.id, self.instance.sec.my_cert, 1,
                    digest_alg=_digest_alg, sign_alg=_sign_alg)
            # NOTE(review): when both parts are requested, to_sign is
            # overwritten here so only the response entry reaches
            # signed_instance_factory — confirm this is intended.
            if "sign_response" in sign:
                to_sign = [(class_name(response), response.id)]
                response.signature = pre_signature_part(
                    response.id, self.instance.sec.my_cert, 1,
                    digest_alg=_digest_alg, sign_alg=_sign_alg)
            response = signed_instance_factory(response, self.instance.sec,
                                               to_sign)
        info = self.instance.apply_binding(resp_flow._binding, response,
                                           args["destination"],
                                           self.relay_state,
                                           "SAMLResponse", resp_flow._sign)
        if resp_flow._binding == BINDING_HTTP_REDIRECT:
            # Redirect binding: the response travels in the Location header.
            url = None
            for param, value in info["headers"]:
                if param == "Location":
                    url = value
                    break
            self.last_response = self.instance.send(url)
        elif resp_flow._binding == BINDING_HTTP_POST:
            # POST binding: base64-encode the XML and form-encode the body
            # (Python 2 urllib/str semantics).
            resp_flow = base64.b64encode("%s" % response)
            info["data"] = urllib.urlencode({"SAMLResponse": resp_flow,
                                             "RelayState": self.relay_state})
            info["method"] = "POST"
            info["headers"] = {
                'Content-type': 'application/x-www-form-urlencoded'}
            self.last_response = self.instance.send(**info)
        try:
            self.last_content = self.last_response.content
        except AttributeError:
            self.last_content = None
        self._log_response(self.last_response)
def do_flow(self, flow, mid_tests):
"""
Solicited or 'un-solicited' flows.
Solicited always starts with the Web client accessing a page.
Un-solicited starts with the IDP sending a SAMl Response.
"""
if len(flow) >= 3:
self.wb_send_GET_startpage()
self.intermit(flow[0]._interaction)
self.parse_saml_message()
# make sure I got the request I expected
assert isinstance(self.saml_request.message, flow[1]._class)
try:
self.test_sequence(mid_tests)
except KeyError:
pass
self.send_idp_response(flow[1], flow[2])
if len(flow) == 4:
self.handle_result(flow[3])
else:
self.handle_result()
def do_sequence_and_tests(self, oper, tests=None):
self.current_oper = oper
try:
self.test_sequence(tests["pre"])
except KeyError:
pass
for flow in oper:
try:
self.do_flow(flow, tests["mid"])
except InteractionNeeded:
self.test_output.append({"status": INTERACTION,
"message": "see detail log for response content",
"id": "exception",
"name": "interaction needed",
"url": self.position})
break
except FatalError:
raise
except Exception as err:
#self.err_check("exception", err)
raise
try:
self.test_sequence(tests["post"])
except KeyError:
pass
    def intermit(self, page_types):
        """Drive the user-interaction phase of a flow.

        Follows SP-issued redirects until one points back at one of my own
        endpoints, performing any scripted interactions (e.g. login forms)
        found on the intermediate pages along the way.

        Currently handles only SP-issued redirects
        :param page_types: not used (could be used to implement wayf, disco)
        """
        _response = self.last_response
        # Loop-detection state for repeated identical interactions.
        _last_action = None
        _same_actions = 0
        if _response.status_code >= 400:
            try:
                self.last_content = _response.text
            except AttributeError:
                self.last_content = None
            raise FatalError(
                "HTTP response status code: %d" % _response.status_code)
        url = _response.url
        content = _response.text
        done = False
        while not done:
            rdseq = []
            # Inner loop: follow the current chain of redirects.
            while _response.status_code in [302, 301, 303]:
                url = _response.headers["location"]
                if url in rdseq:
                    raise FatalError("Loop detected in redirects")
                else:
                    rdseq.append(url)
                    if len(rdseq) > 8:
                        raise FatalError(
                            "Too long sequence of redirects: %s" % rdseq)
                # If back to me
                for_me = False
                try:
                    self._endpoint, self._binding = self.which_endpoint(url)
                    for_me = True
                except TypeError:
                    # which_endpoint() returned None -> not one of mine
                    pass
                if for_me:
                    done = True
                    break
                else:
                    try:
                        _response = self.instance.send(url, "GET")
                    except Exception, err:
                        raise FatalError("%s" % err)
                    self._log_response(_response)
                    content = _response.text
                    self.position = url
                    self.last_content = content
                    self.response = _response
                    if _response.status_code >= 400:
                        done = True
                        break
            if done or url is None:
                break
            # Strip the query string before matching interactions.
            _base = url.split("?")[0]
            try:
                _spec = self.interaction.pick_interaction(_base, content)
            except InteractionNeeded:
                self.position = url
                logger.error("Page Content: %s" % content)
                raise
            except KeyError:
                self.position = url
                logger.error("Page Content: %s" % content)
                self.err_check("interaction-needed")
            # Abort when the same scripted interaction repeats too often.
            if _spec == _last_action:
                _same_actions += 1
                if _same_actions >= 3:
                    raise InteractionNeeded("Interaction loop detection")
            else:
                _last_action = _spec
            if len(_spec) > 2:
                logger.info(">> %s <<" % _spec["page-type"])
                if _spec["page-type"] == "login":
                    self.login_page = content
            # Execute the scripted interaction (e.g. submit a login form).
            _op = Action(_spec["control"])
            try:
                _response = _op(self.instance, self, logger, url,
                                _response, content, self.features)
                # A dict result means the action short-circuited the flow.
                if isinstance(_response, dict):
                    self.last_response = _response
                    self.last_content = _response
                    return _response
                content = _response.text
                self.position = url
                self.last_content = content
                self.response = _response
                if _response.status_code >= 400:
                    break
            except (FatalError, InteractionNeeded):
                raise
            except Exception, err:
                self.err_check("exception", err, False)
        self.last_response = _response
        try:
            self.last_content = _response.text
        except AttributeError:
            self.last_content = None
| |
from datetime import date, timedelta
import re
from corehq.apps.reports.standard.cases.data_sources import CaseDisplay
from casexml.apps.case.models import CommCareCase
from django.utils.translation import ugettext as _
import logging
from corehq.util.dates import iso_string_to_datetime
from custom.bihar.calculations.utils.xmlns import BP, NEW, MTB_ABORT, DELIVERY, REGISTRATION, PNC
from couchdbkit.exceptions import ResourceNotFound
from corehq.apps.users.models import CommCareUser, CouchUser
EMPTY_FIELD = "---"


def get_property(dict_obj, name, default=None):
    """Return ``dict_obj[name]``, unwrapping ``{"#value": ...}`` wrappers.

    When *name* is missing, return *default* unless it is None, in which
    case the report placeholder EMPTY_FIELD is returned instead.
    """
    if name not in dict_obj:
        return default if default is not None else EMPTY_FIELD
    value = dict_obj[name]
    # Deliberately `type(...) is dict` (not isinstance): dict-like document
    # wrappers must be returned as-is, not unwrapped.
    if type(value) is dict:
        return value["#value"]
    return value
class MCHDisplay(CaseDisplay):
    """Base display wrapper for MCH (mother and child health) case rows.

    Pulls ASHA/AWW worker details off the form-submitting user and exposes
    them, plus a date formatter, as report columns.
    """

    def __init__(self, report, case):
        try:
            self.user = CommCareUser.get_by_user_id(case["user_id"])
            if self.user is None:
                self.user = CommCareUser.get_by_user_id(case["opened_by"])
        except CouchUser.AccountTypeError:
            # if we have web users submitting forms (e.g. via cloudcare) just don't bother
            # with the rest of this data.
            self.user = None

        if self.user:
            # Hoist the repeated user_data lookups done by the original code.
            user_data = self.user.user_data
            village = get_property(user_data, "village")
            role = get_property(user_data, "role").upper()
            partner_name = get_property(user_data, "partner_name")
            partner_phone = get_property(user_data, "partner_phone")

            self._village = village
            # ASHA columns show this user when she is the ASHA herself,
            # otherwise her partner's details (and symmetrically for AWW).
            self._asha_name = self.user.full_name if role == "ASHA" else partner_name
            if role == "ASHA":
                self._asha_number = self.user.default_phone_number if self.user.default_phone_number else EMPTY_FIELD
            else:
                self._asha_number = partner_phone

            self._awc_code_name = "%s, %s" % (get_property(user_data, "awc-code"), village)
            self._aww_name = self.user.full_name if role == "AWW" else partner_name
            if role == "AWW":
                self._aww_number = self.user.phone_numbers[0] if len(self.user.phone_numbers) > 0 else EMPTY_FIELD
            else:
                self._aww_number = partner_phone

        super(MCHDisplay, self).__init__(report, case)

    @property
    def village(self):
        return getattr(self, "_village", EMPTY_FIELD)

    @property
    def asha_name(self):
        return getattr(self, "_asha_name", EMPTY_FIELD)

    @property
    def asha_number(self):
        return getattr(self, "_asha_number", EMPTY_FIELD)

    @property
    def awc_code_name(self):
        return getattr(self, "_awc_code_name", EMPTY_FIELD)

    @property
    def aww_name(self):
        return getattr(self, "_aww_name", EMPTY_FIELD)

    @property
    def aww_number(self):
        return getattr(self, "_aww_number", EMPTY_FIELD)

    @property
    def chw_name(self):
        if self.user:
            return "%s, \"%s\"" % (self.user.username, self.user.full_name)
        else:
            return _("Unknown user")

    @property
    def home_sba_assist(self):
        return getattr(self, "_home_sba_assist", EMPTY_FIELD)

    @property
    def caste(self):
        return getattr(self, "_caste", EMPTY_FIELD)

    def parse_date(self, date_string):
        """Format an ISO date/datetime string for the report, or return
        EMPTY_FIELD / a translated error marker on bad input."""
        if date_string != EMPTY_FIELD and date_string != '' and date_string is not None:
            try:
                # assuming it's a date string or datetime string,
                # DefaultProperty will wrap it as the correct type
                # todo: there has to be a better way
                return str(self.report.date_to_json(iso_string_to_datetime(date_string)))
            except (AttributeError, TypeError):
                return _("Bad date format!")
        else:
            return EMPTY_FIELD
class MCHMotherDisplay(MCHDisplay):
    """Report row for a mother case.

    Scans every form ever submitted against the case and caches the report
    columns as ``_``-prefixed attributes; the properties below fall back to
    EMPTY_FIELD when the corresponding form data was never seen.
    """

    def __init__(self, report, case_dict):
        case = CommCareCase.get(case_dict["_id"])
        forms = case.get_forms()
        jsy_beneficiary = None
        jsy_money = None
        pnc_on_time_statuses = []
        for form in forms:
            form_dict = form.form
            form_xmlns = form_dict["@xmlns"]
            if NEW in form_xmlns:
                # Registration-of-new-case form: caste only.
                setattr(self, "_caste", get_property(form_dict, "caste"))
            elif DELIVERY in form_xmlns:
                # Delivery form: place/assistance, dates, complications,
                # JSY money and per-child info.
                if get_property(form_dict, "where_born") != 'home':
                    setattr(self, "_home_sba_assist", get_property(form_dict, "where_born"))
                else:
                    setattr(self, "_home_sba_assist", get_property(form_dict, "home_sba_assist"))
                setattr(self, "_delivery_nature", get_property(form_dict, "delivery_nature"))
                setattr(self, "_discharge_date", get_property(form_dict, "discharge_date"))
                setattr(self, "_jsy_money_date", get_property(form_dict, "jsy_money_date"))
                setattr(self, "_delivery_complications", get_property(form_dict, "delivery_complications"))
                if 'case' in form_dict and 'update' in form_dict['case']:
                    setattr(self, "_family_planning_type", get_property(form_dict['case']['update'], "family_planning_type"))
                jsy_money = get_property(form_dict, "jsy_money")
                children_count = int(get_property(form_dict, "cast_num_children", 0))
                if children_count == 0:
                    setattr(self, "_num_children", 'still_birth')
                else:
                    setattr(self, "_num_children", children_count)
                # child_info is a single dict for one child, a list for more.
                child_list = []
                if children_count == 1 and "child_info" in form_dict:
                    child_list.append(form_dict["child_info"])
                elif children_count > 1 and "child_info" in form_dict:
                    child_list = form_dict["child_info"]
                for idx,child in enumerate(child_list):
                    case_child = {}
                    if "case" in child:
                        case_child = CommCareCase.get(child["case"]["@case_id"])
                    setattr(self, "_first_weight_%s" % (idx+1), str(get_property(child, "first_weight")))
                    setattr(self, "_breastfed_hour_%s" % (idx+1), get_property(child, "breastfed_hour"))
                    if case_child:
                        setattr(self, "_case_name_%s" % (idx+1), get_property(case_child, "name"))
                        setattr(self, "_gender_%s" % (idx+1), get_property(case_child, "gender"))
            elif REGISTRATION in form_xmlns:
                jsy_beneficiary = get_property(form_dict, "jsy_beneficiary")
            elif PNC in form_xmlns:
                # Post-natal-care form: "on time" when the child was kept
                # warm (skin-to-skin or wrapped, and warm to touch).
                child_list = []
                if isinstance(form_dict["child_info"], list):
                    child_list.extend(form_dict["child_info"])
                else:
                    child_list.append(form_dict["child_info"])
                for child in child_list:
                    pnc_on_time_status = None
                    if (get_property(child, 'skin_to_skin') == 'yes' or get_property(child, 'wrapped') == 'yes') and get_property(child, 'warm_to_touch') == 'yes':
                        pnc_on_time_status = 'yes'
                    else:
                        pnc_on_time_status = 'no'
                    pnc_on_time_statuses.append(pnc_on_time_status)
            elif BP in form_xmlns:
                # Birth-preparedness form: up to four ANC visit readings.
                if "bp1" in form_dict:
                    bp = form_dict["bp1"]
                    for i in range(1, 5):
                        if "anc%s" % i in bp:
                            anc = bp["anc%s" % i]
                            if "anc%s_blood_pressure" % i in anc:
                                # Normalize the legacy value in-place.
                                if anc["anc%s_blood_pressure" % i] == 'high_bloodpressure':
                                    anc["anc%s_blood_pressure" % i] = 'high'
                                setattr(self, "_blood_pressure_%s" % i, anc["anc%s_blood_pressure" % i])
                            if "anc%s_weight" % i in anc:
                                setattr(self, "_weight_%s" % i, str(anc["anc%s_weight" % i]))
                            if "anc%s_hemoglobin" % i in anc and i == 1:
                                setattr(self, "_hemoglobin", anc["anc%s_hemoglobin" % i])
                    setattr(self, "_anemia", get_property(bp, "anaemia"))
                if "bp2" in form_dict:
                    bp = form_dict["bp2"]
                    setattr(self, "_rti_sti", get_property(bp, "rti_sti"))
                setattr(self, "_complications", get_property(form_dict, "bp_complications"))
            elif MTB_ABORT in form_xmlns:
                setattr(self, "_abortion_type", get_property(form_dict, "abortion_type"))
        # Prefer the registration form's answer; fall back to delivery form.
        if jsy_beneficiary is not None and jsy_beneficiary != EMPTY_FIELD:
            setattr(self, "_jsy_beneficiary", jsy_beneficiary)
        else:
            setattr(self, "_jsy_beneficiary", jsy_money)
        # NOTE(review): 'yes' wins if ANY child was on time — confirm
        # whether "all" should instead require every status to be 'yes'.
        if len(pnc_on_time_statuses) > 0:
            if 'yes' in pnc_on_time_statuses:
                setattr(self, "_all_pnc_on_time", 'yes')
            else:
                setattr(self, "_all_pnc_on_time", 'no')
        super(MCHMotherDisplay, self).__init__(report, case_dict)

    @property
    def mother_name(self):
        return get_property(self.case, "mother_name")

    @property
    def husband_name(self):
        return get_property(self.case, "husband_name")

    @property
    def ward_number(self):
        return get_property(self.case, "ward_number")

    @property
    def mobile_number(self):
        return get_property(self.case, "mobile_number")

    @property
    def mobile_number_whose(self):
        # Strip the "mobile_" prefix from values like "mobile_husband".
        number = get_property(self.case, "mobile_number_whose")
        if re.match(r"^mobile_", number):
            r = re.compile(r"^mobile_", re.IGNORECASE)
            return r.sub("", number)
        else:
            return number

    @property
    def mcts_id(self):
        return get_property(self.case, "full_mcts_id")

    @property
    def dob_age(self):
        if "mother_dob" in self.case and self.case["mother_dob"]:
            try:
                mother_dob = self.case["mother_dob"]
                if type(mother_dob) is dict:
                    mother_dob = mother_dob["#value"]
                days = (date.today() - CaseDisplay.parse_date(self, mother_dob).date()).days
                mother_dob = self.parse_date(mother_dob)
                # NOTE(review): Python 2 integer division; the child report
                # uses int(days/365.25) — confirm which is intended.
                return "%s, %s" % (mother_dob, days/365)
            except AttributeError:
                return _("Bad date format!")
        else:
            return EMPTY_FIELD

    @property
    def lmp(self):
        return self.parse_date(get_property(self.case, "lmp"))

    @property
    def edd(self):
        return self.parse_date(get_property(self.case, "edd"))

    @property
    def anc_date_1(self):
        return self.parse_date(get_property(self.case, "anc_1_date"))

    @property
    def anc_date_2(self):
        return self.parse_date(get_property(self.case, "anc_2_date"))

    @property
    def anc_date_3(self):
        return self.parse_date(get_property(self.case, "anc_3_date"))

    @property
    def anc_date_4(self):
        return self.parse_date(get_property(self.case, "anc_4_date"))

    @property
    def tt1_date(self):
        return self.parse_date(get_property(self.case, "tt_1_date"))

    @property
    def tt2_date(self):
        return self.parse_date(get_property(self.case, "tt_2_date"))

    @property
    def tt_booster(self):
        return self.parse_date(get_property(self.case, "tt_booster_date"))

    @property
    def ifa_tablets(self):
        return self.parse_date(get_property(self.case, "ifa_tablets_100"))

    @property
    def add(self):
        return self.parse_date(get_property(self.case, "add"))

    @property
    def first_pnc_time(self):
        return get_property(self.case, "first_pnc_time")

    @property
    def status(self):
        return get_property(self.case, "status")

    @property
    def jsy_beneficiary(self):
        return getattr(self, "_jsy_beneficiary", EMPTY_FIELD)

    @property
    def delivery_nature(self):
        return getattr(self, "_delivery_nature", EMPTY_FIELD)

    @property
    def discharge_date(self):
        return self.parse_date(str(getattr(self, "_discharge_date", EMPTY_FIELD)))

    @property
    def jsy_money_date(self):
        return self.parse_date(str(getattr(self, "_jsy_money_date", EMPTY_FIELD)))

    @property
    def delivery_complications(self):
        return getattr(self, "_delivery_complications", EMPTY_FIELD)

    @property
    def family_planning_type(self):
        return getattr(self, "_family_planning_type", EMPTY_FIELD)

    @property
    def anemia(self):
        return getattr(self, "_anemia", EMPTY_FIELD)

    @property
    def complications(self):
        return getattr(self, "_complications", EMPTY_FIELD)

    @property
    def rti_sti(self):
        return getattr(self, "_rti_sti", EMPTY_FIELD)

    @property
    def abortion_type(self):
        return getattr(self, "_abortion_type", EMPTY_FIELD)

    @property
    def blood_pressure_1(self):
        return getattr(self, "_blood_pressure_1", EMPTY_FIELD)

    @property
    def blood_pressure_2(self):
        return getattr(self, "_blood_pressure_2", EMPTY_FIELD)

    @property
    def blood_pressure_3(self):
        return getattr(self, "_blood_pressure_3", EMPTY_FIELD)

    @property
    def blood_pressure_4(self):
        return getattr(self, "_blood_pressure_4", EMPTY_FIELD)

    @property
    def weight_1(self):
        return getattr(self, "_weight_1", EMPTY_FIELD)

    @property
    def weight_2(self):
        return getattr(self, "_weight_2", EMPTY_FIELD)

    @property
    def weight_3(self):
        return getattr(self, "_weight_3", EMPTY_FIELD)

    @property
    def weight_4(self):
        return getattr(self, "_weight_4", EMPTY_FIELD)

    @property
    def hemoglobin(self):
        return getattr(self, "_hemoglobin", EMPTY_FIELD)

    @property
    def anc_completed(self):
        # "Completed" when the first ANC visit fell within 12 weeks of LMP.
        lmp = self.lmp
        anc_date_1 = self.anc_date_1
        if lmp != EMPTY_FIELD and anc_date_1 != EMPTY_FIELD:
            try:
                return _("yes") if CaseDisplay.parse_date(self, self.anc_date_1) < (CaseDisplay.parse_date(self, self.lmp) + timedelta(days=12*7)) else _("no")
            except AttributeError:
                return _("Bad date format!")
        else:
            return EMPTY_FIELD

    @property
    def all_pnc_on_time(self):
        return getattr(self, "_all_pnc_on_time", EMPTY_FIELD)

    @property
    def num_children(self):
        return getattr(self, "_num_children", EMPTY_FIELD)

    @property
    def case_name_1(self):
        return getattr(self, "_case_name_1", EMPTY_FIELD)

    @property
    def case_name_2(self):
        return getattr(self, "_case_name_2", EMPTY_FIELD)

    @property
    def case_name_3(self):
        return getattr(self, "_case_name_3", EMPTY_FIELD)

    @property
    def case_name_4(self):
        return getattr(self, "_case_name_4", EMPTY_FIELD)

    @property
    def gender_1(self):
        return getattr(self, "_gender_1", EMPTY_FIELD)

    @property
    def gender_2(self):
        return getattr(self, "_gender_2", EMPTY_FIELD)

    @property
    def gender_3(self):
        return getattr(self, "_gender_3", EMPTY_FIELD)

    @property
    def gender_4(self):
        return getattr(self, "_gender_4", EMPTY_FIELD)

    @property
    def first_weight_1(self):
        return getattr(self, "_first_weight_1", EMPTY_FIELD)

    @property
    def first_weight_2(self):
        return getattr(self, "_first_weight_2", EMPTY_FIELD)

    @property
    def first_weight_3(self):
        return getattr(self, "_first_weight_3", EMPTY_FIELD)

    @property
    def first_weight_4(self):
        return getattr(self, "_first_weight_4", EMPTY_FIELD)

    @property
    def breastfed_hour_1(self):
        return getattr(self, "_breastfed_hour_1", EMPTY_FIELD)

    @property
    def breastfed_hour_2(self):
        return getattr(self, "_breastfed_hour_2", EMPTY_FIELD)

    @property
    def breastfed_hour_3(self):
        return getattr(self, "_breastfed_hour_3", EMPTY_FIELD)

    @property
    def breastfed_hour_4(self):
        return getattr(self, "_breastfed_hour_4", EMPTY_FIELD)
class MCHChildDisplay(MCHDisplay):
    """Report row for a child case.

    Parent (mother) details are looked up through the case's first index;
    immunization dates come straight off the child case itself.
    """

    def __init__(self, report, case_dict):
        # get mother case
        if len(case_dict["indices"]) > 0:
            try:
                parent_case = CommCareCase.get(case_dict["indices"][0]["referenced_id"])
                forms = parent_case.get_forms()
                parent_json = parent_case.case_properties()
                setattr(self, "_father_mother_name", "%s, %s" %(get_property(parent_json,"husband_name"), get_property(parent_json, "mother_name")))
                setattr(self, "_full_mcts_id", get_property(parent_json, "full_mcts_id"))
                setattr(self, "_ward_number", get_property(parent_json, "ward_number"))
                setattr(self, "_mobile_number", get_property(parent_case, 'mobile_number'))
                # Strip the "mobile_" prefix from values like "mobile_husband"
                # (same normalization as MCHMotherDisplay.mobile_number_whose).
                number = get_property(parent_case, "mobile_number_whose")
                if re.match(r"^mobile_", number):
                    r = re.compile(r"^mobile_", re.IGNORECASE)
                    setattr(self, "_mobile_number_whose", r.sub("", number))
                else:
                    setattr(self, "_mobile_number_whose", number)
                # Caste and place-of-birth come from the mother's forms.
                for form in forms:
                    form_dict = form.form
                    form_xmlns = form_dict["@xmlns"]
                    if NEW in form_xmlns:
                        setattr(self, "_caste", get_property(form_dict, "caste"))
                    elif DELIVERY in form_xmlns:
                        if get_property(form_dict, "where_born") != 'home':
                            setattr(self, "_home_sba_assist", get_property(form_dict, "where_born"))
                        else:
                            setattr(self, "_home_sba_assist", get_property(form_dict, "home_sba_assist"))
            except ResourceNotFound:
                # Broken index: log and fall back to EMPTY_FIELD defaults.
                logging.error("ResourceNotFound: " + case_dict["indices"][0]["referenced_id"])
        super(MCHChildDisplay, self).__init__(report, case_dict)

    @property
    def child_name(self):
        return get_property(self.case, "name")

    @property
    def father_mother_name(self):
        return getattr(self, "_father_mother_name", EMPTY_FIELD)

    @property
    def mcts_id(self):
        return getattr(self, "_full_mcts_id", EMPTY_FIELD)

    @property
    def ward_number(self):
        return getattr(self, "_ward_number", EMPTY_FIELD)

    @property
    def gender(self):
        return get_property(self.case, "gender")

    @property
    def mobile_number(self):
        return getattr(self, "_mobile_number", EMPTY_FIELD)

    @property
    def mobile_number_whose(self):
        return getattr(self, "_mobile_number_whose", EMPTY_FIELD)

    @property
    def bcg_date(self):
        return self.parse_date(get_property(self.case, "bcg_date"))

    @property
    def opv_0_date(self):
        return self.parse_date(get_property(self.case, "opv_0_date"))

    @property
    def hep_b_0_date(self):
        return self.parse_date(get_property(self.case, "hep_b_0_date"))

    @property
    def dpt_1_date(self):
        return self.parse_date(get_property(self.case, "dpt_1_date"))

    @property
    def opv_1_date(self):
        return self.parse_date(get_property(self.case, "opv_1_date"))

    @property
    def hep_b_1_date(self):
        return self.parse_date(get_property(self.case, "hep_b_1_date"))

    @property
    def dpt_2_date(self):
        return self.parse_date(get_property(self.case, "dpt_2_date"))

    @property
    def opv_2_date(self):
        return self.parse_date(get_property(self.case, "opv_2_date"))

    @property
    def hep_b_2_date(self):
        return self.parse_date(get_property(self.case, "hep_b_2_date"))

    @property
    def dpt_3_date(self):
        return self.parse_date(get_property(self.case, "dpt_3_date"))

    @property
    def opv_3_date(self):
        return self.parse_date(get_property(self.case, "opv_3_date"))

    @property
    def hep_b_3_date(self):
        return self.parse_date(get_property(self.case, "hep_b_3_date"))

    @property
    def measles_date(self):
        return self.parse_date(get_property(self.case, "measles_date"))

    @property
    def vit_a_1_date(self):
        return self.parse_date(get_property(self.case, "vit_a_1_date"))

    @property
    def date_measles_booster(self):
        return self.parse_date(get_property(self.case, "date_measles_booster"))

    @property
    def dpt_booster_date(self):
        return self.parse_date(get_property(self.case, "dpt_booster_date"))

    @property
    def opv_booster_date(self):
        return self.parse_date(get_property(self.case, "opv_booster_date"))

    @property
    def vit_a_2_date(self):
        return self.parse_date(get_property(self.case, "vit_a_2_date"))

    @property
    def vit_a_3_date(self):
        return self.parse_date(get_property(self.case, "vit_a_3_date"))

    @property
    def date_je(self):
        return self.parse_date(get_property(self.case, "date_je"))

    @property
    def dob_age(self):
        if "dob" in self.case and self.case["dob"]:
            try:
                dob = self.case["dob"]
                if type(dob) is dict:
                    dob = dob["#value"]
                days = (date.today() - CaseDisplay.parse_date(self, dob).date()).days
                dob = self.parse_date(dob)
                return "%s, %s" % (dob, int(days/365.25))
            except AttributeError:
                return _("Bad date format!")
        else:
            return EMPTY_FIELD
| |
"""Environment for training multi-agent experiments."""
from copy import deepcopy
import numpy as np
import random
import traceback
from gym.spaces import Box
from traci.exceptions import FatalTraCIError
from traci.exceptions import TraCIException
from ray.rllib.env import MultiAgentEnv
from flow.envs.base_env import Env
from flow.utils.exceptions import FatalFlowError
class MultiEnv(MultiAgentEnv, Env):
"""Multi-agent version of base env. See parent class for info."""
    def step(self, rl_actions):
        """Advance the environment by one step.

        Assigns actions to autonomous and human-driven agents (i.e. vehicles,
        traffic lights, etc...). Actions that are not assigned are left to the
        control of the simulator. The actions are then used to advance the
        simulator by the number of time steps requested per environment step.

        Results from the simulations are processed through various classes,
        such as the Vehicle and TrafficLight kernels, to produce standardized
        methods for identifying specific network state features. Finally,
        results from the simulator are used to generate appropriate
        observations.

        Parameters
        ----------
        rl_actions : array_like
            an list of actions provided by the rl algorithm

        Returns
        -------
        observation : dict of array_like
            agent's observation of the current environment
        reward : dict of floats
            amount of reward associated with the previous state/action pair
        done : dict of bool
            indicates whether the episode has ended
        info : dict
            contains other diagnostic information from the previous action
        """
        # One environment step may span several simulator steps.
        # NOTE(review): `crash` is only bound inside this loop — assumes
        # sims_per_step >= 1; confirm that configuration invariant.
        for _ in range(self.env_params.sims_per_step):
            self.time_counter += 1
            self.step_counter += 1

            # perform acceleration actions for controlled human-driven vehicles
            if len(self.k.vehicle.get_controlled_ids()) > 0:
                accel = []
                for veh_id in self.k.vehicle.get_controlled_ids():
                    accel_contr = self.k.vehicle.get_acc_controller(veh_id)
                    action = accel_contr.get_action(self)
                    accel.append(action)
                self.k.vehicle.apply_acceleration(
                    self.k.vehicle.get_controlled_ids(), accel)

            # perform lane change actions for controlled human-driven vehicles
            if len(self.k.vehicle.get_controlled_lc_ids()) > 0:
                direction = []
                for veh_id in self.k.vehicle.get_controlled_lc_ids():
                    target_lane = self.k.vehicle.get_lane_changing_controller(
                        veh_id).get_action(self)
                    direction.append(target_lane)
                self.k.vehicle.apply_lane_change(
                    self.k.vehicle.get_controlled_lc_ids(),
                    direction=direction)

            # perform (optionally) routing actions for all vehicle in the
            # network, including rl and sumo-controlled vehicles
            routing_ids = []
            routing_actions = []
            for veh_id in self.k.vehicle.get_ids():
                if self.k.vehicle.get_routing_controller(veh_id) is not None:
                    routing_ids.append(veh_id)
                    route_contr = self.k.vehicle.get_routing_controller(veh_id)
                    routing_actions.append(route_contr.choose_route(self))
            self.k.vehicle.choose_routes(routing_ids, routing_actions)

            self.apply_rl_actions(rl_actions)

            self.additional_command()

            # advance the simulation in the simulator by one step
            self.k.simulation.simulation_step()

            # store new observations in the vehicles and traffic lights class
            self.k.update(reset=False)

            # update the colors of vehicles
            if self.sim_params.render:
                self.k.vehicle.update_vehicle_colors()

            # crash encodes whether the simulator experienced a collision
            crash = self.k.simulation.check_collision()

            # stop collecting new simulation steps if there is a collision
            if crash:
                break

        states = self.get_state()
        # An agent is "done" once its vehicle has arrived at its destination.
        done = {key: key in self.k.vehicle.get_arrived_ids()
                for key in states.keys()}
        # A collision terminates the episode for every agent.
        if crash:
            done['__all__'] = True
        else:
            done['__all__'] = False
        infos = {key: {} for key in states.keys()}

        # compute the reward
        if self.env_params.clip_actions:
            clipped_actions = self.clip_actions(rl_actions)
            reward = self.compute_reward(clipped_actions, fail=crash)
        else:
            reward = self.compute_reward(rl_actions, fail=crash)

        return states, reward, done, infos
    def reset(self, new_inflow_rate=None):
        """Reset the environment between rollouts.

        Resets the time counter, optionally restarts the simulator instance,
        removes every vehicle currently in the network, and re-adds the
        vehicles recorded in ``self.initial_state`` at their starting
        positions. If "shuffle" is set to True in InitialConfig, the initial
        positions of vehicles is recalculated and the vehicles are shuffled.
        Finally performs ``env_params.warmup_steps`` no-op steps.

        Parameters
        ----------
        new_inflow_rate : optional
            not read anywhere in this body; presumably consumed by subclass
            overrides -- TODO confirm

        Returns
        -------
        observation : dict of array_like
            the initial observation of the space. The initial reward is assumed
            to be zero.
        """
        # reset the time counter
        self.time_counter = 0
        # warn about not using restart_instance when using inflows
        if len(self.net_params.inflows.get()) > 0 and \
                not self.sim_params.restart_instance:
            print(
                "**********************************************************\n"
                "**********************************************************\n"
                "**********************************************************\n"
                "WARNING: Inflows will cause computational performance to\n"
                "significantly decrease after large number of rollouts. In \n"
                "order to avoid this, set SumoParams(restart_instance=True).\n"
                "**********************************************************\n"
                "**********************************************************\n"
                "**********************************************************"
            )
        # Restart when explicitly requested, or as a periodic hygiene measure
        # after a very large number of steps (aimsun excluded).
        if self.sim_params.restart_instance or \
                (self.step_counter > 2e6 and self.simulator != 'aimsun'):
            self.step_counter = 0
            # issue a random seed to induce randomness into the next rollout
            # NOTE(review): 1e5 is a float; random.randint expects ints on
            # newer Python versions -- confirm.
            self.sim_params.seed = random.randint(0, 1e5)
            # Restore the pristine vehicle kernel captured at construction.
            self.k.vehicle = deepcopy(self.initial_vehicles)
            self.k.vehicle.master_kernel = self.k
            # restart the sumo instance
            self.restart_simulation(self.sim_params)
        # perform shuffling (if requested)
        elif self.initial_config.shuffle:
            self.setup_initial_state()
        # clear all vehicles from the network and the vehicles class
        if self.simulator == 'traci':
            for veh_id in self.k.kernel_api.vehicle.getIDList():  # FIXME: hack
                try:
                    self.k.vehicle.remove(veh_id)
                except (FatalTraCIError, TraCIException):
                    # best-effort removal; log and continue
                    print(traceback.format_exc())
        # clear all vehicles from the network and the vehicles class
        # FIXME (ev, ak) this is weird and shouldn't be necessary
        for veh_id in list(self.k.vehicle.get_ids()):
            # do not try to remove the vehicles from the network in the first
            # step after initializing the network, as there will be no vehicles
            if self.step_counter == 0:
                continue
            try:
                self.k.vehicle.remove(veh_id)
            except (FatalTraCIError, TraCIException):
                print("Error during start: {}".format(traceback.format_exc()))
        # reintroduce the initial vehicles to the network
        for veh_id in self.initial_ids:
            type_id, edge, lane_index, pos, speed = \
                self.initial_state[veh_id]
            try:
                self.k.vehicle.add(
                    veh_id=veh_id,
                    type_id=type_id,
                    edge=edge,
                    lane=lane_index,
                    pos=pos,
                    speed=speed)
            except (FatalTraCIError, TraCIException):
                # if a vehicle was not removed in the first attempt, remove it
                # now and then reintroduce it
                self.k.vehicle.remove(veh_id)
                if self.simulator == 'traci':
                    self.k.kernel_api.vehicle.remove(veh_id)  # FIXME: hack
                self.k.vehicle.add(
                    veh_id=veh_id,
                    type_id=type_id,
                    edge=edge,
                    lane=lane_index,
                    pos=pos,
                    speed=speed)
        # advance the simulation in the simulator by one step
        self.k.simulation.simulation_step()
        # update the information in each kernel to match the current state
        self.k.update(reset=True)
        # update the colors of vehicles
        if self.sim_params.render:
            self.k.vehicle.update_vehicle_colors()
        # check to make sure all vehicles have been spawned
        if len(self.initial_ids) > self.k.vehicle.num_vehicles:
            missing_vehicles = list(
                set(self.initial_ids) - set(self.k.vehicle.get_ids()))
            msg = '\nNot enough vehicles have spawned! Bad start?\n' \
                  'Missing vehicles / initial state:\n'
            for veh_id in missing_vehicles:
                msg += '- {}: {}\n'.format(veh_id, self.initial_state[veh_id])
            raise FatalFlowError(msg=msg)
        # perform (optional) warm-up steps before training
        for _ in range(self.env_params.warmup_steps):
            observation, _, _, _ = self.step(rl_actions=None)
        # render a frame
        self.render(reset=True)
        return self.get_state()
def clip_actions(self, rl_actions=None):
"""Clip the actions passed from the RL agent.
If no actions are provided at any given step, the rl agents default to
performing actions specified by sumo.
Parameters
----------
rl_actions : array_like
list of actions provided by the RL algorithm
Returns
-------
rl_clipped : array_like
The rl_actions clipped according to the box
"""
# ignore if no actions are issued
if rl_actions is None:
return None
# clip according to the action space requirements
if isinstance(self.action_space, Box):
for key, action in rl_actions.items():
rl_actions[key] = np.clip(
action,
a_min=self.action_space.low,
a_max=self.action_space.high)
return rl_actions
def apply_rl_actions(self, rl_actions=None):
"""Specify the actions to be performed by the rl agent(s).
If no actions are provided at any given step, the rl agents default to
performing actions specified by sumo.
Parameters
----------
rl_actions : dict of array_like
dict of list of actions provided by the RL algorithm
"""
# ignore if no actions are issued
if rl_actions is None:
return
# clip according to the action space requirements
clipped_actions = self.clip_actions(rl_actions)
self._apply_rl_actions(clipped_actions)
| |
import bpy
import mathutils
from mathutils import Vector
import csv
#from dateutil.parser import parse
from datetime import datetime
from math import ceil
import os
# If true, we only create a subset of data points (faster for iteration).
DEBUG=False
# If DEBUG is true, this is how frequently to take a sample from the dataset
# ie. 5 would take every 5th row from the csv.
MOD_DEBUG = 5
# Switch for city: sf, istanbul, or ottawa
CITY = "sf"
# csv path -- reassigned per-city inside run() below.
CSV_PATH = 'TODO: currently 1:1 data set for cities, so hardcoded below'
# Absolute path to your repo (parent directory of this script's directory).
path, filename = os.path.split(os.path.dirname(os.path.realpath(__file__)))
REPO_PATH = path + '/'
def run():
    """Entry point: reset the scene, then build lights, water, data bars
    and the camera for the city selected by the CITY constant."""
    global CSV_PATH
    # Setup Scene.
    scn = bpy.context.scene
    scn.frame_start = 1
    scn.frame_end = 801
    bpy.context.scene.layers[3] = True
    # array index 3 maps to layer 4
    dynamic_layer = 3
    # Delete dynamic objects (everything previously generated on that layer).
    generated_objects = [ob for ob in bpy.context.scene.objects if ob.layers[dynamic_layer]]
    for obj in generated_objects:
        print(obj)
        obj.select =True
    bpy.ops.object.delete()
    # hide city layers :TODO: smart enable based on which city we're doing.
    bpy.context.scene.layers[0] = False
    bpy.context.scene.layers[1] = False
    bpy.context.scene.layers[2] = False
    bpy.context.scene.layers[dynamic_layer] = True # sets dynamic layer to active.
    createLight()
    createWater()
    # TODO: hide/unhide right layers for each city.
    # Per-city: point CSV_PATH at the dataset, build the bars, add the
    # camera, and re-show that city's base-map layer.
    if CITY == "ottawa":
        CSV_PATH = REPO_PATH + 'data/ottawa-publicly-accessible-computers.csv'
        addObjects(getOttawaData())
        createOttawaCamera()
        bpy.context.scene.layers[1] = True
    elif CITY == "sf":
        CSV_PATH = REPO_PATH + 'data/alcohol_locations.csv'
        addObjects(getSfData())
        createSfCamera()
        bpy.context.scene.layers[0] = True
    elif CITY == "istanbul":
        CSV_PATH = REPO_PATH + 'data/tweetsIstanbul.csv'
        addObjects(getIstanbulData())
        createIstanbulCamera()
        bpy.context.scene.layers[2] = True
    else:
        print("unrecognized CITY name, try: sf, ottawa, or istanbul")
def createLight():
    """Add two angled sun lamps above the scene for even illumination."""
    # Add two suns, not standard practice...but best lighting.
    bpy.ops.object.lamp_add(type='SUN', view_align=False, location=(0, 0, 20))
    bpy.ops.transform.rotate(value=0.45, axis=(-0.172023, 0.980755, -0.0923435), constraint_axis=(False, False, False), constraint_orientation='GLOBAL', mirror=False, proportional='DISABLED', proportional_edit_falloff='SMOOTH', proportional_size=1)
    bpy.ops.object.lamp_add(type='SUN', view_align=False, location=(3, 3, 23))
    bpy.ops.transform.rotate(value=0.45, axis=(-0.17, 0.98, -0.09), constraint_axis=(False, False, False), constraint_orientation='GLOBAL', mirror=False, proportional='DISABLED', proportional_edit_falloff='SMOOTH', proportional_size=1)
def createWater():
    """Add a large flat plane at the origin and give it a pale-blue
    'water' material, reusing the material if it already exists."""
    bpy.ops.mesh.primitive_plane_add(radius=100, location=(0, 0, 0))
    material = bpy.data.materials.get('water')
    if material is None:
        # First call: create the shared water material.
        material = bpy.data.materials.new(name='water')
        material.diffuse_color = (0.74, 0.74, 1.0)
    # The freshly added plane is the active object; attach the material
    # to its first slot.
    bpy.context.object.data.materials.append(material)
# TODO: named params so it's more readable.
# Per-city camera presets: (start_location, mid-animation translation,
# end-of-animation translation) handed to createCameraCommon.
def createOttawaCamera():
    createCameraCommon((15, 5, 3.66), (-22, -20, -0.6), (0,0, 9))
def createSfCamera():
    createCameraCommon((-24, 50, 3.66), (23, -18, -0.6), (0,0, 5))
def createIstanbulCamera():
    createCameraCommon((-24, 50, 2.66), (23, -18, -0.6), (0,0, 7))
# Return an array of objects of the form:
# {x: 123, y:32, z:22, startFrame: 1234, colour: (0.5, 0.2, 0.8), colourName: 'someNameForThisColour'}
def getOttawaData():
    """Load the Ottawa publicly-accessible-computers CSV and map each row
    to a plot point dict (keys: x, y, z, startFrame, colour, colourName).

    Rows are returned in CSV order; in DEBUG mode only every MOD_DEBUG-th
    row is kept.
    """
    return_data = []
    mod_counter = 0
    # Context manager closes the CSV handle (the original left the file
    # open for the lifetime of the process).
    with open(CSV_PATH, newline='') as csv_file:
        reader = csv.DictReader(csv_file, delimiter=',')
        for row in reader:
            mod_counter += 1
            if DEBUG and (mod_counter % MOD_DEBUG) != 0:
                continue
            # TODO: helper to get lat,lng max, min, and avg to avoid manual calibration.
            return_data.append({
                'x': (float(row['lng']) + 75.63) * 120,
                'y': (float(row['lat']) - 45.45) * 165,
                'z': float(row['COMPUTERS']),
                # Stagger appearance: later rows and bigger sites start later.
                'startFrame': mod_counter + 3 * float(row['COMPUTERS']),
                'colour': (0.6, 0.9, 0.6),
                'colourName': "MaterialOttawa"
            })
    return return_data
# Return an array of objects of the form:
# {x: 123, y:32, z:22, startFrame: 1234, colour: (0.5, 0.2, 0.8), colourName: 'someNameForThisColour'}
def getSfData():
    """Load the SF liquor-license CSV and map each type-21 row to a plot
    point dict (keys: x, y, z, startFrame, colour, colourName).

    Rows are returned in CSV order; in DEBUG mode only every MOD_DEBUG-th
    matching row is kept.
    """
    mod_counter = 0
    return_data = []
    # Context manager closes the CSV handle (the original left the file
    # open for the lifetime of the process).
    with open(CSV_PATH, newline='') as csv_file:
        reader = csv.DictReader(csv_file, delimiter=',')
        for row in reader:
            mod_counter += 1
            # Filter which rows are used based on the License_Ty column.
            if row['License_Ty'] == '21':
                if DEBUG and (mod_counter % MOD_DEBUG) != 0:
                    continue
                issue_date = row['Orig_Iss_D'].split('/')
                # Truncating the last digit of the year gives the decade;
                # offset by 194 so the 1940s map near 0 for the colour ramp.
                decade = (float(issue_date[0][:-1]) - 194)
                return_data.append({
                    'x': (float(row['lng']) + 122.41) * 380,
                    'y': (float(row['lat']) - 37.7) * 380,
                    'z': 5.5 + float(issue_date[1]) * 0.05,
                    'startFrame': (float(issue_date[0]) - 1949) * 10 + float(issue_date[1]) * 2 - 30,
                    'colour': (decade * 0.1, 0.1 + decade * 0.15, 0.2 + decade * 0.11),
                    'colourName': "CubeMaterialz" + issue_date[0][:-1]  # Truncate last digit of year, to get decade.
                })
    return return_data
# Return an array of objects of the form:
# {x: 123, y:32, z:22, startFrame: 1234, colour: (0.5, 0.2, 0.8), colourName: 'someNameForThisColour'}
def getIstanbulData():
    """Load the Istanbul tweets CSV and map each row to a plot point dict
    (keys: x, y, z, startFrame, colour, colourName).

    Rows are returned in CSV order; in DEBUG mode only every MOD_DEBUG-th
    row is kept.
    """
    mod_counter = 0
    return_data = []
    # Context manager closes the CSV handle (the original left the file
    # open for the lifetime of the process).
    with open(CSV_PATH, newline='') as csv_file:
        reader = csv.DictReader(csv_file, delimiter=',')
        for row in reader:
            mod_counter += 1
            if DEBUG and (mod_counter % MOD_DEBUG) != 0:
                continue
            return_data.append({
                'x': (float(row['lng']) - 28.996) * 1180,
                'y': (float(row['lat']) - 41.008) * 1390,
                # Bar height scales with follower count; 0.2 floor keeps
                # zero-follower accounts visible.
                'z': (float(row['follower_count']) / 100 + 0.2),
                'startFrame': mod_counter,
                'colour': (0.15, 0.7, 0.7), #TODO off of row["source"]
                'colourName': "tweet"
            })
    return return_data
# Given an ordered array of objects, create objects on the map.
def addObjects(all_points):
    """For each point dict, add a thin cube at (x, y) that animates upward
    from below the ground plane starting at point['startFrame'].

    Each point carries its own material name/colour; materials are created
    on first use and reused afterwards.
    """
    for point in all_points:
        # print(row)
        # TODO: Auto Shift and scale the lat,lng automagically based on the range of values.
        # correct shift and scale for lat,long coordinates relative to which is 0,0
        x = point['x']
        y = point['y']
        z = point['z']
        # set the starting frame
        # NOTE(review): startFrame is a float in the city loaders; frame_set
        # presumably wants an int -- confirm against the Blender API.
        bpy.context.scene.frame_set(point['startFrame'])
        #bpy.ops.anim.change_frame(frame = 1)
        # Start the cube half-buried (negative z) so it can rise into view.
        bpy.ops.mesh.primitive_cube_add(radius=0.35,location=(x,y,(-z * 0.35)))
        bpy.ops.transform.resize(value=(0.05, 0.05, 2*z*0.35), constraint_axis=(False, False, True), constraint_orientation='GLOBAL', mirror=False, proportional='DISABLED', proportional_edit_falloff='SMOOTH', proportional_size=1)
        ob = bpy.context.object
        me = ob.data
        # TODO: abstract to external function
        # Get material (create once per colourName, then reuse).
        mat_name = point['colourName']
        if bpy.data.materials.get(mat_name) is not None:
            mat = bpy.data.materials[mat_name]
        else:
            # create material
            mat = bpy.data.materials.new(name=mat_name)
            mat.diffuse_color = point['colour']
        # Assign it to object
        if len(ob.data.materials):
            # assign to 1st material slot
            ob.data.materials[0] = mat
        else:
            # no slots
            ob.data.materials.append(mat)
        # TODO: end of material
        # Create keyframe at the buried position.
        bpy.ops.anim.keyframe_insert_menu(type='Location')
        # Move to end keyframe (TODO: add option animation_duration key
        appear_frame = point['startFrame'] + 75
        bpy.context.scene.frame_set(appear_frame)
        # Translate the cube up to its final position for the second keyframe.
        bpy.ops.transform.translate(value=(0, 0, z*0.35))
        # create keyframe
        bpy.ops.anim.keyframe_insert_menu(type='Location')
    # TODO: remove all materials we've created and no longer need.
    return
def createCameraCommon(start_location, translation1, translation2):
    """Add an equirectangular panoramic camera and keyframe a three-point
    fly-through: start, mid-timeline offset, and near-end offset.

    translation1/translation2 are relative moves applied at the timeline
    midpoint and 15 frames before the end, respectively.
    """
    # add camera
    bpy.ops.object.camera_add(view_align=True, enter_editmode=False, location=start_location, rotation=(1.5708,0,3.14159))
    # Camera is current selected item because we just created camera
    bpy.context.object.data.type = 'PANO'
    bpy.context.object.data.cycles.panorama_type = 'EQUIRECTANGULAR'
    # not working: bpy.context.scene.format = 'MPEG4'
    # not working: bpy.context.scene.codec = 'MPEG4'
    # set frame to frame 1
    bpy.context.scene.frame_set(1)
    # snapshot
    bpy.ops.anim.keyframe_insert_menu(type='Location')
    # move to the middle of the timeline
    bpy.context.scene.frame_set(ceil(bpy.context.scene.frame_end/2))
    # move camera down
    bpy.ops.transform.translate(value=translation1)
    # snapshot (blender will interpolate the movement between keyframes)
    bpy.ops.anim.keyframe_insert_menu(type='Location')
    # near last frame
    bpy.context.scene.frame_set(bpy.context.scene.frame_end - 15)
    # move camera up
    bpy.ops.transform.translate(value=translation2)
    # snapshot (blender will interpolate the movement between keyframes)
    bpy.ops.anim.keyframe_insert_menu(type='Location')
# Entry point when executed as a script (e.g. from Blender's text editor).
if __name__ == "__main__":
    run()
| |
'''
Allow simulating a typewriter using texture projection
Ed Swartz, Feb 2016
'''
from panda3d.core import ScissorEffect, ColorWriteAttrib, CullBinManager, AmbientLight, DirectionalLight, PointLight
from panda3d.core import Point3, Mat4, TransparencyAttrib # @UnusedImport
from direct.interval.LerpInterval import LerpHprInterval, LerpPosInterval, LerpFunc
from direct.interval.MetaInterval import Parallel, Sequence
import skybox
from typist import Typist
# NOTE(review): a ``global`` statement at module level is a no-op; kept as-is.
global globalClock
class World(object):
    """Builds and runs the typewriter scene: lighting, desk/typewriter
    models, the optional intro animation, and hand-off to the Typist once
    the scene is interactive."""
    def __init__(self, base, USE_RP):
        """Store the ShowBase and, unless the render pipeline (USE_RP) is
        in use, attach ambient, directional and point lights."""
        self.base = base
        """ direct.showbase.ShowBase """
        if not USE_RP:
            # Fixed-function lighting path: soft ambient fill...
            alight = AmbientLight('alight')
            alnp = self.base.render.attachNewNode(alight)
            alight.setColor((0.2, 0.2, 0.2, 1))
            self.base.render.setLight(alnp)
            # Put lighting on the main scene
            dlight = DirectionalLight('dlight')
            dlnp = self.base.render.attachNewNode(dlight)
            dlnp.setPos(0, 5, 5)
            dlight.setColor((0.8, 0.8, 0.5, 1))
            dlnp.setHpr(0, 60, 0)
            self.base.render.setLight(dlnp)
            # ...and a point light behind/above the desk.
            plight = PointLight('plight')
            plnp = self.base.render.attachNewNode(plight)
            plnp.setPos(0, -50, 50)
            plnp.setHpr(0, 60, 0)
            self.base.render.setLight(plnp)
        # Sound effects keyed by name; filled in loadup().
        self.sounds = {}
    def start(self, skipIntro):
        """Kick off asset loading shortly after startup; if skipIntro is
        true the arrival animation is bypassed later."""
        self.skipIntro = skipIntro
        self.base.skybox = None
        self.typewriterNP = None
        self.deskNP = None
        # Defer the heavy loading slightly so the window appears first.
        self.base.taskMgr.doMethodLater(0.2, self.loadup, 'loadup')
    def loadup(self, task):
        """Load models, skybox and sounds, then either fade the sky in
        (intro) or jump straight to world creation."""
        # get in front
        self.base.camera.setPos(0, 0, 0)
        # trusty typewriter
        self.typewriterNP = self.base.loader.loadModel('typewriter')
        # the desk
        self.deskNP = self.base.loader.loadModel('desk')
        # skybox
        skyb = skybox.NetmapSkybox(self.base, 'iceRiver', '', '.jpg')
        self.sky = skyb.create(self.base.cam)
        # sounds
        self.sounds['bell'] = self.base.loader.loadSfx('bell.wav')
        self.sounds['advance'] = self.base.loader.loadSfx('advance.wav')
        self.sounds['pullback'] = self.base.loader.loadSfx('pullback.wav')
        self.sounds['scroll'] = self.base.loader.loadSfx('scroll.wav')
        self.sounds['type1'] = self.base.loader.loadSfx('type1.wav')
        self.sounds['type2'] = self.base.loader.loadSfx('type2.wav')
        self.sounds['type3'] = self.base.loader.loadSfx('type3.wav')
        self.base.sfxManagerList[0].setVolume(0.5)
        if not self.skipIntro:
            # Fade the skybox in over one second, then fire 'createWorld'.
            self.sky.setAttrib(TransparencyAttrib.make(TransparencyAttrib.M_alpha))
            self.sky.setAlphaScale(0, 1)
            alphaInterval = LerpFunc(lambda a: self.sky.setAlphaScale(a, 1),
                                     duration=1,
                                     fromData=0,
                                     toData=1,
                                     blendType='easeIn')
            seq = Sequence(alphaInterval)
            seq.setDoneEvent('createWorld')
            seq.start()
        else:
            self.base.messenger.send('createWorld')
    def createWorld(self, task=None):
        """Place the desk and typewriter, compute camera/typewriter targets,
        and start either the arrival animation or interactivity."""
        self.sky.clearAttrib(TransparencyAttrib)
        self.deskNP.reparentTo(self.base.render)
        self.deskNP.setScale(7.5)
        self.deskNP.setPos(0, -5, -6.5)
        # make a box that clips content under the desk (e.g. the paper)
        bb = self.deskNP.getTightBounds()
        sz = (bb[1]-bb[0]) * 0.8
        self.underDeskClip = self.base.loader.loadModel('box')
        self.underDeskClip.setScale(sz)
        self.underDeskClip.reparentTo(self.base.render)
        self.underDeskClip.setPos(-sz.x/2, -sz.y*1.1, -sz.z*0.73)
        if False:
            # --> nope, this actually hides everything the camera might see
            self.newBin = CullBinManager.getGlobalPtr().addBin('foo', CullBinManager.BT_state_sorted, -50)
            # make the box obscure geometry "inside" it
            self.underDeskClip.setAttrib(ColorWriteAttrib.make(False))
            self.underDeskClip.setDepthWrite(True)
            self.underDeskClip.setBin('foo', -50)
        else:
            # Only the bounds are needed; drop the visible box node itself.
            bb = self.underDeskClip.getTightBounds()
            self.underDeskClip.removeNode()
        self.typewriterNP.reparentTo(self.base.render)
        self.typewriterNP.setHpr(0, 0, 0)
        self.typewriterNP.setScale(5)
        self.base.camera.setPos(0, -25, 5)
        # Final resting positions used by the intro lerp and placeItems().
        self.cameraTarget = Point3(0, -9.5, 7.5)
        #self.cameraTarget = Point3(0, -25, 2.5)
        self.cameraHprTarget = Point3(0, -19.5, 0)
        self.typewriterTarget = Point3(0, -2.5, 2.666)
        self.typewriterStart = Point3(0, -5, 10)
        if not self.skipIntro:
            self.animateArrival()
        else:
            self.activateTypewriter()
    def animateArrival(self):
        """
        Cheesy animation introducing viewer to the DESK and TYPEWRITER
        :return:
        """
        camMoveInterval = LerpPosInterval(self.base.camera, 2, self.cameraTarget)
        camHprInterval = LerpHprInterval(self.base.camera, 2, self.cameraHprTarget)
        dropKeyboardInterval = LerpPosInterval(self.typewriterNP, 2,
                                               self.typewriterTarget,
                                               startPos=self.typewriterStart,
                                               blendType='easeOut')
        sequence = Parallel(camMoveInterval, camHprInterval, dropKeyboardInterval)
        sequence.setDoneEvent('arrivalFinished')
        def arrivalFinished():
            # Hand over control and drop the skip-intro key bindings.
            self.activateTypewriter()
            self.base.ignore('enter')
            self.base.ignore('esc')
        self.base.accept('arrivalFinished', arrivalFinished)
        sequence.start()
        # for the impatient...
        def cancelStartupSequence():
            sequence.finish()
        self.base.acceptOnce('enter', cancelStartupSequence)
        self.base.acceptOnce('esc', cancelStartupSequence)
    def activateTypewriter(self):
        """
        Once the intro is complete, enable interactivity
        """
        self.placeItems()
        # re-enable mouse control, seeded from the camera's current transform
        mat=Mat4(self.base.camera.getMat())
        mat.invertInPlace()
        self.base.mouseInterfaceNode.setMat(mat)
        self.base.enableMouse()
        self.typist = Typist(self.base, self.typewriterNP, self.underDeskClip, self.sounds)
        self.typist.start()
    def placeItems(self):
        """
        Place items in world after intro animation (should be a no-op, but to be sure...)
        """
        self.base.camera.setHpr(self.cameraHprTarget)
        self.base.camera.setPos(self.cameraTarget)
        self.typewriterNP.setPos(self.typewriterTarget)
| |
"""Utility functions for use in templates / controllers
*PLEASE NOTE*: Many of these functions expect an initialized RequestConfig
object. This is expected to have been initialized for EACH REQUEST by the web
framework.
"""
import os
import re
import urllib
from routes import request_config
def _screenargs(kargs):
    """Screen *kargs* against the current request's routing state.

    Merges the keyword args used for URL generation with the values
    remembered from the current request, implementing route "memory".
    """
    config = request_config()
    # Explicit mappers keep no route memory at all.
    if config.mapper.explicit:
        if config.mapper.sub_domains:
            return _subdomain_check(config, kargs)
        return kargs
    controller_name = kargs.get('controller')
    if controller_name and controller_name.startswith('/'):
        # A leading '/' on the controller disables route memory entirely.
        kargs['controller'] = kargs['controller'][1:]
        return kargs
    if controller_name and 'action' not in kargs:
        # A controller without an action defaults to 'index'.
        kargs['action'] = 'index'
    memory_kargs = getattr(config, 'mapper_dict', {}).copy()
    # A None value in kargs means "drop this key from memory as well".
    none_keys = [key for key in kargs.keys() if kargs[key] is None]
    for key in none_keys:
        del kargs[key]
        if key in memory_kargs:
            del memory_kargs[key]
    # Freshly supplied args win over remembered ones.
    memory_kargs.update(kargs)
    if config.mapper.sub_domains:
        memory_kargs = _subdomain_check(config, memory_kargs)
    return memory_kargs
def _subdomain_check(config, kargs):
    """Screen the kargs for a subdomain and alter it appropriately depending
    on the current subdomain or lack thereof."""
    if not config.mapper.sub_domains:
        return kargs
    subdomain = kargs.pop('sub_domain', None)
    if isinstance(subdomain, unicode):
        subdomain = str(subdomain)
    # Split off any explicit port so it can be re-appended to the new host.
    fullhost = config.environ.get('HTTP_HOST') or \
        config.environ.get('SERVER_NAME')
    pieces = fullhost.split(':')
    host = pieces[0]
    port = ':' + pieces[1] if len(pieces) > 1 else ''
    # Strip the current subdomain to recover the bare domain.
    sub_match = re.compile(r'^.+?\.(%s)$' % config.mapper.domain_match)
    domain = re.sub(sub_match, r'\1', host)
    if subdomain and not host.startswith(subdomain) and \
            subdomain not in config.mapper.sub_domains_ignore:
        # Switch the URL onto the requested subdomain.
        kargs['_host'] = subdomain + '.' + domain + port
    elif (subdomain in config.mapper.sub_domains_ignore or
            subdomain is None) and domain != host:
        # Drop back to the bare domain.
        kargs['_host'] = domain + port
    return kargs
def _url_quote(string, encoding):
    """A Unicode handling version of urllib.quote_plus.

    Encodes *string* with *encoding* when one is given, otherwise falls
    back to a plain str() conversion; '/' is left unescaped either way.
    """
    encoded = unicode(string).encode(encoding) if encoding else str(string)
    return urllib.quote_plus(encoded, '/')
def url_for(*args, **kargs):
    """Generates a URL.

    All keys given to url_for are sent to the Routes Mapper instance for
    generation except for::

        anchor     specifies the anchor name to be appended to the path
        host       overrides the default (current) host if provided
        protocol   overrides the default (current) protocol if provided
        qualified  creates the URL with the host/port information as needed

    The URL is generated based on the rest of the keys. When generating a new
    URL, values will be used from the current request's parameters (if
    present). The following rules are used to determine when and how to keep
    the current requests parameters:

    * If the controller is present and begins with '/', no defaults are used
    * If the controller is changed, action is set to 'index' unless otherwise
      specified

    For example, if the current request yielded a dict of
    {'controller': 'blog', 'action': 'view', 'id': 2}, with the standard
    ':controller/:action/:id' route, you'd get the following results::

        url_for(id=4) => '/blog/view/4',
        url_for(controller='/admin') => '/admin',
        url_for(controller='admin') => '/admin/view/2'
        url_for(action='edit') => '/blog/edit/2',
        url_for(action='list', id=None) => '/blog/list'

    **Static and Named Routes**

    If there is a string present as the first argument, a lookup is done
    against the named routes table to see if there's any matching routes. The
    keyword defaults used with static routes will be sent in as GET query
    arg's if a route matches.

    If no route by that name is found, the string is assumed to be a raw URL.
    Should the raw URL begin with ``/`` then appropriate SCRIPT_NAME data will
    be added if present, otherwise the string will be used as the url with
    keyword args becoming GET query args.
    """
    anchor = kargs.get('anchor')
    host = kargs.get('host')
    protocol = kargs.get('protocol')
    qualified = kargs.pop('qualified', None)
    # Remove special words from kargs, convert placeholders
    # ('anchor_' etc. let callers pass a literal 'anchor' route variable).
    for key in ['anchor', 'host', 'protocol']:
        if kargs.get(key):
            del kargs[key]
        if kargs.has_key(key+'_'):
            kargs[key] = kargs.pop(key+'_')
    config = request_config()
    route = None
    static = False
    encoding = config.mapper.encoding
    url = ''
    if len(args) > 0:
        # First positional arg: named-route lookup, else raw URL.
        route = config.mapper._routenames.get(args[0])
        if route and route.defaults.has_key('_static'):
            static = True
            url = route.routepath
        # No named route found, assume the argument is a relative path
        if not route:
            static = True
            url = args[0]
        if url.startswith('/') and hasattr(config, 'environ') \
                and config.environ.get('SCRIPT_NAME'):
            url = config.environ.get('SCRIPT_NAME') + url
        if static:
            if kargs:
                # Static URLs carry remaining kargs as GET query args.
                url += '?'
                query_args = []
                for key, val in kargs.iteritems():
                    query_args.append("%s=%s" % (
                        urllib.quote_plus(unicode(key).encode(encoding)),
                        urllib.quote_plus(unicode(val).encode(encoding))))
                url += '&'.join(query_args)
    if not static:
        route_args = []
        if route:
            if config.mapper.hardcode_names:
                route_args.append(route)
            newargs = route.defaults.copy()
            newargs.update(kargs)
            # If this route has a filter, apply it
            if route.filter:
                newargs = route.filter(newargs)
            # Handle sub-domains
            newargs = _subdomain_check(config, newargs)
        else:
            # No named route: merge with the current request's route memory.
            newargs = _screenargs(kargs)
        anchor = newargs.pop('_anchor', None) or anchor
        host = newargs.pop('_host', None) or host
        protocol = newargs.pop('_protocol', None) or protocol
        url = config.mapper.generate(*route_args, **newargs)
    if anchor:
        url += '#' + _url_quote(anchor, encoding)
    if host or protocol or qualified:
        if not host and not qualified:
            # Ensure we don't use a specific port, as changing the protocol
            # means that we most likely need a new port
            host = config.host.split(':')[0]
        elif not host:
            host = config.host
        if not protocol:
            protocol = config.protocol
        if url is not None:
            url = protocol + '://' + host + url
    if not isinstance(url, str) and url is not None:
        raise Exception("url_for can only return a string or None, got "
                        "unicode instead: %s" % url)
    return url
def redirect_to(*args, **kargs):
    """Issue a redirect based on the arguments.

    Redirects *should* occur as a "302 Moved" header, however the web
    framework may utilize a different method.

    All arguments are passed to url_for to retrieve the appropriate URL,
    which is then handed to the framework-supplied redirect callable.
    """
    target_url = url_for(*args, **kargs)
    # The active request config supplies the framework's redirect hook.
    return request_config().redirect(target_url)
def controller_scan(directory=None):
    """Scan a directory for python files and use them as controllers.

    Returns controller names ('/'-separated relative paths without the
    ``.py`` extension, modules starting with '_' excluded), longest names
    first so more specific controllers are matched before shorter prefixes.
    Returns an empty list when no directory is given.
    """
    if directory is None:
        return []

    def find_controllers(dirname, prefix=''):
        """Locate controllers in a directory"""
        controllers = []
        for fname in os.listdir(dirname):
            filename = os.path.join(dirname, fname)
            # Only .py modules not starting with '_' count as controllers.
            if os.path.isfile(filename) and \
                    re.match(r'^[^_].*\.py$', fname):
                controllers.append(prefix + fname[:-3])
            elif os.path.isdir(filename):
                controllers.extend(find_controllers(filename,
                                                    prefix=prefix + fname + '/'))
        return controllers

    controllers = find_controllers(directory)
    # Longest-first ordering. The key-based sort replaces the Python-2-only
    # cmp-function form (``list.sort(cmp_func)`` is a TypeError on Python 3)
    # and, being stable, preserves the original order for equal lengths.
    controllers.sort(key=len, reverse=True)
    return controllers
class RouteException(Exception):
    """Raised for errors encountered while matching or generating routes."""
    pass
| |
from __future__ import unicode_literals
from datetime import datetime
from decimal import Decimal
from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib.admin import helpers
from django.contrib.admin.utils import (
NestedObjects, display_for_field, flatten, flatten_fieldsets,
label_for_field, lookup_field,
)
from django.db import DEFAULT_DB_ALIAS, models
from django.test import TestCase, override_settings
from django.utils import six
from django.utils.formats import localize
from django.utils.safestring import mark_safe
from .models import (
Article, Car, Count, Event, EventGuide, Location, Site, Vehicle,
)
class NestedObjectsTests(TestCase):
    """
    Tests for ``NestedObject`` utility collection.
    """
    def setUp(self):
        # Five Count rows; tests wire up parent links between them.
        self.n = NestedObjects(using=DEFAULT_DB_ALIAS)
        self.objs = [Count.objects.create(num=i) for i in range(5)]
    def _check(self, target):
        # Compare the collector's nested structure (by num) to *target*.
        self.assertEqual(self.n.nested(lambda obj: obj.num), target)
    def _connect(self, i, j):
        # Make objs[i] a child of objs[j].
        self.objs[i].parent = self.objs[j]
        self.objs[i].save()
    def _collect(self, *indices):
        self.n.collect([self.objs[i] for i in indices])
    def test_unrelated_roots(self):
        # Two independent roots; only the collected root's children appear.
        self._connect(2, 1)
        self._collect(0)
        self._collect(1)
        self._check([0, 1, [2]])
    def test_siblings(self):
        # Two children of the same root are grouped under it.
        self._connect(1, 0)
        self._connect(2, 0)
        self._collect(0)
        self._check([0, [1, 2]])
    def test_non_added_parent(self):
        # A parent that was never collected is not pulled in.
        self._connect(0, 1)
        self._collect(0)
        self._check([0])
    def test_cyclic(self):
        # A 0 -> 2 -> 1 -> 0 cycle must not recurse forever.
        self._connect(0, 2)
        self._connect(1, 0)
        self._connect(2, 1)
        self._collect(0)
        self._check([0, [1, [2]]])
    def test_queries(self):
        self._connect(1, 0)
        self._connect(2, 0)
        # 1 query to fetch all children of 0 (1 and 2)
        # 1 query to fetch all children of 1 and 2 (none)
        # Should not require additional queries to populate the nested graph.
        self.assertNumQueries(2, self._collect, 0)
    def test_on_delete_do_nothing(self):
        """
        Check that the nested collector doesn't query for DO_NOTHING objects.
        """
        n = NestedObjects(using=DEFAULT_DB_ALIAS)
        objs = [Event.objects.create()]
        EventGuide.objects.create(event=objs[0])
        with self.assertNumQueries(2):
            # One for Location, one for Guest, and no query for EventGuide
            n.collect(objs)
    def test_relation_on_abstract(self):
        """
        #21846 -- Check that `NestedObjects.collect()` doesn't trip
        (AttributeError) on the special notation for relations on abstract
        models (related_name that contains %(app_label)s and/or %(class)s).
        """
        n = NestedObjects(using=DEFAULT_DB_ALIAS)
        Car.objects.create()
        n.collect([Vehicle.objects.first()])
class UtilsTests(TestCase):
empty_value = '-empty-'
    def test_values_from_lookup_field(self):
        """
        Regression test for #12654: lookup_field

        Exercises every lookup source lookup_field supports: a model field,
        a ModelAdmin method, a plain callable, a model method, and a plain
        instance attribute.
        """
        SITE_NAME = 'example.com'
        TITLE_TEXT = 'Some title'
        CREATED_DATE = datetime.min
        ADMIN_METHOD = 'admin method'
        SIMPLE_FUNCTION = 'function'
        INSTANCE_ATTRIBUTE = 'attr'
        class MockModelAdmin(object):
            def get_admin_value(self, obj):
                return ADMIN_METHOD
        simple_function = lambda obj: SIMPLE_FUNCTION
        site_obj = Site.objects.create(domain=SITE_NAME)
        article = Article(
            site=site_obj,
            title=TITLE_TEXT,
            created=CREATED_DATE,
        )
        article.non_field = INSTANCE_ATTRIBUTE
        # (lookup name or callable, expected resolved value) pairs.
        verifications = (
            ('site', SITE_NAME),
            ('created', localize(CREATED_DATE)),
            ('title', TITLE_TEXT),
            ('get_admin_value', ADMIN_METHOD),
            (simple_function, SIMPLE_FUNCTION),
            ('test_from_model', article.test_from_model()),
            ('non_field', INSTANCE_ATTRIBUTE)
        )
        mock_admin = MockModelAdmin()
        for name, value in verifications:
            field, attr, resolved_value = lookup_field(name, article, mock_admin)
            # Model fields additionally pass through display_for_field.
            if field is not None:
                resolved_value = display_for_field(resolved_value, field, self.empty_value)
            self.assertEqual(value, resolved_value)
    def test_null_display_for_field(self):
        """
        Regression test for #12550: display_for_field should handle None
        value.
        """
        # Plain CharField: None falls back to the empty-value placeholder.
        display_value = display_for_field(None, models.CharField(), self.empty_value)
        self.assertEqual(display_value, self.empty_value)
        # With choices, a None choice label takes precedence over the
        # empty-value placeholder.
        display_value = display_for_field(None, models.CharField(
            choices=(
                (None, "test_none"),
            )
        ), self.empty_value)
        self.assertEqual(display_value, "test_none")
        display_value = display_for_field(None, models.DateField(), self.empty_value)
        self.assertEqual(display_value, self.empty_value)
        display_value = display_for_field(None, models.TimeField(), self.empty_value)
        self.assertEqual(display_value, self.empty_value)
        # Regression test for #13071: NullBooleanField has special
        # handling.
        display_value = display_for_field(None, models.NullBooleanField(), self.empty_value)
        expected = '<img src="%sadmin/img/icon-unknown.gif" alt="None" />' % settings.STATIC_URL
        self.assertHTMLEqual(display_value, expected)
        display_value = display_for_field(None, models.DecimalField(), self.empty_value)
        self.assertEqual(display_value, self.empty_value)
        display_value = display_for_field(None, models.FloatField(), self.empty_value)
        self.assertEqual(display_value, self.empty_value)
def test_number_formats_display_for_field(self):
display_value = display_for_field(12345.6789, models.FloatField(), self.empty_value)
self.assertEqual(display_value, '12345.6789')
display_value = display_for_field(Decimal('12345.6789'), models.DecimalField(), self.empty_value)
self.assertEqual(display_value, '12345.6789')
display_value = display_for_field(12345, models.IntegerField(), self.empty_value)
self.assertEqual(display_value, '12345')
@override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
def test_number_formats_with_thousand_seperator_display_for_field(self):
display_value = display_for_field(12345.6789, models.FloatField(), self.empty_value)
self.assertEqual(display_value, '12,345.6789')
display_value = display_for_field(Decimal('12345.6789'), models.DecimalField(), self.empty_value)
self.assertEqual(display_value, '12,345.6789')
display_value = display_for_field(12345, models.IntegerField(), self.empty_value)
self.assertEqual(display_value, '12,345')
    def test_label_for_field(self):
        """
        Tests for label_for_field
        """
        # A model field without an explicit verbose_name is labelled by
        # its own name.
        self.assertEqual(
            label_for_field("title", Article),
            "title"
        )
        # An explicit verbose_name on the field wins.
        self.assertEqual(
            label_for_field("title2", Article),
            "another name"
        )
        # With return_attr=True a (label, attr) pair is returned; for plain
        # model fields the attr slot is None.
        self.assertEqual(
            label_for_field("title2", Article, return_attr=True),
            ("another name", None)
        )
        # The string-representation dunders are labelled with the model name.
        self.assertEqual(
            label_for_field("__unicode__", Article),
            "article"
        )
        self.assertEqual(
            label_for_field("__str__", Article),
            str("article")
        )
        # Names that resolve to nothing raise AttributeError.
        self.assertRaises(
            AttributeError,
            lambda: label_for_field("unknown", Article)
        )
        def test_callable(obj):
            return "nothing"
        # A named callable without short_description is labelled from its
        # function name; with return_attr=True the callable itself comes back.
        self.assertEqual(
            label_for_field(test_callable, Article),
            "Test callable"
        )
        self.assertEqual(
            label_for_field(test_callable, Article, return_attr=True),
            ("Test callable", test_callable)
        )
        # Methods defined on the model are labelled from the method name ...
        self.assertEqual(
            label_for_field("test_from_model", Article),
            "Test from model"
        )
        self.assertEqual(
            label_for_field("test_from_model", Article, return_attr=True),
            ("Test from model", Article.test_from_model)
        )
        # ... unless the method sets short_description, which is used verbatim.
        self.assertEqual(
            label_for_field("test_from_model_with_override", Article),
            "not What you Expect"
        )
        # An anonymous callable falls back to the generic "--" label.
        self.assertEqual(
            label_for_field(lambda x: "nothing", Article),
            "--"
        )
        class MockModelAdmin(object):
            def test_from_model(self, obj):
                return "nothing"
            test_from_model.short_description = "not Really the Model"
        # A same-named attribute on the ModelAdmin shadows the model method.
        self.assertEqual(
            label_for_field("test_from_model", Article, model_admin=MockModelAdmin),
            "not Really the Model"
        )
        self.assertEqual(
            label_for_field("test_from_model", Article,
                model_admin=MockModelAdmin,
                return_attr=True),
            ("not Really the Model", MockModelAdmin.test_from_model)
        )
def test_label_for_property(self):
# NOTE: cannot use @property decorator, because of
# AttributeError: 'property' object has no attribute 'short_description'
class MockModelAdmin(object):
def my_property(self):
return "this if from property"
my_property.short_description = 'property short description'
test_from_property = property(my_property)
self.assertEqual(
label_for_field("test_from_property", Article, model_admin=MockModelAdmin),
'property short description'
)
def test_related_name(self):
"""
Regression test for #13963
"""
self.assertEqual(
label_for_field('location', Event, return_attr=True),
('location', None),
)
self.assertEqual(
label_for_field('event', Location, return_attr=True),
('awesome event', None),
)
self.assertEqual(
label_for_field('guest', Event, return_attr=True),
('awesome guest', None),
)
def test_logentry_unicode(self):
"""
Regression test for #15661
"""
log_entry = admin.models.LogEntry()
log_entry.action_flag = admin.models.ADDITION
self.assertTrue(
six.text_type(log_entry).startswith('Added ')
)
log_entry.action_flag = admin.models.CHANGE
self.assertTrue(
six.text_type(log_entry).startswith('Changed ')
)
log_entry.action_flag = admin.models.DELETION
self.assertTrue(
six.text_type(log_entry).startswith('Deleted ')
)
# Make sure custom action_flags works
log_entry.action_flag = 4
self.assertEqual(six.text_type(log_entry), 'LogEntry Object')
def test_safestring_in_field_label(self):
# safestring should not be escaped
class MyForm(forms.Form):
text = forms.CharField(label=mark_safe('<i>text</i>'))
cb = forms.BooleanField(label=mark_safe('<i>cb</i>'))
form = MyForm()
self.assertHTMLEqual(helpers.AdminField(form, 'text', is_first=False).label_tag(),
'<label for="id_text" class="required inline"><i>text</i>:</label>')
self.assertHTMLEqual(helpers.AdminField(form, 'cb', is_first=False).label_tag(),
'<label for="id_cb" class="vCheckboxLabel required inline"><i>cb</i></label>')
# normal strings needs to be escaped
class MyForm(forms.Form):
text = forms.CharField(label='&text')
cb = forms.BooleanField(label='&cb')
form = MyForm()
self.assertHTMLEqual(helpers.AdminField(form, 'text', is_first=False).label_tag(),
'<label for="id_text" class="required inline">&text:</label>')
self.assertHTMLEqual(helpers.AdminField(form, 'cb', is_first=False).label_tag(),
'<label for="id_cb" class="vCheckboxLabel required inline">&cb</label>')
def test_flatten(self):
flat_all = ['url', 'title', 'content', 'sites']
inputs = (
((), []),
(('url', 'title', ('content', 'sites')), flat_all),
(('url', 'title', 'content', 'sites'), flat_all),
((('url', 'title'), ('content', 'sites')), flat_all)
)
for orig, expected in inputs:
self.assertEqual(flatten(orig), expected)
def test_flatten_fieldsets(self):
"""
Regression test for #18051
"""
fieldsets = (
(None, {
'fields': ('url', 'title', ('content', 'sites'))
}),
)
self.assertEqual(flatten_fieldsets(fieldsets), ['url', 'title', 'content', 'sites'])
fieldsets = (
(None, {
'fields': ('url', 'title', ['content', 'sites'])
}),
)
self.assertEqual(flatten_fieldsets(fieldsets), ['url', 'title', 'content', 'sites'])
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Factory functions producing ABINIT Works.
Works are packed together in a flow. A flow can be run using abirun (abipy)
Entry points for client code (high-level interface)
"""
from __future__ import unicode_literals, division, print_function
import os
from .abiobjects import KSampling, Screening, SelfEnergy, ExcHamiltonian, HilbertTransform
#from .strategies import ScfStrategy, NscfStrategy, ScreeningStrategy, SelfEnergyStrategy, MdfBse_Strategy
from .works import BandStructureWork, G0W0Work, BseMdfWork
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__email__ = "gmatteo at gmail.com"
#def bandstructure_work(structure, pseudos, scf_kppa, nscf_nband,
# ndivsm, accuracy="normal", spin_mode="polarized",
# smearing="fermi_dirac:0.1 eV", charge=0.0, scf_algorithm=None,
# dos_kppa=None, workdir=None, manager=None, work_class=None, **extra_abivars):
# """
# Returns a :class:`Work` for bandstructure calculations.
#
# Args:
# structure: Pymatgen structure.
# pseudos: List of `Pseudo` objects.
# scf_kppa: Defines the sampling used for the SCF run.
# nscf_nband: Number of bands included in the NSCF run.
# ndivs: Number of divisions used to sample the smallest segment of the k-path.
# accuracy: Accuracy of the calculation.
# spin_mode: Spin polarization.
# smearing: Smearing technique.
# charge: Electronic charge added to the unit cell.
# scf_algorithm: Algorithm used for solving of the SCF cycle.
# dos_kppa: Defines the k-point sampling used for the computation of the DOS
# (None if DOS is not wanted).
# workdir: Working directory.
# manager: :class:`TaskManager` instance.
# extra_abivars: Dictionary with extra variables passed to ABINIT.
# """
# #multi = MultiDataset(structure, pseudos, ndtset=2 if dos_kppa is None else 2 + len(dos_kppa))
#
# # SCF calculation.
# scf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0)
#
# scf_strategy = ScfStrategy(structure, pseudos, scf_ksampling,
# accuracy=accuracy, spin_mode=spin_mode,
# smearing=smearing, charge=charge,
# scf_algorithm=scf_algorithm, **extra_abivars)
#
# #scf_electrons = Electrons(spin_mode=spin_mode, smearing=smearing, algorithm=scf_algorithm,
# # charge=charge, nband=scf_nband, fband=None)
# #multi[0].set_vars(scf_ksampling.to_abivars())
# #multi[0].set_vars(scf_electrons.to_abivars())
#
# # Band structure calculation.
# nscf_ksampling = KSampling.path_from_structure(ndivsm, structure)
#
# nscf_strategy = NscfStrategy(scf_strategy, nscf_ksampling, nscf_nband, **extra_abivars)
#
# # DOS calculation.
# dos_strategy = None
# if dos_kppa is not None:
# dos_ksampling = KSampling.automatic_density(structure, dos_kppa, chksymbreak=0)
# #dos_ksampling = KSampling.monkhorst(dos_ngkpt, shiftk=dos_shiftk, chksymbreak=0)
# dos_strategy = NscfStrategy(scf_strategy, dos_ksampling, nscf_nband, nscf_solver=None, **extra_abivars)
# #dos_electrons = aobj.Electrons(spin_mode=spin_mode, smearing=smearing, algorithm={"iscf": -2},
# # charge=charge, nband=nscf_nband)
#
# #dt = 2 + i
# #multi[dt].set_vars(dos_ksampling.to_abivars())
# #multi[dt].set_vars(dos_electrons.to_abivars())
# #multi[dt].set_vars(_stopping_criterion("nscf", accuracy))
#
# if work_class is None: work_class = BandStructureWork
# return work_class(scf_strategy, nscf_strategy, dos_inputs=dos_strategy, workdir=workdir, manager=manager)
#
#
#def g0w0_with_ppmodel_work(structure, pseudos, scf_kppa, nscf_nband, ecuteps, ecutsigx,
# accuracy="normal", spin_mode="polarized", smearing="fermi_dirac:0.1 eV",
# ppmodel="godby", charge=0.0, scf_algorithm=None, inclvkb=2, scr_nband=None,
# sigma_nband=None, gw_qprange=1, workdir=None, manager=None, work_class=None, **extra_abivars):
# """
# Returns a :class:`Work` object that performs G0W0 calculations for the given the material.
#
# Args:
# structure: Pymatgen structure.
# pseudos: List of `Pseudo` objects.
# scf_kppa: Defines the sampling used for the SCF run.
# nscf_nband: Number of bands included in the NSCF run.
# ecuteps: Cutoff energy [Ha] for the screening matrix.
# ecutsigx: Cutoff energy [Ha] for the exchange part of the self-energy.
# accuracy: Accuracy of the calculation.
# spin_mode: Spin polarization.
# smearing: Smearing technique.
# ppmodel: Plasmonpole technique.
# charge: Electronic charge added to the unit cell.
# scf_algorithm: Algorithm used for solving of the SCF cycle.
# inclvkb: Treatment of the dipole matrix elements (see abinit variable).
# scr_nband: Number of bands used to compute the screening (default is nscf_nband)
# sigma_nband: Number of bands used to compute the self-energy (default is nscf_nband)
# gw_qprange: Option for the automatic selection of k-points and bands for GW corrections.
# See Abinit docs for more detail. The default value makes the code compute the
# QP energies for all the point in the IBZ and one band above and one band below the Fermi level.
# workdir: Working directory.
# manager: :class:`TaskManager` instance.
# extra_abivars: Dictionary with extra variables passed to ABINIT.
# """
# # TODO: Cannot use istwfk != 1.
# if "istwfk" not in extra_abivars:
# extra_abivars["istwfk"] = "*1"
#
# scf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0)
#
# scf_strategy = ScfStrategy(structure, pseudos, scf_ksampling,
# accuracy=accuracy, spin_mode=spin_mode,
# smearing=smearing, charge=charge,
# scf_algorithm=scf_algorithm, **extra_abivars)
#
# nscf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0)
#
# nscf_strategy = NscfStrategy(scf_strategy, nscf_ksampling, nscf_nband, **extra_abivars)
#
# if scr_nband is None: scr_nband = nscf_nband
# if sigma_nband is None: sigma_nband = nscf_nband
#
# screening = Screening(ecuteps, scr_nband, w_type="RPA", sc_mode="one_shot",
# hilbert=None, ecutwfn=None, inclvkb=inclvkb)
#
# self_energy = SelfEnergy("gw", "one_shot", sigma_nband, ecutsigx, screening,
# gw_qprange=gw_qprange, ppmodel=ppmodel)
#
# scr_strategy = ScreeningStrategy(scf_strategy, nscf_strategy, screening, **extra_abivars)
#
# sigma_strategy = SelfEnergyStrategy(scf_strategy, nscf_strategy, scr_strategy, self_energy,
# **extra_abivars)
#
# if work_class is None: work_class = G0W0Work
# return work_class(scf_strategy, nscf_strategy, scr_strategy, sigma_strategy, workdir=workdir, manager=manager)
def g0w0_extended_work(structure, pseudos, kppa, nscf_nband, ecuteps, ecutsigx, scf_nband, accuracy="normal",
                       spin_mode="polarized", smearing="fermi_dirac:0.1 eV", response_models=None, charge=0.0,
                       inclvkb=2, scr_nband=None, sigma_nband=None, workdir=None, manager=None, gamma=True, nksmall=20,
                       work_class=None, **extra_abivars):
    """
    Returns a :class:`Work` object that performs G0W0 calculations for the given material.

    Args:
        structure: Pymatgen structure.
        pseudos: List of `Pseudo` objects.
        kppa: Defines the k-point sampling used for the SCF run. When ``gamma``
            is True, small integer values select hard-coded Gamma-centered meshes.
        nscf_nband: Iterable of band counts for the NSCF run; one
            screening/self-energy pair is generated per value.
        ecuteps: Iterable of cutoff energies [Ha] for the screening matrix;
            one screening/self-energy pair is generated per value.
        ecutsigx: Cutoff energy [Ha] for the exchange part of the self-energy
            (raised to max(ecuteps) if smaller).
        scf_nband: Number of bands used in the SCF run.
        accuracy: Accuracy of the calculation.
        spin_mode: Spin polarization.
        smearing: Smearing technique.
        response_models: List of response models; "cd" selects the
            contour-deformation technique, any other entry is used as a
            plasmon-pole model name. Defaults to ["godby"].
        charge: Electronic charge added to the unit cell.
        inclvkb: Treatment of the dipole matrix elements (see abinit variable).
        scr_nband: Number of bands used to compute the screening
            (default is taken from nscf_nband).
        sigma_nband: Number of bands used to compute the self-energy
            (default is taken from nscf_nband).
        workdir: Working directory.
        manager: :class:`TaskManager` instance.
        gamma: If True, Gamma-centered k-point meshes are used.
        nksmall: if not None, a DFT bandstructure calculation will be added after the sc run.
        extra_abivars: Dictionary with extra variables passed to ABINIT.
            Keys ending in "_s" are treated as convergence series: one SCF
            input is generated per listed value.
    """
    # Avoid the shared mutable-default pitfall; the historical default is ["godby"].
    if response_models is None:
        response_models = ["godby"]

    # TODO: Cannot use istwfk != 1.
    if gamma:
        if kppa == 1:
            scf_ksampling = KSampling.gamma_centered(kpts=(1, 1, 1))
            nscf_ksampling = KSampling.gamma_centered(kpts=(1, 1, 1))
        elif kppa == 2:
            scf_ksampling = KSampling.gamma_centered(kpts=(2, 2, 2))
            nscf_ksampling = KSampling.gamma_centered(kpts=(2, 2, 2))
        elif kppa < 0:
            # Negative kppa: |kppa|^3 Gamma-centered SCF mesh, coarse 2x2x2 NSCF mesh.
            scf_ksampling = KSampling.gamma_centered(kpts=(-kppa, -kppa, -kppa))
            nscf_ksampling = KSampling.gamma_centered(kpts=(2, 2, 2))
        elif kppa <= 13:
            scf_ksampling = KSampling.gamma_centered(kpts=(kppa, kppa, kppa))
            nscf_ksampling = KSampling.gamma_centered(kpts=(kppa, kppa, kppa))
        else:
            scf_ksampling = KSampling.automatic_density(structure, kppa, chksymbreak=0, shifts=(0, 0, 0))
            nscf_ksampling = KSampling.automatic_density(structure, kppa, chksymbreak=0, shifts=(0, 0, 0))
    else:
        # this is the original behaviour before the development of the gwwrapper
        scf_ksampling = KSampling.automatic_density(structure, kppa, chksymbreak=0)
        nscf_ksampling = KSampling.automatic_density(structure, kppa, chksymbreak=0)
    print(scf_ksampling)
    print(nscf_ksampling)

    if "istwfk" not in extra_abivars:
        extra_abivars["istwfk"] = "*1"

    # Build one SCF input per value of every "<var>_s" convergence series.
    # BUG FIX: the original appended to `scf_inputs` but later read
    # `scf_strategy` (NameError); a single consistently-named list is used here.
    scf_strategy = []
    to_add = {}
    # BUG FIX: iterate over a snapshot of the keys -- entries are popped inside
    # the loop, and mutating a dict while iterating its live key view raises
    # RuntimeError on Python 3.
    for k in list(extra_abivars.keys()):
        if k[-2:] == '_s':
            var = k[:len(k) - 2]
            values = extra_abivars.pop(k)
            # Remember the last value of the series (kept for later steps).
            to_add.update({k: values[-1]})
            for value in values:
                extra_abivars[var] = value
                # NOTE(review): assumes 'ecut' is present whenever a series is
                # supplied -- confirm with callers.
                extra_abivars['pawecutdg'] = extra_abivars['ecut'] * 2
                scf_strategy.append(ScfStrategy(structure, pseudos, scf_ksampling, accuracy=accuracy,
                                                spin_mode=spin_mode, smearing=smearing, charge=charge,
                                                scf_algorithm=None, nband=scf_nband, **extra_abivars))
    extra_abivars.update(to_add)
    # temporary for testing a new approach ...
    spread_scr = not os.path.isfile('no_spread_scr')
    if len(scf_strategy) == 0:
        # No convergence series requested: build the single default SCF input.
        scf_strategy.append(ScfStrategy(structure, pseudos, scf_ksampling, accuracy=accuracy, spin_mode=spin_mode,
                                        smearing=smearing, charge=charge, scf_algorithm=None, nband=scf_nband,
                                        **extra_abivars))

    # NSCF run with ~10% buffer bands on top of the largest requested nband.
    nscf_strategy = NscfStrategy(scf_strategy[-1], nscf_ksampling, int(max(nscf_nband) * 1.1) + 1,
                                 nbdbuf=int(0.1 * max(nscf_nband)), nstep=200, **extra_abivars)

    if scr_nband is None:
        scr_nband = nscf_nband
    if sigma_nband is None:
        sigma_nband = nscf_nband
    if ecutsigx < max(ecuteps):
        ecutsigx = max(ecuteps)

    sigma_strategy = []

    if 'cd' in response_models:
        # Hilbert-transform settings shared by all contour-deformation runs.
        hilbert = HilbertTransform(nomegasf=100, domegasf=None, spmeth=1, nfreqre=None, freqremax=None, nfreqim=None,
                                   freqremin=None)

    # One screening + self-energy pair per (response model, ecuteps, nband) combination.
    for response_model in response_models:
        for ecuteps_v in ecuteps:
            for nscf_nband_v in nscf_nband:
                scr_nband = nscf_nband_v
                sigma_nband = nscf_nband_v
                if response_model == 'cd':
                    screening = Screening(ecuteps_v, scr_nband, w_type="RPA", sc_mode="one_shot", hilbert=hilbert,
                                          ecutwfn=None, inclvkb=inclvkb)
                    self_energy = SelfEnergy("gw", "one_shot", sigma_nband, ecutsigx, screening, hilbert=hilbert)
                else:
                    ppmodel = response_model
                    screening = Screening(ecuteps_v, scr_nband, w_type="RPA", sc_mode="one_shot", ecutwfn=None,
                                          inclvkb=inclvkb)
                    self_energy = SelfEnergy("gw", "one_shot", sigma_nband, ecutsigx, screening, ppmodel=ppmodel,
                                             gw_qprange=1)
                scr_strategy = ScreeningStrategy(scf_strategy[-1], nscf_strategy, screening, **extra_abivars)
                sigma_strategy.append(SelfEnergyStrategy(scf_strategy[-1], nscf_strategy, scr_strategy, self_energy,
                                                         **extra_abivars))

    if work_class is None:
        work_class = G0W0Work
    print(work_class)
    # NOTE(review): ScfStrategy/NscfStrategy/ScreeningStrategy/SelfEnergyStrategy come
    # from the ``.strategies`` import that is commented out at the top of this module;
    # it must be restored before this function can run.
    return work_class(scf_strategy, nscf_strategy, scr_strategy, sigma_strategy, workdir=workdir, manager=manager,
                      spread_scr=spread_scr, nksmall=nksmall)
#def bse_with_mdf_work(structure, pseudos, scf_kppa, nscf_nband, nscf_ngkpt, nscf_shiftk,
# ecuteps, bs_loband, bs_nband, soenergy, mdf_epsinf,
# exc_type="TDA", bs_algo="haydock", accuracy="normal", spin_mode="polarized",
# smearing="fermi_dirac:0.1 eV", charge=0.0, scf_algorithm=None, workdir=None, manager=None,
# work_class=None, **extra_abivars):
# """
# Returns a :class:`Work` object that performs a GS + NSCF + Bethe-Salpeter calculation.
# The self-energy corrections are approximated with the scissors operator.
# The screening in modeled by the model dielectric function.
#
# Args:
# structure: :class:`Structure` object.
# pseudos: List of `Pseudo` objects.
# scf_kppa: Defines the sampling used for the SCF run.
# nscf_nband: Number of bands included in the NSCF run.
# nscf_ngkpt: Divisions of the k-mesh used for the NSCF and the BSE run.
# nscf_shiftk: Shifts used for the NSCF and the BSE run.
# ecuteps: Cutoff energy [Ha] for the screening matrix.
# bs_loband: Index of the first occupied band included the e-h basis set
# (ABINIT convention i.e. first band starts at 1).
# Can be scalar or array of shape (nsppol,)
# bs_nband: Highest band index used for the construction of the e-h basis set.
# soenergy: Scissor energy in Hartree.
# mdf_epsinf: Value of the macroscopic dielectric function used in expression for the model dielectric function.
# exc_type: Approximation used for the BSE Hamiltonian (Tamm-Dancoff or coupling).
# bs_algo: Algorithm for the computation of the macroscopic dielectric function.
# accuracy: Accuracy of the calculation.
# spin_mode: Spin polarization.
# smearing: Smearing technique.
# charge: Electronic charge added to the unit cell.
# scf_algorithm: Algorithm used for solving the SCF cycle.
# workdir: Working directory.
# manager: :class:`TaskManger` instance.
# extra_abivars: Dictionary with extra variables passed to ABINIT.
# """
# # TODO: Cannot use istwfk != 1.
# if "istwfk" not in extra_abivars:
# extra_abivars["istwfk"] = "*1"
#
# # Ground-state strategy.
# scf_ksampling = KSampling.automatic_density(structure, scf_kppa, chksymbreak=0)
#
# scf_strategy = ScfStrategy(structure, pseudos, scf_ksampling,
# accuracy=accuracy, spin_mode=spin_mode,
# smearing=smearing, charge=charge, scf_algorithm=None, **extra_abivars)
#
# # NSCF calculation with the randomly-shifted k-mesh.
# nscf_ksampling = KSampling.monkhorst(nscf_ngkpt, shiftk=nscf_shiftk, chksymbreak=0)
#
# nscf_strategy = NscfStrategy(scf_strategy, nscf_ksampling, nscf_nband, **extra_abivars)
#
# # Strategy for the BSE calculation.
# exc_ham = ExcHamiltonian(bs_loband, bs_nband, soenergy, coulomb_mode="model_df", ecuteps=ecuteps,
# spin_mode=spin_mode, mdf_epsinf=mdf_epsinf, exc_type=exc_type, algo=bs_algo,
# bs_freq_mesh=None, with_lf=True, zcut=None)
#
# bse_strategy = MdfBse_Strategy(scf_strategy, nscf_strategy, exc_ham, **extra_abivars)
#
# if work_class is None: work_class = BseMdfWork
# return work_class(scf_strategy, nscf_strategy, bse_strategy, workdir=workdir, manager=manager)
| |
from __future__ import print_function, unicode_literals
import logging
import re
import itertools
from prompt_toolkit.completion import Completer, Completion
from .packages.sqlcompletion import suggest_type
from .packages.parseutils import last_word
from .packages.vspecial.namedqueries import namedqueries
try:
from collections import Counter
except ImportError:
# python 2.6
from .packages.counter import Counter
_logger = logging.getLogger(__name__)
class VCompleter(Completer):
    """prompt_toolkit completer for the vcli SQL shell.

    Combines static word lists (keywords, built-in functions, datatypes)
    with database metadata collected at runtime (schemas, tables, views,
    functions, datatypes), and turns the suggestion descriptors returned
    by ``suggest_type`` into prompt_toolkit ``Completion`` objects.
    """

    # SQL keywords offered with simple startswith (non-fuzzy) matching.
    keywords = [
        'ACCESS', 'ADD', 'ALL', 'ALTER TABLE', 'AND', 'ANY', 'AS',
        'ASC', 'AUDIT', 'BETWEEN', 'BY', 'CASCADE', 'CASE', 'CHAR', 'CHECK',
        'CLUSTER', 'COLUMN', 'COMMENT', 'COMPRESS', 'CONNECT', 'COPY',
        'CREATE', 'CURRENT', 'DATABASE', 'DATE', 'DECIMAL', 'DEFAULT',
        'DELETE FROM', 'DELIMITER', 'DESC', 'DESCRIBE', 'DISTINCT', 'DROP',
        'ELSE', 'ENCODING', 'ESCAPE', 'EXCLUSIVE', 'EXISTS', 'EXTENSION',
        'FILE', 'FLOAT', 'FOR', 'FORMAT', 'FORCE_QUOTE', 'FORCE_NOT_NULL',
        'FREEZE', 'FROM', 'FULL', 'FUNCTION', 'GRANT', 'GROUP BY',
        'HAVING', 'HEADER', 'IDENTIFIED', 'ILIKE', 'IMMEDIATE', 'IN', 'INCREMENT',
        'INDEX', 'INITIAL', 'INSERT INTO', 'INTEGER', 'INTERSECT', 'INTO',
        'IS', 'JOIN', 'LEFT', 'LEVEL', 'LIKE', 'LIMIT', 'LOCK', 'LONG',
        'MAXEXTENTS', 'MINUS', 'MLSLABEL', 'MODE', 'MODIFY', 'NOAUDIT',
        'NOCOMPRESS', 'NOT', 'NOWAIT', 'NULL', 'NUMBER', 'OIDS', 'OF',
        'OFFLINE', 'ON', 'ONLINE', 'OPTION', 'OR', 'ORDER BY', 'OUTER',
        'OWNER', 'PCTFREE', 'PRIMARY', 'PRIOR', 'PRIVILEGES', 'QUOTE',
        'RAW', 'RENAME', 'RESOURCE', 'REVOKE', 'RIGHT', 'ROW', 'ROWID',
        'ROWNUM', 'ROWS', 'SCHEMA', 'SELECT', 'SESSION', 'SET', 'SHARE',
        'SIZE', 'SMALLINT', 'START', 'SUCCESSFUL', 'SYNONYM', 'SYSDATE',
        'TABLE', 'TEMPLATE', 'THEN', 'TO', 'TRIGGER', 'TRUNCATE', 'UID',
        'UNION', 'UNIQUE', 'UPDATE', 'USE', 'USER', 'USING', 'VALIDATE',
        'VALUES', 'VARCHAR', 'VARCHAR2', 'VIEW', 'WHEN', 'WHENEVER', 'WHERE',
        'WITH', 'EXPLAIN', 'PROFILE'
    ]

    # Hardcoded built-in function names, offered with startswith matching.
    functions = [
        'AVG', 'COUNT', 'DISTINCT', 'EXPORT_OBJECTS', 'FIRST', 'FORMAT',
        'LAST', 'LCASE', 'LEN', 'MAX', 'MIN', 'MID', 'NOW', 'ROUND', 'SUM',
        'TOP', 'UCASE'
    ]

    # Hardcoded datatype names, suggested next to schema-local types.
    datatypes = [
        'BIGINT', 'BOOLEAN', 'CHAR', 'DATE', 'DOUBLE PRECISION', 'INT',
        'INTEGER', 'NUMERIC', 'REAL', 'TEXT', 'VARCHAR'
    ]

    def __init__(self, smart_completion=True, vspecial=None):
        """
        :param smart_completion: when False, get_completions() performs a
            plain startswith match over every known word.
        :param vspecial: provider of backslash ("special") commands; may be
            None, in which case 'special' suggestions are skipped.
        """
        super(VCompleter, self).__init__()
        self.smart_completion = smart_completion
        self.vspecial = vspecial
        # Individual words of multi-word keywords ('ALTER TABLE' -> ALTER,
        # TABLE); identifiers colliding with these are quoted by escape_name.
        self.reserved_words = set()
        for x in self.keywords:
            self.reserved_words.update(x.split())
        # Identifiers matching this pattern need no quoting.
        self.name_pattern = re.compile("^[_a-z][_a-z0-9\$]*$")

        self.databases = []
        # dbmetadata[kind][schema][name] -> per-object metadata:
        # a column-name list for relations, None placeholder otherwise.
        self.dbmetadata = {'tables': {}, 'views': {}, 'functions': {},
                           'datatypes': {}}
        self.search_path = []
        # Flat word set used when smart completion is disabled.
        self.all_completions = set(self.keywords + self.functions)

    def escape_name(self, name):
        """Double-quote *name* if it is not a plain lowercase identifier or
        collides with a reserved word / built-in function name."""
        name = name.decode('utf-8') if type(name) == bytes else name
        if name and ((not self.name_pattern.match(name))
                     or (name.upper() in self.reserved_words)
                     or (name.upper() in self.functions)):
            name = '"%s"' % name

        return name

    def unescape_name(self, name):
        """ Unquote a string."""
        if name and name[0] == '"' and name[-1] == '"':
            name = name[1:-1]

        return name

    def escaped_names(self, names):
        """Apply escape_name to each element; empty list for falsy input."""
        if names:
            return [self.escape_name(name) for name in names]
        return []

    def extend_database_names(self, databases):
        """Register additional database names for completion."""
        databases = self.escaped_names(databases)
        self.databases.extend(databases)

    def extend_keywords(self, additional_keywords):
        """Register extra keywords (also added to the flat word set)."""
        self.keywords.extend(additional_keywords)
        self.all_completions.update(additional_keywords)

    def extend_schemata(self, schemata):
        # schemata is a list of schema names
        schemata = self.escaped_names(schemata)
        metadata = self.dbmetadata['tables']
        for schema in schemata:
            metadata[schema] = {}

        # dbmetadata.values() are the 'tables' and 'functions' dicts
        for metadata in self.dbmetadata.values():
            for schema in schemata:
                metadata[schema] = {}
        self.all_completions.update(schemata)

    def extend_relations(self, data, kind):
        """ extend metadata for tables or views

        :param data: list of (schema_name, rel_name) tuples
        :param kind: either 'tables' or 'views'
        :return:
        """
        data = [self.escaped_names(d) for d in data]

        # dbmetadata['tables']['schema_name']['table_name'] should be a list of
        # column names. Default to an asterisk
        metadata = self.dbmetadata[kind]
        for schema, relname in data:
            try:
                metadata[schema][relname] = ['*']
            except KeyError:
                # Schema was never registered via extend_schemata; log and skip.
                _logger.error('%r %r listed in unrecognized schema %r',
                              kind, relname, schema)
            self.all_completions.add(relname)

    def extend_columns(self, column_data, kind):
        """ extend column metadata

        :param column_data: list of (schema_name, rel_name, column_name) tuples
        :param kind: either 'tables' or 'views'
        :return:
        """
        column_data = [self.escaped_names(d) for d in column_data]

        metadata = self.dbmetadata[kind]
        for schema, relname, column in column_data:
            try:
                metadata[schema][relname].append(column)
            except KeyError:
                # Unknown schema or relation: silently skip the column.
                pass
            else:
                self.all_completions.add(column)

    def extend_functions(self, func_data):
        # func_data is an iterator of (schema_name, function_name)

        # dbmetadata['functions']['schema_name']['function_name'] should return
        # function metadata -- right now we're not storing any further metadata
        # so just default to None as a placeholder
        metadata = self.dbmetadata['functions']

        for f in func_data:
            schema, func = self.escaped_names(f)
            metadata[schema][func] = None
            self.all_completions.add(func)

    def extend_datatypes(self, type_data):
        # dbmetadata['datatypes'][schema_name][type_name] should store type
        # metadata, such as composite type field names. Currently, we're not
        # storing any metadata beyond typename, so just store None
        meta = self.dbmetadata['datatypes']

        if type_data:
            for t in type_data:
                schema, type_name = self.escaped_names(t)
                meta[schema][type_name] = None
                self.all_completions.add(type_name)

    def set_search_path(self, search_path):
        """Set the schema search path used for unqualified name resolution."""
        self.search_path = self.escaped_names(search_path)

    def reset_completions(self):
        """Drop all runtime-collected metadata, keeping the static word lists."""
        self.databases = []
        self.special_commands = []
        self.search_path = []
        self.dbmetadata = {'tables': {}, 'views': {}, 'functions': {},
                           'datatypes': {}}
        self.all_completions = set(self.keywords + self.functions)

    def find_matches(self, text, collection, start_only=False, fuzzy=True,
                     meta=None, meta_collection=None):
        """Find completion matches for the given text.

        Given the user's input text and a collection of available
        completions, find completions matching the last word of the
        text.

        If `start_only` is True, the text will match an available
        completion only at the beginning. Otherwise, a completion is
        considered a match if the text appears anywhere within it.

        `meta` is a single display-meta string applied to every match;
        alternatively `meta_collection` supplies one meta string per
        collection item.

        yields prompt_toolkit Completion instances for any matches found
        in the collection of available completions.
        """
        text = last_word(text, include='most_punctuations').lower()

        # Construct a `_match` function for either fuzzy or non-fuzzy matching
        # The match function returns a 2-tuple used for sorting the matches,
        # or None if the item doesn't match
        if fuzzy:
            # e.g. text 'abc' -> pattern 'a.*?b.*?c': characters must appear
            # in order but need not be adjacent.
            regex = '.*?'.join(map(re.escape, text))
            pat = re.compile('(%s)' % regex)

            def _match(item):
                r = pat.search(self.unescape_name(item))
                if r:
                    # Shorter and earlier matches sort first.
                    return len(r.group()), r.start()
        else:
            match_end_limit = len(text) if start_only else None

            def _match(item):
                match_point = item.lower().find(text, 0, match_end_limit)
                if match_point >= 0:
                    return match_point, 0

        if meta_collection:
            # Each possible completion in the collection has a corresponding
            # meta-display string
            collection = zip(collection, meta_collection)
        else:
            # All completions have an identical meta
            collection = zip(collection, itertools.repeat(meta))

        completions = []
        for item, meta in collection:
            sort_key = _match(item)
            if sort_key:
                if meta and len(meta) > 50:
                    # Truncate meta-text to 50 characters, if necessary
                    meta = meta[:47] + u'...'
                completions.append((sort_key, item, meta))

        return [Completion(item, -len(text), display_meta=meta)
                for sort_key, item, meta in sorted(completions)]

    def get_completions(self, document, complete_event, smart_completion=None):
        """prompt_toolkit entry point: return completions for *document*."""
        word_before_cursor = document.get_word_before_cursor(WORD=True)
        if smart_completion is None:
            smart_completion = self.smart_completion

        # If smart_completion is off then match any word that starts with
        # 'word_before_cursor'.
        if not smart_completion:
            return self.find_matches(word_before_cursor, self.all_completions,
                                     start_only=True, fuzzy=False)

        completions = []
        # suggest_type inspects the SQL text and yields descriptors of what
        # kind of object can appear at the cursor.
        suggestions = suggest_type(document.text, document.text_before_cursor)

        for suggestion in suggestions:
            _logger.debug('Suggestion type: %r', suggestion['type'])

            if suggestion['type'] == 'column':
                tables = suggestion['tables']
                _logger.debug("Completion column scope: %r", tables)
                scoped_cols = self.populate_scoped_cols(tables)
                if suggestion.get('drop_unique'):
                    # drop_unique is used for 'tb11 JOIN tbl2 USING (...' which
                    # should suggest only columns that appear in more than one
                    # table
                    scoped_cols = [col for (col, count)
                                   in Counter(scoped_cols).items()
                                   if count > 1 and col != '*']

                cols = self.find_matches(word_before_cursor, scoped_cols,
                                         meta='column')
                completions.extend(cols)

            elif suggestion['type'] == 'function':
                # suggest user-defined functions using substring matching
                funcs = self.populate_schema_objects(
                    suggestion['schema'], 'functions')
                user_funcs = self.find_matches(word_before_cursor, funcs,
                                               meta='function')
                completions.extend(user_funcs)

                if not suggestion['schema']:
                    # also suggest hardcoded functions using startswith
                    # matching
                    predefined_funcs = self.find_matches(word_before_cursor,
                                                         self.functions,
                                                         start_only=True,
                                                         fuzzy=False,
                                                         meta='function')
                    completions.extend(predefined_funcs)

            elif suggestion['type'] == 'schema':
                schema_names = self.dbmetadata['tables'].keys()

                # Unless we're sure the user really wants them, hide schema
                # names starting with pg_, which are mostly temporary schemas
                if not word_before_cursor.startswith('pg_'):
                    schema_names = [s for s in schema_names
                                    if not s.startswith('pg_')]

                schema_names = self.find_matches(word_before_cursor,
                                                 schema_names,
                                                 meta='schema')
                completions.extend(schema_names)

            elif suggestion['type'] == 'table':
                tables = self.populate_schema_objects(
                    suggestion['schema'], 'tables')

                # Unless we're sure the user really wants them, don't suggest
                # the pg_catalog tables that are implicitly on the search path
                if not suggestion['schema'] and (
                        not word_before_cursor.startswith('pg_')):
                    tables = [t for t in tables if not t.startswith('pg_')]

                tables = self.find_matches(word_before_cursor, tables,
                                           meta='table')
                completions.extend(tables)

            elif suggestion['type'] == 'view':
                views = self.populate_schema_objects(
                    suggestion['schema'], 'views')

                if not suggestion['schema'] and (
                        not word_before_cursor.startswith('pg_')):
                    views = [v for v in views if not v.startswith('pg_')]

                views = self.find_matches(word_before_cursor, views,
                                          meta='view')
                completions.extend(views)

            elif suggestion['type'] == 'alias':
                aliases = suggestion['aliases']
                aliases = self.find_matches(word_before_cursor, aliases,
                                            meta='table alias')
                completions.extend(aliases)

            elif suggestion['type'] == 'database':
                dbs = self.find_matches(word_before_cursor, self.databases,
                                        meta='database')
                completions.extend(dbs)

            elif suggestion['type'] == 'keyword':
                keywords = self.find_matches(word_before_cursor, self.keywords,
                                             start_only=True,
                                             fuzzy=False,
                                             meta='keyword')
                completions.extend(keywords)

            elif suggestion['type'] == 'special':
                if not self.vspecial:
                    # No special-command provider configured; nothing to offer.
                    continue

                commands = self.vspecial.commands
                cmd_names = commands.keys()
                desc = [commands[cmd].description for cmd in cmd_names]

                special = self.find_matches(word_before_cursor, cmd_names,
                                            start_only=True,
                                            fuzzy=False,
                                            meta_collection=desc)
                completions.extend(special)

            elif suggestion['type'] == 'datatype':
                # suggest custom datatypes
                types = self.populate_schema_objects(
                    suggestion['schema'], 'datatypes')
                types = self.find_matches(word_before_cursor, types,
                                          meta='datatype')
                completions.extend(types)

                if not suggestion['schema']:
                    # Also suggest hardcoded types
                    types = self.find_matches(word_before_cursor,
                                              self.datatypes, start_only=True,
                                              fuzzy=False, meta='datatype')
                    completions.extend(types)

            elif suggestion['type'] == 'namedquery':
                queries = self.find_matches(word_before_cursor, namedqueries.list(),
                                            start_only=False, fuzzy=True,
                                            meta='named query')
                completions.extend(queries)

        return completions

    def populate_scoped_cols(self, scoped_tbls):
        """ Find all columns in a set of scoped_tables
        :param scoped_tbls: list of (schema, table, alias) tuples
        :return: list of column names
        """
        columns = []
        meta = self.dbmetadata

        for tbl in scoped_tbls:
            if tbl[0]:
                # A fully qualified schema.relname reference
                schema = self.escape_name(tbl[0])
                relname = self.escape_name(tbl[1])

                # We don't know if schema.relname is a table or view. Since
                # tables and views cannot share the same name, we can check one
                # at a time
                try:
                    columns.extend(meta['tables'][schema][relname])

                    # Table exists, so don't bother checking for a view
                    continue
                except KeyError:
                    pass

                try:
                    columns.extend(meta['views'][schema][relname])
                except KeyError:
                    pass
            else:
                # Schema not specified, so traverse the search path looking for
                # a table or view that matches. Note that in order to get proper
                # shadowing behavior, we need to check both views and tables for
                # each schema before checking the next schema
                for schema in self.search_path:
                    relname = self.escape_name(tbl[1])

                    try:
                        columns.extend(meta['tables'][schema][relname])
                        break
                    except KeyError:
                        pass

                    try:
                        columns.extend(meta['views'][schema][relname])
                        break
                    except KeyError:
                        pass

        return columns

    def populate_schema_objects(self, schema, obj_type):
        """Returns list of tables or functions for a (optional) schema"""
        metadata = self.dbmetadata[obj_type]

        if schema:
            try:
                objects = metadata[schema].keys()
            except KeyError:
                # schema doesn't exist
                objects = []
        else:
            # No schema given: gather objects from every schema on the
            # search path.
            schemas = self.search_path
            objects = [obj for schema in schemas
                       for obj in metadata[schema].keys()]

        return objects
| |
#!/usr/bin/env python
import argparse
import os
import subprocess
import sys
from lib.config import LIBCHROMIUMCONTENT_COMMIT, BASE_URL, PLATFORM, \
enable_verbose_mode, is_verbose_mode, get_target_arch
from lib.util import execute_stdout, get_electron_version, scoped_cwd
# Repository root (this file lives in <root>/script/).
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
VENDOR_DIR = os.path.join(SOURCE_ROOT, 'vendor')
PYTHON_26_URL = 'https://chromium.googlesource.com/chromium/deps/python_26'

# `dict.has_key` was removed in Python 3; the `in` operator behaves
# identically on Python 2 and is the portable spelling.
if 'CI' in os.environ:
  # On CI use the locally installed npm so builds are reproducible.
  NPM = os.path.join(SOURCE_ROOT, 'node_modules', '.bin', 'npm')
else:
  NPM = 'npm'
  if sys.platform in ['win32', 'cygwin']:
    NPM += '.cmd'
def main():
  """Bootstrap the Electron checkout: fetch deps, then generate projects."""
  os.chdir(SOURCE_ROOT)
  args = parse_args()
  defines = args_to_defines(args)
  # Refuse (interactively) to run as root unless --yes was passed.
  if not args.yes and PLATFORM != 'win32':
    check_root()
  if args.verbose:
    enable_verbose_mode()
  if sys.platform == 'cygwin':
    update_win32_python()
  update_submodules()
  libcc_source_path = args.libcc_source_path
  libcc_shared_library_path = args.libcc_shared_library_path
  libcc_static_library_path = args.libcc_static_library_path
  # Redirect to use local libchromiumcontent build.
  if args.build_libchromiumcontent:
    build_libchromiumcontent(args.verbose, args.target_arch, defines)
    dist_dir = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor',
                            'libchromiumcontent', 'dist', 'main')
    libcc_source_path = os.path.join(dist_dir, 'src')
    libcc_shared_library_path = os.path.join(dist_dir, 'shared_library')
    libcc_static_library_path = os.path.join(dist_dir, 'static_library')
  if PLATFORM != 'win32':
    if not args.disable_clang and args.clang_dir == '':
      # Download prebuilt clang binaries.
      update_clang()
  setup_python_libs()
  update_node_modules('.')
  bootstrap_brightray(args.dev, args.url, args.target_arch,
                      libcc_source_path, libcc_shared_library_path,
                      libcc_static_library_path)
  if PLATFORM == 'linux':
    download_sysroot(args.target_arch)
  create_chrome_version_h()
  touch_config_gypi()
  run_update(defines, args.disable_clang, args.clang_dir)
  # Install the modules used by the test suite against Electron's headers.
  update_electron_modules('spec', args.target_arch)
def parse_args():
  """Build and evaluate the command-line interface; returns the parsed args."""
  parser = argparse.ArgumentParser(description='Bootstrap this project')
  parser.add_argument('-u', '--url',
                      help='The base URL from which to download '
                      'libchromiumcontent (i.e., the URL you passed to '
                      'libchromiumcontent\'s script/upload script',
                      default=BASE_URL,
                      required=False)
  parser.add_argument('-v', '--verbose',
                      action='store_true',
                      help='Prints the output of the subprocesses')
  parser.add_argument('-d', '--dev', action='store_true',
                      help='Do not download static_library build')
  parser.add_argument('-y', '--yes', '--assume-yes',
                      action='store_true',
                      help='Run non-interactively by assuming "yes" to all ' \
                           'prompts.')
  parser.add_argument('--target_arch', default=get_target_arch(),
                      help='Manually specify the arch to build for')
  parser.add_argument('--clang_dir', default='', help='Path to clang binaries')
  parser.add_argument('--disable_clang', action='store_true',
                      help='Use compilers other than clang for building')
  parser.add_argument('--build_libchromiumcontent', action='store_true',
                      help='Build local version of libchromiumcontent')
  # The three --libcc_* options must be given together (or all omitted).
  parser.add_argument('--libcc_source_path', required=False,
                      help='The source path of libchromiumcontent. ' \
                           'NOTE: All options of libchromiumcontent are ' \
                           'required OR let electron choose it')
  parser.add_argument('--libcc_shared_library_path', required=False,
                      help='The shared library path of libchromiumcontent.')
  parser.add_argument('--libcc_static_library_path', required=False,
                      help='The static library path of libchromiumcontent.')
  return parser.parse_args()
def args_to_defines(args):
  """Translate the parsed CLI options into a GYP 'defines' string."""
  parts = []
  if args.disable_clang:
    parts.append(' clang=0')
  if args.clang_dir:
    # A custom clang cannot load Chrome's bundled plugins.
    parts.append(' make_clang_dir=' + args.clang_dir)
    parts.append(' clang_use_chrome_plugins=0')
  return ''.join(parts)
def check_root():
  """Warn when running as root and let the user bail out (POSIX only)."""
  if os.geteuid() == 0:
    # Parenthesized print: identical output on Python 2, and the file now
    # parses under Python 3 as well (the old `print "..."` statement was a
    # SyntaxError there).
    print("We suggest not running this as root, unless you're really sure.")
    choice = raw_input("Do you want to continue? [y/N]: ")
    if choice not in ('y', 'Y'):
      sys.exit(0)
def update_submodules():
  """Sync and initialise all git submodules, recursively."""
  for cmd in (['git', 'submodule', 'sync', '--recursive'],
              ['git', 'submodule', 'update', '--init', '--recursive']):
    execute_stdout(cmd)
def setup_python_libs():
  """Build the vendored pure-Python libraries in place so they import."""
  for lib in ('requests', 'boto'):
    lib_dir = os.path.join(VENDOR_DIR, lib)
    with scoped_cwd(lib_dir):
      execute_stdout([sys.executable, 'setup.py', 'build'])
def bootstrap_brightray(is_dev, url, target_arch, libcc_source_path,
                        libcc_shared_library_path,
                        libcc_static_library_path):
  """Run brightray's own bootstrap script, forwarding the libcc paths.

  The three libcc_* paths are forwarded only when all of them are set;
  otherwise brightray downloads libchromiumcontent from *url*.
  """
  bootstrap = os.path.join(VENDOR_DIR, 'brightray', 'script', 'bootstrap')
  args = [
    '--commit', LIBCHROMIUMCONTENT_COMMIT,
    '--target_arch', target_arch,
    url
  ]
  if is_dev:
    args = ['--dev'] + args
  # PEP 8: compare against None with `is not None`, not `!= None`.
  if (libcc_source_path is not None and
      libcc_shared_library_path is not None and
      libcc_static_library_path is not None):
    args += ['--libcc_source_path', libcc_source_path,
             '--libcc_shared_library_path', libcc_shared_library_path,
             '--libcc_static_library_path', libcc_static_library_path]
  execute_stdout([sys.executable, bootstrap] + args)
def set_clang_env(env):
  """Point CC/CXX in the given environment dict at the bundled clang."""
  llvm_bin = os.path.join(SOURCE_ROOT, 'vendor', 'llvm-build',
                          'Release+Asserts', 'bin')
  env['CC'] = os.path.join(llvm_bin, 'clang')
  env['CXX'] = os.path.join(llvm_bin, 'clang++')
def update_node_modules(dirname, env=None):
  """Run `npm install` in *dirname* with an optional environment override."""
  if env is None:
    env = os.environ.copy()
  if PLATFORM == 'linux':
    # Use prebuilt clang for building native modules.
    set_clang_env(env)
    env['npm_config_clang'] = '1'
  with scoped_cwd(dirname):
    args = [NPM, 'install']
    if is_verbose_mode():
      args += ['--verbose']
    # `dict.has_key` was removed in Python 3; `in` works on both versions.
    if 'CI' in os.environ:
      # Ignore npm install errors when running in CI.
      try:
        execute_stdout(args, env)
      except subprocess.CalledProcessError:
        pass
    else:
      execute_stdout(args, env)
def update_electron_modules(dirname, target_arch):
  """npm-install the modules in *dirname* against Electron's headers."""
  env = os.environ.copy()
  env.update({
    'npm_config_arch': target_arch,
    'npm_config_target': get_electron_version(),
    'npm_config_disturl': 'https://atom.io/download/atom-shell',
  })
  update_node_modules(dirname, env)
def update_win32_python():
  """Clone Chromium's Python 2.6 into vendor/ once (cygwin builds only)."""
  with scoped_cwd(VENDOR_DIR):
    if os.path.exists('python_26'):
      return
    execute_stdout(['git', 'clone', PYTHON_26_URL])
def build_libchromiumcontent(verbose, target_arch, defines):
  """Invoke script/build-libchromiumcontent.py with matching flags."""
  script = os.path.join(SOURCE_ROOT, 'script', 'build-libchromiumcontent.py')
  args = [sys.executable, script]
  if verbose:
    args.append('-v')
  if defines:
    args.extend(['--defines', defines])
  args.extend(['--target_arch', target_arch])
  execute_stdout(args)
def update_clang():
  """Download/refresh the prebuilt clang toolchain (POSIX shell script)."""
  script = os.path.join(SOURCE_ROOT, 'script', 'update-clang.sh')
  execute_stdout([script])
def download_sysroot(target_arch):
  """Fetch the Linux sysroot for *target_arch*."""
  # install-sysroot.py expects Debian-style architecture names.
  debian_arch = {'ia32': 'i386', 'x64': 'amd64'}.get(target_arch, target_arch)
  execute_stdout([sys.executable,
                  os.path.join(SOURCE_ROOT, 'script', 'install-sysroot.py'),
                  '--arch', debian_arch])
def create_chrome_version_h():
  """Generate atom/common/chrome_version.h from the libchromiumcontent VERSION.

  The template's {PLACEHOLDER} token is replaced with the Chrome version
  string; the target file is rewritten only when its content actually
  changed, so incremental builds are not invalidated needlessly.
  """
  version_file = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor',
                              'libchromiumcontent', 'VERSION')
  target_file = os.path.join(SOURCE_ROOT, 'atom', 'common', 'chrome_version.h')
  template_file = os.path.join(SOURCE_ROOT, 'script', 'chrome_version.h.in')
  with open(version_file, 'r') as f:
    version = f.read()
  with open(template_file, 'r') as f:
    template = f.read()
  content = template.replace('{PLACEHOLDER}', version.strip())
  # We update the file only if the content has changed (ignoring line ending
  # differences).
  should_write = True
  if os.path.isfile(target_file):
    with open(target_file, 'r') as f:
      # BUG FIX: strip carriage returns ('\r'), not the letter 'r' — the
      # old code mangled both strings and made the comparison meaningless.
      should_write = f.read().replace('\r', '') != content.replace('\r', '')
  if should_write:
    with open(target_file, 'w') as f:
      f.write(content)
def touch_config_gypi():
  """Write a stub vendor/node/config.gypi if it is missing or differs.

  The previous implementation opened the file with 'w+', which truncates
  it BEFORE reading, so the read always returned '' and the stub was
  rewritten on every run. Read first, then write only when needed.
  """
  config_gypi = os.path.join(SOURCE_ROOT, 'vendor', 'node', 'config.gypi')
  content = "\n{'variables':{}}"
  existing = None
  if os.path.isfile(config_gypi):
    with open(config_gypi, 'r') as f:
      existing = f.read()
  if existing != content:
    with open(config_gypi, 'w') as f:
      f.write(content)
def run_update(defines, disable_clang, clang_dir):
  """Run script/update.py, pointing CC/CXX at bundled clang when enabled."""
  env = os.environ.copy()
  if not disable_clang and clang_dir == '':
    # Build with prebuilt clang.
    set_clang_env(env)
  update_script = os.path.join(SOURCE_ROOT, 'script', 'update.py')
  execute_stdout([sys.executable, update_script, '--defines', defines], env)
# Script entry point: exit with main()'s status code.
if __name__ == '__main__':
  sys.exit(main())
| |
"""
Utilities for interprocess communication between Python and Storm.
"""
from __future__ import absolute_import, print_function, unicode_literals
try:
import simplejson as json
except ImportError:
import json
import logging
import logging.handlers
import os
import sys
from collections import deque
from threading import RLock
from six import PY3
# Module globals
# Map the log-level names accepted in topology config to logging constants.
_PYTHON_LOG_LEVELS = {
    'critical': logging.CRITICAL,
    'error': logging.ERROR,
    'warning': logging.WARNING,
    'info': logging.INFO,
    'debug': logging.DEBUG
}
_log = logging.getLogger('streamparse.ipc')
# pending commands/tuples we read while trying to read task IDs
_pending_commands = deque()
# pending task IDs we read while trying to read commands/tuples
_pending_task_ids = deque()
_pid = os.getpid()
_debug = False
# Populated by read_handshake() from the initial Storm message.
_topology_name = _component_name = _task_id = _conf = _context = None
# Serialize stdin reads / stdout writes across threads.
_reader_lock = RLock()
_writer_lock = RLock()
# Setup stdin line reader and stdout
if PY3:
    # Ensure we don't fall back on the platform-dependent encoding and always
    # use UTF-8 https://docs.python.org/3.4/library/sys.html#sys.stdin
    import io
    _readline = io.TextIOWrapper(sys.stdin.buffer,
                                 encoding='utf-8').readline
else:
    def _readline():
        # Python 2: decode bytes from stdin into unicode explicitly.
        line = sys.stdin.readline()
        return line.decode('utf-8')
_stdout = sys.stdout
# Travis CI has stdout set to an io.StringIO object instead of an
# io.BufferedWriter object which is what's actually used when streamparse is
# running
if hasattr(sys.stdout, 'buffer'):
    _stdout = sys.stdout.buffer
else:
    # Redundant with the assignment above, kept for symmetry/clarity.
    _stdout = sys.stdout
class LogStream(object):
    """Minimal file-like object whose write() forwards to a Python logger.

    Installed as ``sys.stdout``/``sys.stderr`` so stray print output
    cannot corrupt the multilang protocol on the real stdout.
    """

    def __init__(self, logger):
        self.logger = logger

    def write(self, message):
        # Blank lines carry no information; skip them.
        if not message.strip():
            return
        try:
            self.logger.info(message)
        except:
            # Logging itself broke. Restore the real stdout/stderr and
            # re-raise so Storm notices the failure instead of us looping
            # on a broken logging sub-system.
            sys.stdout = sys.__stdout__
            sys.stderr = sys.__stderr__
            raise

    def flush(self):
        """No-op so callers that flush() sys.stdout don't crash."""
        pass
class Tuple(object):
    """Storm's primitive data type passed around via streams.

    :ivar id: the ID of the tuple.
    :type id: str
    :ivar component: component that the tuple was generated from.
    :type component: str
    :ivar stream: the stream that the tuple was emitted into.
    :type stream: str
    :ivar task: the task the tuple was generated from.
    :type task: int
    :ivar values: the payload of the tuple where data is stored.
    :type values: list
    """
    __slots__ = ['id', 'component', 'stream', 'task', 'values']

    def __init__(self, id, component, stream, task, values):
        for name, value in zip(self.__slots__,
                               (id, component, stream, task, values)):
            setattr(self, name, value)

    def __repr__(self):
        fmt = ('Tuple(id={!r}, component={!r}, stream={!r}, task={!r}, '
               'values={!r})')
        return fmt.format(self.id, self.component, self.stream, self.task,
                          self.values)
# Message receiving
def read_message():
    """Read a message from Storm, reconstruct newlines appropriately.
    All of Storm's messages (for either Bolts or Spouts) should be of the form:
    '<command or task_id from prior emit>\nend\n'
    Command example, an incoming tuple to a bolt:
    '{ "id": "-6955786537413359385", "comp": "1", "stream": "1", "task": 9, "tuple": ["snow white and the seven dwarfs", "field2", 3]}\nend\n'
    Command example for a Spout to emit its next tuple:
    '{"command": "next"}\nend\n'
    Example, the task IDs a prior emit was sent to:
    '[12, 22, 24]\nend\n'
    The edge case of where we read '' from _readline indicating EOF, usually
    means that communication with the supervisor has been severed.

    :return: the parsed JSON payload (dict for commands, list for task IDs).
    """
    msg = ""
    num_blank_lines = 0
    while True:
        # readline will return trailing \n so that output is unambiguous, we
        # should only have line == '' if we're at EOF
        with _reader_lock:
            line = _readline()
        if line == 'end\n':
            break
        elif line == '':
            _log.error("Received EOF while trying to read stdin from Storm, "
                       "pipe appears to be broken, exiting.")
            sys.exit(1)
        elif line == '\n':
            # Storm occasionally emits bare newlines; count them and warn
            # periodically rather than failing.
            num_blank_lines += 1
            if num_blank_lines % 1000 == 0:
                _log.warn("While trying to read a command or pending task ID, "
                          "Storm has instead sent {:,} '\\n' messages."
                          .format(num_blank_lines))
            continue
        # Accumulate the payload, re-adding the newline stripped above so
        # embedded newlines survive the round trip.
        msg = '{}{}\n'.format(msg, line[0:-1])
    try:
        return json.loads(msg)
    except Exception:
        _log.error("JSON decode error for message: %r", msg, exc_info=True)
        raise
def read_task_ids():
    """Return the next task-ID list sent by Storm.

    Non-list messages read while waiting are queued on _pending_commands
    so read_command() can hand them out later.
    """
    if _pending_task_ids:
        return _pending_task_ids.popleft()
    msg = read_message()
    while not isinstance(msg, list):
        _pending_commands.append(msg)
        msg = read_message()
    return msg
def read_command():
    """Return the next command/tuple message sent by Storm.

    Task-ID lists read while waiting are queued on _pending_task_ids so
    read_task_ids() can hand them out later.
    """
    if _pending_commands:
        return _pending_commands.popleft()
    msg = read_message()
    while isinstance(msg, list):
        _pending_task_ids.append(msg)
        msg = read_message()
    return msg
def read_tuple():
    """Read the next command from Storm and wrap it in a Tuple."""
    cmd = read_command()
    return Tuple(*[cmd[key]
                   for key in ('id', 'comp', 'stream', 'task', 'tuple')])
def read_handshake():
    """Read and process an initial handshake message from Storm.

    Side effects: writes the PID file, replies with our PID, initialises
    the module-level topology/task globals, configures file logging (when
    streamparse.log.path is set) and replaces sys.stdout/sys.stderr with
    LogStream wrappers. Returns the (conf, context) pair from Storm.
    """
    global _topology_name, _component_name, _task_id, _conf, _context, _debug
    msg = read_message()
    pid_dir, _conf, _context = msg['pidDir'], msg['conf'], msg['context']
    # Write a blank PID file out to the pidDir
    open('{}/{}'.format(pid_dir, str(_pid)), 'w').close()
    send_message({'pid': _pid})
    # Set up globals
    _topology_name = _conf.get('topology.name', '')
    _task_id = _context.get('taskid', '')
    _component_name = _context.get('task->component', {}).get(str(_task_id), '')
    _debug = _conf.get('topology.debug', False)
    # Set up logging
    log_path = _conf.get('streamparse.log.path')
    if log_path:
        root_log = logging.getLogger()
        max_bytes = _conf.get('streamparse.log.max_bytes', 1000000)  # 1 MB
        backup_count = _conf.get('streamparse.log.backup_count', 10)
        # One log file per topology/component/task/process.
        log_file = ('{log_path}/streamparse_{topology_name}_{component_name}_'
                    '{task_id}_{pid}.log'
                    .format(log_path=log_path, topology_name=_topology_name,
                            component_name=_component_name, task_id=_task_id,
                            pid=_pid))
        handler = logging.handlers.RotatingFileHandler(log_file,
                                                       maxBytes=max_bytes,
                                                       backupCount=backup_count)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        root_log.addHandler(handler)
        log_level = _conf.get('streamparse.log.level', 'info')
        log_level = _PYTHON_LOG_LEVELS.get(log_level, logging.INFO)
        if _debug:
            # potentially override logging that was provided if topology.debug
            # was set to true
            log_level = logging.DEBUG
        root_log.setLevel(log_level)
    else:
        # No log path configured: tell Storm so the warning is visible in
        # the topology's own log.
        send_message({
            'command': 'log',
            'msg': ('WARNING: streamparse logging is not configured. Please '
                    'set streamparse.log.path in your config.json.')})
    # Redirect stdout and stderr to ensure that print statements/functions
    # won't disrupt the multilang protocol
    sys.stdout = LogStream(logging.getLogger('streamparse.stdout'))
    sys.stderr = LogStream(logging.getLogger('streamparse.stderr'))
    _log.info('Received initial handshake message from Storm\n%r', msg)
    _log.info('Process ID (%d) sent to Storm', _pid)
    return _conf, _context
# Message sending
def send_message(message):
    """Send a message to Storm via stdout.

    Non-dict payloads are logged and dropped; valid payloads are JSON
    encoded, framed with the multilang 'end' terminator, and written
    atomically under the writer lock.
    """
    if not isinstance(message, dict):
        _log.error("%s.%d attempted to send a non dict message to Storm: %r",
                   _component_name, _pid, message)
        return
    payload = (json.dumps(message) + "\nend\n").encode('utf-8')
    with _writer_lock:
        _stdout.flush()
        _stdout.write(payload)
        _stdout.flush()
| |
# -*- coding: utf-8 -*-
"""
***
Labelling is T O M B S
It depends on the distance between the baseline and its above and below valid (S) cut
Cuts are SIO
Copyright Naver Labs Europe(C) 2018 JL Meunier
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import sys, os
import numpy as np
from lxml import etree
import shapely.affinity
try: #to ease the use without proper Python installation
import TranskribusDU_version
except ImportError:
sys.path.append( os.path.dirname(os.path.dirname( os.path.abspath(sys.argv[0]) )) )
import TranskribusDU_version
from common.trace import traceln
from tasks.DU_CRF_Task import DU_CRF_Task
from tasks.DU_ABPTableSkewed import GraphSkewedCut_H, My_FeatureDefinition_v3, NodeType_PageXml_Cut_Shape, main_command_line
from tasks.DU_ABPTableSkewed import Edge_BL
from tasks.DU_ABPTableSkewed_txtBIO_sepSIO import NodeType_BIESO_to_BIO_Shape
from xml_formats.PageXml import MultiPageXml
from util.Shape import ShapeLoader
#------------------------------------------------------------------------------------------------------
# WE add one feature for _ishort
from crf.Transformer import Transformer
import tasks.DU_ABPTableSkewed
class Block2CutLine_EdgeTransformer_qtty(Transformer):
    """Edge feature extractor for block-to-cut-line edges.

    Produces 2*N features per edge: one group of N for edges going up
    (edge._type < 0) and one group for edges going down, the unused group
    staying zero. The extra 'ishort' feature flags edges shorter than the
    class-level close-distance thresholds (defined further down in this
    module on GraphSkewedCut_H_TOMBS_lines).
    """
    def transform(self, lEdge):
        # N features per direction group.
        N = 5
        a = np.zeros( ( len(lEdge), 2 * N) , dtype=np.float64)
        for i, edge in enumerate(lEdge):
            # z = 0 if edge._type < 0 else N # _type is -1 or 1
            if edge._type < 0:
                z = 0
                ishort = 1 if edge.len < GraphSkewedCut_H_TOMBS_lines.iCutCloseDistanceTop else 0
            else:
                z = N
                ishort = 1 if edge.len < GraphSkewedCut_H_TOMBS_lines.iCutCloseDistanceBot else 0
            # NOTE(review): edge.B.set_support / edge.A._in_edge_up/_down are
            # attributes of the project's edge objects — semantics assumed
            # from names, confirm against tasks.DU_ABPTableSkewed.
            a[i, z:z+N] = (1
                , len(edge.B.set_support)
                , edge.A._in_edge_up
                , edge.A._in_edge_down
                , ishort
                )
            # print(a[i,:].tolist())
        # traceln("Block2CutLine_EdgeTransformer", a[:min(100, len(lEdge)),])
        return a
# Monkey-patch the base module so the skewed-cut pipeline picks up this
# extended transformer instead of its own.
tasks.DU_ABPTableSkewed.Block2CutLine_EdgeTransformer_qtty = Block2CutLine_EdgeTransformer_qtty
class Block2CutLine_FakeEdgeTransformer(Transformer):
    """
    a fake transformer that return as many features as the union of real ones above
    """
    def transform(self, lEdge):
        # Only ever invoked with an empty edge list; the zero-row matrix
        # merely fixes the feature dimensionality (2*8 + 2*5 columns).
        assert not lEdge
        return np.zeros((len(lEdge), 2 * 8 + 2 * 5), dtype=np.float64)
# Monkey-patch the base module so the pipeline uses this placeholder.
tasks.DU_ABPTableSkewed.Block2CutLine_FakeEdgeTransformer = Block2CutLine_FakeEdgeTransformer
#------------------------------------------------------------------------------------------------------
class GraphSkewedCut_H_TOMBS_lines(GraphSkewedCut_H):
    """Skewed horizontal-cut graph that labels text blocks with TOMBS.

    T/M/B/S labels are derived from the distance between a block's
    baseline and the nearest valid (S) cut above and below it.
    """
    # reflecting text baseline as a LineString
    shaper_fun = ShapeLoader.node_to_SingleLine
    iCutCloseDistanceTop = 45 # any block close enough become T or S
    iCutCloseDistanceBot = 45 # any block close enough become B or S
    @classmethod
    def showClassParam(cls):
        # NOTE(review): the parent's boolean return value is not propagated
        # here (implicit None is returned) — confirm callers don't rely on it.
        bShown = super().showClassParam()
        if bShown:
            #also show ours!
            traceln(" - iCutCloseDistanceTop : " , cls.iCutCloseDistanceTop)
            traceln(" - iCutCloseDistanceBot : " , cls.iCutCloseDistanceBot)
    def addEdgeToDoc(self):
        """
        To display the graph conveniently we add new Edge elements
        Since we change the Baseline representation, we show the new one
        """
        super().addEdgeToDoc()
        for blk in self.lNode:
            assert blk.type.name in ["row", "sepH"], blk.type.name
            if blk.type.name == "row":
                # Rewrite the baseline points with the single-line shape.
                ndBaseline = blk.node.xpath(".//pc:Baseline", namespaces=self.dNS)[0]
                o = self.shaper_fun(ndBaseline)
                MultiPageXml.setPoints(ndBaseline, list(o.coords))
        return
    """
    To compute TOMBS labels, it is better to use the built graph...
    """
    def parseDocLabels(self):
        """
        Parse the label of the graph from the dataset, and set the node label
        return the set of observed class (set of integers in N+)
        """
        # WE expect I or O for text blocks!!
        setSeensLabels = super().parseDocLabels()
        # now look at edges to compute T M B S
        # REMEMBER, we did: edge.len = dist / self.iBlockVisibility
        maxLenTop = self.iCutCloseDistanceTop / self.iBlockVisibility
        maxLenBot = self.iCutCloseDistanceBot / self.iBlockVisibility
        # --- ASSUMPTION !!! ---
        # Class indices assumed: text labels 0..4 (T O M B S), then the
        # cut labels 5..7 (S I O) — must match the node type declarations.
        T, _O, M, B, S = 0, 1, 2, 3, 4
        sepS, _sepI, _sepO = 5, 6, 7
        for edge in self.lEdge:
            if type(edge) == Edge_BL and edge.B.cls == sepS:
                cls = edge.A.cls
                if edge._type < 0: # this short edge goes up
                    if edge.len <= maxLenTop:
                        # Ok, this will be a T or B or S!
                        # which means the text block is the 1st CRF node type
                        # REMEMBER, we did: edge._type = -1 if blk.y_bslne >= y else +1
                        if cls == M:
                            newcls = T
                        elif cls == B:
                            newcls = S
                        else:
                            continue
                        edge.A.cls = newcls
                        setSeensLabels.add(newcls)
                else: # this short edge goes down
                    if edge.len <= maxLenBot:
                        if cls == M:
                            newcls = B
                        elif cls == T:
                            newcls = S
                        else:
                            continue
                        edge.A.cls = newcls
                        setSeensLabels.add(newcls)
        # traceln(self._dClsByLabel)
        return setSeensLabels
class NodeType_BIESO_to_TOMBS_Shape(NodeType_BIESO_to_BIO_Shape):
    """
    Convert BIESO labeling to SIOStSmSb

    B/I/E/S all collapse to M here; the T/B/S refinement is computed later
    from the graph edges (see GraphSkewedCut_H_TOMBS_lines.parseDocLabels).
    """
    bColumnHeader = False  # ignore headers for now
    dConverter = { 'B':'M',
                   'I':'M',
                   'E':'M',
                   'S':'M', # St Sm Sb => specific processing to get it
                   'O':'O',
                   'CH':'CH'}
    def parseDocNodeLabel(self, graph_node, defaultCls=None):
        """
        Parse and set the graph node label and return its class index
        raise a ValueError if the label is missing while bOther was not True, or if the label is neither a valid one nor an ignored one
        """
        domnode = graph_node.node
        sXmlLabel = domnode.get(self.sLabelAttr)
        # in case we also deal with column headers
        if self.bColumnHeader and 'CH' == domnode.get("DU_header"):
            sXmlLabel = 'CH'
        # Collapse the BIESO label space via dConverter before lookup.
        sXmlLabel = self.dConverter[sXmlLabel]
        try:
            sLabel = self.dXmlLabel2Label[sXmlLabel]
        except KeyError:
            raise ValueError("Invalid label '%s'"
                             " (from @%s or @%s) in node %s"%(sXmlLabel,
                                                              self.sLabelAttr,
                                                              self.sDefaultLabel,
                                                              etree.tostring(domnode)))
        # traceln(etree.tostring(domnode), sLabel)
        return sLabel
class DU_ABPTableSkewedRowCutLine(DU_CRF_Task):
    """
    We will do a CRF model for a DU task
    , with the below labels

    Two node types: text lines labelled TOMBS, and horizontal cut
    separators labelled SIO.
    """
    sXmlFilenamePattern = "*.mpxml"  # *_du.* files are now ignored by DU_CRF_Task
    # Graph-construction parameters; set from __init__ before the graph
    # class is configured.
    iBlockVisibility = None
    iLineVisibility = None
    fCutHeight = None
    bCutAbove = None
    lRadAngle = None
    #=== CONFIGURATION ====================================================================
    @classmethod
    def getConfiguredGraphClass(cls):
        """
        In this class method, we must return a configured graph class
        """
        # Textline labels
        # Begin Inside End Single Other
        lLabels_TOMBS_blk = ['T', 'O', 'M', 'B', 'S']
        # Cut lines:
        # Border Ignore Separator Outside
        lLabels_SIO_Cut = ['S', 'I', 'O']
        #DEFINING THE CLASS OF GRAPH WE USE
        DU_GRAPH = GraphSkewedCut_H_TOMBS_lines
        DU_GRAPH.iBlockVisibility = cls.iBlockVisibility
        DU_GRAPH.iLineVisibility = cls.iLineVisibility
        DU_GRAPH.fCutHeight = cls.fCutHeight
        DU_GRAPH.bCutAbove = cls.bCutAbove
        DU_GRAPH.lRadAngle = cls.lRadAngle
        # ROW
        ntR = NodeType_BIESO_to_TOMBS_Shape("row"
                                            , lLabels_TOMBS_blk
                                            , None
                                            , False
                                            , None
                                            )
        ntR.setLabelAttribute("DU_row")
        ntR.setXpathExpr( (".//pc:TextLine" #how to find the nodes
                           , "./pc:TextEquiv") #how to get their text
                          )
        DU_GRAPH.addNodeType(ntR)
        # CUT
        ntCutH = NodeType_PageXml_Cut_Shape("sepH"
                                            , lLabels_SIO_Cut
                                            , None
                                            , False
                                            , None # equiv. to: BBoxDeltaFun=lambda _: 0
                                            )
        ntCutH.setLabelAttribute("DU_type")
        ntCutH.setXpathExpr( ('.//pc:CutSeparator[@orient="0"]' #how to find the nodes
                              # the angle attribute give the true orientation (which is near 0)
                              , "./pc:TextEquiv") #how to get their text
                             )
        DU_GRAPH.addNodeType(ntCutH)
        DU_GRAPH.setClassicNodeTypeList( [ntR ])
        return DU_GRAPH
    def __init__(self, sModelName, sModelDir,
                 iBlockVisibility = None,
                 iLineVisibility = None,
                 fCutHeight = None,
                 bCutAbove = None,
                 lRadAngle = None,
                 sComment = None,
                 C=None, tol=None, njobs=None, max_iter=None,
                 inference_cache=None):
        # The graph parameters are stashed on the CLASS so that
        # getConfiguredGraphClass (a classmethod) can read them.
        DU_ABPTableSkewedRowCutLine.iBlockVisibility = iBlockVisibility
        DU_ABPTableSkewedRowCutLine.iLineVisibility = iLineVisibility
        DU_ABPTableSkewedRowCutLine.fCutHeight = fCutHeight
        # NOTE(review): the bCutAbove argument is ignored and forced to
        # True here — confirm this override is intentional.
        DU_ABPTableSkewedRowCutLine.bCutAbove = True
        DU_ABPTableSkewedRowCutLine.lRadAngle = lRadAngle
        DU_CRF_Task.__init__(self
                             , sModelName, sModelDir
                             , dFeatureConfig = {'row_row':{}, 'row_sepH':{},
                                                 'sepH_row':{}, 'sepH_sepH':{},
                                                 'sepH':{}, 'row':{}}
                             , dLearnerConfig = {
                                 'C' : .1 if C is None else C
                                 , 'njobs' : 4 if njobs is None else njobs
                                 , 'inference_cache' : 50 if inference_cache is None else inference_cache
                                 #, 'tol' : .1
                                 , 'tol' : .05 if tol is None else tol
                                 , 'save_every' : 50 #save every 50 iterations,for warm start
                                 , 'max_iter' : 10 if max_iter is None else max_iter
                             }
                             , sComment=sComment
                             #,cFeatureDefinition=FeatureDefinition_PageXml_StandardOnes_noText
                             ,cFeatureDefinition=My_FeatureDefinition_v3
                             )
# ----------------------------------------------------------------------------
# Command-line entry point: delegate to the shared DU task driver.
if __name__ == "__main__":
    main_command_line(DU_ABPTableSkewedRowCutLine)
| |
import py, os
from testing.test_interpreter import BaseTestInterpreter, hippy_fail
class TestFileOps(BaseTestInterpreter):
    # --- require() basics: __FILE__, variable scoping, and return values ---
    @py.test.mark.skipif("config.option.runappdirect",
                         reason="we have <input> only on hippy")
    def test___file__(self):
        output = self.run('echo __FILE__;')
        assert self.space.str_w(output[0]) == "<input>"
    def test_require_1(self, tmpdir):
        # require() returns int(1) on success; the file shares caller scope.
        f = tmpdir.join('x.php')
        f.write('''<?php
        $got = __FILE__;
        ?>''')
        output = self.run('''
        echo require("%s");
        echo $got;
        ''' % f)
        assert self.space.int_w(output[0]) == 1
        assert os.path.samefile(self.space.str_w(output[1]), str(f))
    def test_require_2(self, tmpdir):
        f = tmpdir.join('x.php')
        f.write('''<?php
        $b = -42;
        $olda = $a;
        $a = 63;
        ?>''')
        output = self.run('''
        $a = 21;
        require("%s");
        echo $olda, $a, $b;
        ''' % f)
        assert [self.space.int_w(i) for i in output] == [21, 63, -42]
    def test_require_with_return(self, tmpdir):
        f = tmpdir.join('x.php')
        f.write('''<?php
        return "42";
        ?>''')
        output = self.run('''
        echo require("%s");
        ''' % f)
        assert self.space.is_w(output[0], self.space.newstr("42"))
    def test_require_with_return_2(self, tmpdir):
        # A bare `return;` in the required file yields NULL.
        f = tmpdir.join('x.php')
        f.write('''<?php
        return;
        ?>''')
        output = self.run('''
        echo require("%s");
        ''' % f)
        assert self.space.is_w(output[0], self.space.w_Null)
    # --- require() interaction with PHP references and error handling ---
    def test_require_with_ref(self, tmpdir):
        f = tmpdir.join('x.php')
        f.write('''<?php
        $b1 =& $a1;
        $b2 =& $a2;
        $c = $b2;
        $a2 =& $c;
        $olda1 = $b1;
        $olda2 = $b2;
        $a1 = 63;
        ?>''')
        output = self.run('''
        $a1 = 21;
        $a2 = &$a1;
        require("%s");
        echo $a1, $a2, $b1, $b2, $olda1, $olda2, $c;
        $c++;
        echo $a2;
        ''' % f)
        assert [self.space.int_w(i) for i in output] == [
            63, 21, 63, 63, 21, 21, 21, 22]
    def test_require_indirect_ref(self, tmpdir):
        # Variable-variable ($$name) reference binding across require().
        f = tmpdir.join('x.php')
        f.write('''<?php
        $name = 'x';
        $$name =& $y;
        $y = 5;
        ?>''')
        out, = self.run('''
        require("%s");
        echo $x;
        ''' % f)
        assert self.space.int_w(out) == 5
    def test_require_globals_ref(self, tmpdir):
        f = tmpdir.join('x.php')
        f.write('''<?php
        $GLOBALS['y'] =& $x;
        $y = 5;
        ?>''')
        out, = self.run('''
        require('%s');
        echo $y;
        ''' % f)
        assert self.space.int_w(out) == 5
    def test_require_error(self):
        # require() on a missing path is a fatal error (unlike include()).
        with self.warnings([
                'Warning: require(does_not_exist): '
                'failed to open stream: No such file or directory',
                "Fatal error: require(): Failed opening required "
                "'does_not_exist' (include_path=...)"]):
            output = self.run('''
            $x = require 'does_not_exist';
            ''')
        assert output == []
        with self.warnings([
                # PHP says 'Inappropriate ioctl for device' instead of the
                # expected 'Is a directory'.
                'Warning: require(...): failed to open stream: I...',
                "Fatal error: require(): Failed opening required "
                "'..' (include_path=...)"]):
            output = self.run('''
            $x = require '..';
            ''')
        assert output == []
    # --- include_path resolution, require_once caching, path normalisation ---
    def test_require_path(self, tmpdir):
        on_path = tmpdir.mkdir('on_path')
        on_path.join('a.php').write('''<?php
        return 'on_path';
        ''')
        some_dir = tmpdir.mkdir('some_dir')
        some_dir.join('a.php').write('''<?php
        return 'some_dir';
        ''')
        output = self.run("""
        set_include_path('%s');
        chdir('%s');
        $dir = include 'a.php'; // uses include_path
        echo $dir;
        $dir = include './a.php'; // relative to CWD
        echo $dir;
        """ % (on_path, some_dir))
        # NOTE: Python 2 semantics — map() returns a list here.
        assert map(self.space.str_w, output) == ['on_path', 'some_dir']
    def test_require_once(self, tmpdir):
        # Second require_once of the same file returns TRUE (int 1) and
        # does not re-execute the file.
        f = tmpdir.join('x.php')
        f.write('''<?php
        $x += 1;
        return $x;
        ?>''')
        output = self.run('''
        $x = 5;
        $a = require_once '%(f)s';
        echo $x, $x;
        $b = require_once '%(f)s';
        echo $b, $x;
        ''' % locals())
        assert map(self.space.int_w, output) == [6, 6, 1, 6]
    def test_require_normalisation_1(self, tmpdir):
        # Mutually-requiring files must not recurse: paths are normalised
        # before the "already included" check.
        subdir = tmpdir.mkdir('subdir')
        a = subdir.join('a.php')
        b = subdir.join('b.php')
        a.write('''<?php
        require_once '../subdir/b.php';
        function foo() {}
        ?>''')
        b.write('''<?php
        require_once '../subdir/a.php';
        ?>''')
        output = self.run("chdir('%s'); require_once '%s';" % (subdir, a))
        assert output == []
    def test_require_normalisation_2(self, tmpdir):
        # Same relative name in different CWDs must load both files.
        dir1 = tmpdir.mkdir('dir1')
        a1 = dir1.join('a.php')
        dir2 = tmpdir.mkdir('dir2')
        a2 = dir2.join('a.php')
        a1.write('''<?php
        return 'in dir1';
        ?>''')
        a2.write('''<?php
        return 'in dir2';
        ?>''')
        output = self.run('''
        chdir('%s');
        $a = require_once 'a.php';
        echo $a;
        chdir('%s');
        $a = require_once 'a.php';
        echo $a;
        ''' % (dir1, dir2))
        assert map(self.space.str_w, output) == ['in dir1', 'in dir2']
    # --- include(): warnings instead of fatals, scoping, classes, throws ---
    def test_include(self, tmpdir):
        f = tmpdir.join('x.php')
        f.write('''<?php
        $olda = $a;
        $a = 63;
        ?>''')
        output = self.run('''
        $a = 21;
        $b = include "%s";
        echo $olda, $a, $b;
        ''' % f)
        assert [self.space.int_w(i) for i in output] == [21, 63, 1]
    def test_include_error(self):
        # Unlike require(), a missing include() only warns and yields FALSE.
        with self.warnings([
                'Warning: include(does_not_exist): '
                'failed to open stream: No such file or directory',
                "Warning: include(): Failed opening "
                "'does_not_exist' for inclusion (include_path=...)"]):
            output = self.run('''
            $x = include 'does_not_exist';
            echo $x;
            ''')
        assert self.space.is_w(output[0], self.space.w_False)
    def test_include_once(self, tmpdir):
        f = tmpdir.join('x.php')
        f.write('''<?php
        $x += 1;
        return $x;
        ?>''')
        output = self.run('''
        $x = 5;
        $a = include_once '%(f)s';
        echo $x, $x;
        $b = include_once '%(f)s';
        echo $b, $x;
        ''' % locals())
        # NOTE: Python 2 semantics — map() returns a list here.
        assert map(self.space.int_w, output) == [6, 6, 1, 6]
    def test_include_once_error(self):
        with self.warnings([
                'Warning: include_once(does_not_exist): '
                'failed to open stream: No such file or directory',
                "Warning: include_once(): Failed opening "
                "'does_not_exist' for inclusion (include_path=...)"]):
            output = self.run('''
            $x = include_once 'does_not_exist';
            echo $x;
            ''')
        assert self.space.is_w(output[0], self.space.w_False)
    def test_include_in_func(self, tmpdir):
        # include() inside a function sees that function's local scope.
        f = tmpdir.join('x.php')
        f.write('''<?php
        $olda = $a;
        $a = 63;
        ?>''')
        output = self.run('''
        function f() {
            $a = 21;
            $b = include "%s";
            echo $olda, $a, $b;
        }
        f();
        ''' % f)
        assert [self.space.int_w(i) for i in output] == [21, 63, 1]
    def test_throw_in_include(self, tmpdir):
        # Exceptions thrown inside an included file propagate to the caller.
        f = tmpdir.join('x.php')
        f.write('''<?php
        throw new Exception("message");
        ?>''')
        output = self.run('''
        try {
            include "%s";
        } catch (Exception $e) {
            echo $e->getMessage();
        }
        ''' % f)
        assert self.space.str_w(output[0]) == 'message'
    def test_extend_included_class(self, tmpdir):
        f = tmpdir.join('x.php')
        f.write('''<?php
        class A {
            function hello() {return 'hello';}
        }
        ?>''')
        output = self.run('''
        include "%s";
        class B extends A {}
        $b = new B;
        echo $b->hello();
        ''' % f)
        assert self.space.str_w(output[0]) == 'hello'
def test_chgrp_basedir(self):
    """chgrp() outside open_basedir warns and returns false (echoes '')."""
    expected = [
        'Warning: chgrp(): open_basedir restriction in effect. '
        'File(/proc/cpuinfo) is not within the allowed path(s): (.)']
    with self.warnings(expected):
        out = self.run('''
        ini_set('open_basedir', '.');
        echo chgrp('/proc/cpuinfo', 1);
        ''')
    assert self.space.str_w(out[0]) == ''
def test_chown_basedir(self):
    """chown() outside open_basedir warns and returns false (echoes '')."""
    expected = [
        'Warning: chown(): open_basedir restriction in effect. '
        'File(/proc/cpuinfo) is not within the allowed path(s): (.)']
    with self.warnings(expected):
        out = self.run('''
        ini_set('open_basedir', '.');
        echo chown('/proc/cpuinfo', 1);
        ''')
    assert self.space.str_w(out[0]) == ''
def test_dirname_basedir(self):
    """dirname() is pure string manipulation, so open_basedir is ignored."""
    out = self.run('''
    ini_set('open_basedir', '.');
    echo dirname('/proc/');
    ''')
    result = self.space.str_w(out[0])
    assert result == '/'
def test_disk_total_space_basedir(self):
    """disk_total_space() outside open_basedir warns and echoes nothing."""
    expected = [
        'Warning: disk_total_space(): open_basedir restriction in effect. '
        'File(/proc/cpuinfo) is not within the allowed path(s): (.)']
    with self.warnings(expected):
        out = self.run('''
        ini_set('open_basedir', '.');
        echo disk_total_space('/proc/cpuinfo');
        ''')
    assert self.space.str_w(out[0]) == ''
def test_mkdir_basedir(self):
    """mkdir() outside open_basedir warns and echoes nothing."""
    expected = [
        'Warning: mkdir(): open_basedir restriction in effect. '
        'File(/proc/cpuinfo/test) is not within the allowed path(s): (.)']
    with self.warnings(expected):
        out = self.run('''
        ini_set('open_basedir', '.');
        echo mkdir('/proc/cpuinfo/test');
        ''')
    assert self.space.str_w(out[0]) == ''
def test_pathinfo_basedir(self):
    """pathinfo() does pure string parsing, unaffected by open_basedir."""
    out = self.run('''
    ini_set('open_basedir', '.');
    echo pathinfo('/proc/cpuinfo/test', 1);
    ''')
    dirname_part = self.space.str_w(out[0])
    assert dirname_part == '/proc/cpuinfo'
def test_readfile_basedir(self):
    """readfile() refuses a path outside open_basedir with two warnings."""
    expected = [
        "Warning: readfile(): open_basedir restriction in effect. "
        "File(/proc/cpuinfo) is not within the allowed path(s): (.)",
        "Warning: readfile(/proc/cpuinfo): failed to open "
        "stream: Operation not permitted",
    ]
    with self.warnings(expected):
        out = self.run('''
        ini_set('open_basedir', '.');
        echo readfile('/proc/cpuinfo');
        ''')
    assert self.space.str_w(out[0]) == ''
def test_realpath_basedir(self):
    """realpath() outside open_basedir warns and echoes nothing."""
    expected = [
        "Warning: realpath(): open_basedir restriction in effect. "
        "File(/proc/cpuinfo) is not within the allowed path(s): (.)",
    ]
    with self.warnings(expected):
        out = self.run('''
        ini_set('open_basedir', '.');
        echo realpath('/proc/cpuinfo');
        ''')
    assert self.space.str_w(out[0]) == ''
def test_fresource_basedir(self):
    """A file handle opened before open_basedir is set keeps working.

    write/flush/gets/close on the already-open resource succeed, but new
    path-based operations (here: unlink) are subject to the restriction
    that was added after the handle was created.
    """
    # First, make sure that the cwd is where we think it is
    os.chdir(os.path.dirname(__file__))
    with self.warnings([
        "Warning: unlink(): open_basedir restriction in effect. "
        "File(to_be_deleted) is not within the allowed path(s): (/tmp)", ]):
        output = self.run('''
        $fname = 'to_be_deleted';
        $h = fopen($fname, 'w');
        ini_set('open_basedir', '/tmp');
        echo fwrite($h, "test");
        echo fflush($h);
        echo fgets($h);
        echo fclose($h);
        echo unlink($fname);
        ''')
    assert self.space.str_w(output[0]) == '4'   # fwrite wrote 4 bytes
    assert self.space.str_w(output[1]) == '1'   # fflush succeeded
    assert self.space.str_w(output[2]) == ''    # fgets on write-mode handle
    assert self.space.str_w(output[3]) == '1'   # fclose succeeded
    # unlink was blocked by open_basedir, so clean up from Python instead.
    os.remove('to_be_deleted')
def test_basedir(self):
    """open_basedir can be set only once; a second ini_set is ignored."""
    out = self.run('''
    ini_set('open_basedir', '/tmp');
    ini_set('open_basedir', '/home');
    echo ini_get('open_basedir');
    ''')
    effective = self.space.str_w(out[0])
    assert effective == '/tmp'
def test_phpmemory(self):
    """php://memory streams are independent: reopening yields a fresh,
    empty stream rather than the previously written contents."""
    output = self.run('''
    $path = 'php://memory';
    $h = fopen($path, "rw+");
    fwrite($h, "bugabuga");
    echo ftell($h);
    fseek($h, 0);
    echo fread($h, 1024);
    fclose($h);
    $h = fopen('php://memory', "rw+");
    fseek($h, 0);
    echo fread($h, 1024);
    ''')
    # 8 bytes were written and read back; the second handle sees nothing.
    assert self.space.str_w(output[0]) == '8'
    assert self.space.str_w(output[1]) == 'bugabuga'
    assert self.space.str_w(output[2]) == ''
def test_file_filter_rot13(self, tmpdir):
    """A string.rot13 write filter transforms data as it is written."""
    php = '''
    $fp = fopen('%s/test.txt', 'w+');
    stream_filter_append($fp, "string.rot13", STREAM_FILTER_WRITE);
    fwrite($fp, "This is a test\n");
    rewind($fp);
    echo fread($fp, 1024);
    fclose($fp);
    ''' % tmpdir
    out = self.run(php)
    assert self.space.str_w(out[0]) == 'Guvf vf n grfg\n'
def test_file_filter_rot13_err(self):
    """Appending an unknown filter warns and leaves the stream unfiltered."""
    # NOTE(review): unlike the other filter tests this one opens 'test.txt'
    # relative to the cwd (no tmpdir), leaving an artifact behind — confirm
    # whether it should take the tmpdir fixture like its siblings.
    with self.warnings([
        "Warning: stream_filter_append(): unable to locate filter \"wrong_filter\""
    ]):
        output = self.run('''
        $fp = fopen('test.txt', 'w+');
        stream_filter_append($fp, "wrong_filter", STREAM_FILTER_WRITE);
        fwrite($fp, "This is a test\n");
        rewind($fp);
        echo fread($fp, 1024);
        fclose($fp);
        ''')
    # Data passes through unchanged because no filter was attached.
    assert self.space.str_w(output[0]) == 'This is a test\n'
def test_file_filter_base64(self, tmpdir):
    """convert.base64-encode write filter honours line-length/break params."""
    # (removed commented-out `with tmpdir.as_cwd():` dead code)
    self.run('''
    $param = array('line-length' => 8, 'line-break-chars' => "\r\n");
    $fp = fopen('%s/test.txt', 'w');
    stream_filter_append($fp, 'convert.base64-encode', STREAM_FILTER_WRITE, $param);
    fwrite($fp, "This is a test.\n");
    fclose($fp);
    ''' % tmpdir)
    # Inspect the on-disk bytes: 8-char base64 chunks joined with \r\n.
    # NOTE(review): relies on Python 2 text mode not translating \r\n on
    # POSIX; under Python 3 universal newlines this read would differ.
    with open('%s/test.txt' % tmpdir) as fh:
        content = fh.readlines()
    assert content[0] == 'VGhpcyBp\r\n'
    assert content[1] == 'cyBhIHRl\r\n'
    assert content[2] == 'c3QuCg=='
def test_file_fd_1_open_close_write(self):
    """Opening then closing php://fd/1 (stdout) must not disturb output."""
    out = self.run('''
    $fp = fopen('php://fd/1', 'r');
    fclose($fp);
    echo "php";
    ''')
    echoed = self.space.str_w(out[0])
    assert echoed == 'php'
| |
from config.api2_0_config import *
from config.settings import *
from modules.logger import Log
from on_http_api2_0 import ApiApi as Api
from on_http_api2_0.rest import ApiException
from proboscis.asserts import *
from proboscis import SkipTest
from proboscis import test
from json import dumps, loads
from on_http_api2_0 import rest
import os
import subprocess
import json
import tarfile
import shutil
import requests
from config.amqp import *
from modules.amqp import AMQPWorker
import time
LOG = Log(__name__)
@test(groups=['sel_alert_poller_api2.tests'])
class SELPollerAlertTests(object):
    """End-to-end tests for the RackHD SEL alert poller.

    Posts a SKU pack whose config.json carries SEL alert rules, then
    injects IPMI SEL entries on each compute node and verifies that the
    poller publishes matching alerts on AMQP.

    NOTE(review): the class decorator uses group
    'sel_alert_poller_api2.tests' while every method uses
    'SEL_alert_poller_api2.tests' (different case) — confirm intended.
    """

    def __init__(self):
        self.__client = config.api_client
        self.__computeNodes = []
        self.__rootDir = "/tmp/tarball/"        # staging area for SKU packs
        self.__skuPackTarball = self.__rootDir + "mytest.tar.gz"
        # (removed a duplicate re-assignment of self.__rootDir)
        self.__task_worker = AMQPWorker(queue=QUEUE_SEL_ALERT,
                                        callbacks=[self.handle_graph_finish])
        self.__amqp_alert = {}
        self.__bmc_credentials = get_bmc_cred()
        self.__skupacknumber = 0
        self.delete_skus()  # clear any skus so only our packs can match

    @test(groups=['SEL_alert_poller_api2.tests', 'post_skupacks'])
    def post_skupacks(self):
        """Test posting skupacks that starts the sel alert poller"""
        # For the sel alert poller to be created there must be a skupack
        # posted with the alerts in the config.json file.  The code below
        # dynamically creates the skupack rule for each node based on its
        # catalog.
        Api().nodes_get_all()
        all_nodes = loads(self.__client.last_response.data)
        for n in all_nodes:
            if n.get('type') == 'compute':
                # get the catalog of the node
                node_id = n.get('id')
                Api().nodes_get_catalog_source_by_id(identifier=node_id, source='dmi')
                node_catalog_data = loads(self.__client.last_response.data)
                # get the node's BMC IP
                Api().nodes_get_catalog_source_by_id(identifier=node_id, source='bmc')
                node_bmc = loads(self.__client.last_response.data)
                node_ip = node_bmc['data']['IP Address']
                if len(node_catalog_data) > 0:
                    # Find the size of the SEL and how many entries it can hold
                    selInfoObj = self.selInfoObj(node_ip, "sel info")
                    free_bytes = int(selInfoObj['Free Space'][0])
                    # each SEL record is 16 bytes; // keeps this an int on
                    # both Python 2 and 3
                    available_sel_entries = free_bytes // 16
                    self.__computeNodes.append({"node_id": n.get('id'),
                                                "node_ip": node_ip,
                                                "available_sel_entries": available_sel_entries})
                    # dynamically update the skupack rule with a value from
                    # the cataloged node
                    node_manufacturer = \
                        node_catalog_data.get('data').get("System Information").get("Manufacturer").split(" ")[0]
                    # Generate and post the skupack with the updated rule
                    self.generateTarball(node_manufacturer)
                    URL = config.host + config.api_root + '/skus/pack'
                    LOG.info("URL {0}".format(URL))
                    requests.adapters.DEFAULT_RETRIES = 3
                    res = None
                    # with-block closes the tarball handle even when every
                    # POST attempt fails (it used to be leaked)
                    with open(self.__skuPackTarball, 'rb') as tarball:
                        self.__file = {'file': tarball}
                        # retry loop: `attempt` no longer reuses (and
                        # clobbers) the outer node variable `n`
                        for attempt in range(0, 5):
                            try:
                                LOG.info("Number of attempt to post the skupack : {0}".format(attempt))
                                res = requests.post(URL, files=self.__file)
                                break
                            except requests.ConnectionError as e:
                                LOG.info("Request Error {0}: ".format(e))
                    # fail clearly if every attempt raised (`res` never bound
                    # would previously have been a NameError here)
                    assert_not_equal(res, None, message='failed to POST the skupack')
                    assert_equal(201, res.status_code, message=res.reason)

    @test(groups=['SEL_alert_poller_api2.tests', 'check_pollers'],
          depends_on_groups=['post_skupacks'])
    def check_selEntries_poller(self):
        """Test: Checking that the selEntries pollers have started for all of the compute nodes"""
        for n in self.__computeNodes:
            found_poller = False
            Api().nodes_get_pollers_by_id(identifier=n['node_id'])
            pollers = loads(self.__client.last_response.data)
            for poller in pollers:
                if poller['config']['command'] == "selEntries":
                    # remember the poller so later checks can reference it
                    n['poller_id'] = poller['id']
                    found_poller = True
            assert_equal(found_poller, True)

    @test(groups=['SEL_alert_poller_api2.tests', 'inject_single_error'],
          depends_on_groups=['check_pollers'])
    def test_single_entry(self):
        """Test A single alert"""
        # The raw command below creates the following entry
        #  SEL Record ID          : 6e8c
        #  Record Type            : 02
        #  Timestamp              : 01/01/1970 01:15:49
        #  Generator ID           : 0001
        #  EvM Revision           : 04
        #  Sensor Type            : Processor
        #  Sensor Number          : 02
        #  Event Type             : Sensor-specific Discrete
        #  Event Direction        : Deassertion Event
        #  Event Data             : 000000
        #  Description            : IERR
        for n in self.__computeNodes:
            # Inject a single SEL entry after clearing the sel
            self.run_ipmitool_command(n['node_ip'], "sel clear")
            self.verify_empty_sel(n['node_ip'])
            command = "raw 0x0a 0x44 0x01 0x00 0x02 0xab 0xcd 0xef 0x00 0x01 0x00 0x04 0x07 0x02 0xef 0x00 0x00 0x00"
            self.run_ipmitool_command(n['node_ip'], command)
            # listen to AMQP (fixed: the log used to format the builtin
            # `id` function instead of the node id)
            LOG.info('starting amqp listener for node {0}'.format(n['node_id']))
            self.__task_worker = AMQPWorker(queue=QUEUE_SEL_ALERT,
                                            callbacks=[self.handle_graph_finish])
            self.__task_worker.start()
            # In addition to the ipmitool readout, RackHD adds two elements
            # ("Sensor Type Code" & "Event Type Code") to the alert.
            # Validate that the sel raw read is being decoded correctly.
            assert_equal(self.__amqp_alert["value"]["alerts"][0]["data"]["Description"], "IERR")
            assert_equal(self.__amqp_alert["value"]["alerts"][0]["data"]["Event Type Code"], "6f")
            assert_equal(self.__amqp_alert["value"]["alerts"][0]["data"]["Sensor Type Code"], "07")
            self.__amqp_alert = {}

    @test(groups=['SEL_alert_poller_api2.tests', 'sel_overflow_simulation'],
          depends_on_groups=['inject_single_error'])
    def test_sel_overflow(self):
        """Test: SEL overflow simulation """
        # This test validates that the sel poller alert can handle a SEL with
        # the overflow option turned on.  In this case, when the sel is full,
        # the first sel entry in the log won't have record ID 1.  This can
        # only be simulated on a virtual node by issuing a clear command.
        for n in self.__computeNodes:
            self.run_ipmitool_command(n['node_ip'], "sel clear")
            self.verify_empty_sel(n['node_ip'])
            command = "raw 0x0a 0x44 0x01 0x00 0x02 0xab 0xcd 0xef 0x00 0x01 0x00 0x04 0x07 0x02 0xef 0x00 0x00 0x00"
            self.run_ipmitool_command(n['node_ip'], command)
            selInfoObj = self.selInfoObj(n['node_ip'], "sel get 0")
            initial_first_SEL_entry = int(selInfoObj["SEL Record ID"][0], 16)
            LOG.info(selInfoObj["SEL Record ID"][0])
            # clear and inject again: the record ID keeps incrementing
            self.run_ipmitool_command(n['node_ip'], "sel clear")
            self.verify_empty_sel(n['node_ip'])
            command = "raw 0x0a 0x44 0x01 0x00 0x02 0xab 0xcd 0xef 0x00 0x01 0x00 0x04 0x07 0x02 0xef 0x00 0x00 0x00"
            self.run_ipmitool_command(n['node_ip'], command)
            selInfoObj = self.selInfoObj(n['node_ip'], "sel get 0")
            LOG.info(selInfoObj["SEL Record ID"][0])
            new_first_SEL_entry = int(selInfoObj["SEL Record ID"][0], 16)
            if(new_first_SEL_entry != 0):
                LOG.info("Succesfully simulated the SEL overflow behavior")
            else:
                LOG.info("Couldn't simulate the SEL overflow behavior")
            assert_equal(new_first_SEL_entry, initial_first_SEL_entry + 1)

    @test(groups=['SEL_alert_poller_api2.tests', 'inject_full_sel'],
          depends_on_groups=['inject_single_error'])
    def test_full_sel(self):
        """Test: Full sel log"""
        # Validate the poller can digest data from a full sel log all at once
        for n in self.__computeNodes:
            # listen to AMQP (fixed: the log used to format the builtin
            # `id` function instead of the node id)
            LOG.info('starting amqp listener for node {0}'.format(n['node_id']))
            self.__task_worker = AMQPWorker(queue=QUEUE_SEL_ALERT,
                                            callbacks=[self.handle_graph_finish])
            self.run_ipmitool_command(n['node_ip'], "sel clear")
            self.verify_empty_sel(n['node_ip'])
            self.create_selEntries_file(n["available_sel_entries"])
            self.__amqp_alert = {}
            self.run_ipmitool_command((n['node_ip']), "sel add /tmp/selError.txt")
            self.__task_worker.start()
            assert_equal(len(self.__amqp_alert["value"]["alerts"]), n["available_sel_entries"])

    def run_ipmitool_command(self, ip, command):
        """Run an ipmitool command over lanplus against `ip`; return stdout."""
        # NOTE(review): the command is built by concatenation and run through
        # a shell via os.popen, so credentials containing shell
        # metacharacters would break/inject — acceptable for a lab harness,
        # confirm otherwise.
        ipmitool_command = "ipmitool -I lanplus -H " + ip + " -U " + self.__bmc_credentials[0] + " -P " + self.__bmc_credentials[1] + " " + command
        f = os.popen(ipmitool_command)
        ipmi_return = f.read()
        f.close()  # close the pipe (it used to be leaked)
        LOG.info("ipmi ipmitool_command: {0}".format(ipmitool_command))
        return ipmi_return

    def verify_empty_sel(self, ip, entries=None):
        """Poll 'sel info' until the SEL reports at most one entry.

        Recursive polling is more efficient than a fixed sleep, especially
        when running on actual hardware.  NOTE(review): there is no retry
        limit, so a BMC that never clears would recurse until the Python
        recursion limit is hit — confirm that is acceptable.
        """
        # `entries is None` is tested first so the `>` comparison never sees
        # None (Python 3 raises TypeError on `None > 1`; the old order only
        # worked on Python 2).
        if entries is None or entries > 1:
            selInfoObj = self.selInfoObj(ip, "sel info")
            entries = int(selInfoObj['Entries'][0])
            time.sleep(0.5)
            self.verify_empty_sel(ip, entries)
        else:
            return

    def handle_graph_finish(self, body, message):
        """AMQP callback: record the alert payload and stop the worker."""
        # routing key looks like 'poller.alert.sel.<id>'; the suffix must exist
        routeId = message.delivery_info.get('routing_key').split('poller.alert.sel.')[1]
        assert_not_equal(routeId, None)
        message.ack()
        self.__amqp_alert = body
        self.__task_worker.stop()

    def generateTarball(self, ruleUpdate=None):
        """Generate a skupack tarball with a custom rule.

        When `ruleUpdate` is given it replaces the 'contains' value of the
        first SKU rule so the pack matches the target node's manufacturer.
        """
        # rebuild the staging directory from scratch
        if os.path.isdir(self.__rootDir):
            shutil.rmtree(self.__rootDir)
        os.mkdir(self.__rootDir)
        tarballDirs = ["profiles", "static", "tasks", "templates", "workflows"]
        for dir_name in tarballDirs:  # renamed: `dir` shadowed the builtin
            os.mkdir(self.__rootDir + dir_name)
        # every generated pack gets a unique name
        self.__skupacknumber = self.__skupacknumber + 1
        name = "skupack_" + str(self.__skupacknumber)
        self.__config_json = {
            "name": name,
            "rules": [
                {
                    "path": "dmi.System Information.Manufacturer",
                    "contains": "Quanta"
                }
            ],
            "skuConfig": {
                "value1": {
                    "value": "value"
                },
                "sel": {
                    "alerts": [
                        {
                            "Event Type Code": "01",
                            "Description": "/.+Non-critical going.+/",
                            "action": "warning"
                        },
                        {
                            "Event Type Code": "01",
                            "Description": "/(.+Critical going.+)|(Lower Non-recoverable going low)|(Upper Non-recoverable going high)/",
                            "action": "critical"
                        },
                        {
                            "Sensor Type Code": "07",
                            "Event Type Code": "6f",
                            "Event Data": "/050000|080000|0a0000/",
                            "action": "warning"
                        },
                        {
                            "Sensor Type Code": "07",
                            "Event Type Code": "6f",
                            "Event Data": "/000000|010000|020000|030000|040000|060000|0b0000/",
                            "action": "critical"
                        },
                        {
                            "Event Data": "00ffff",
                            "action": "warning"
                        },
                        {
                            "Sensor Type": "Event Logging Disabled",
                            "Description": "Log full",
                            "Event Direction": "Assertion Event",
                            "action": "warning"
                        }
                    ]
                }
            },
            "workflowRoot": "workflows",
            "taskRoot": "tasks",
            "httpProfileRoot": "profiles",
            "httpTemplateRoot": "templates",
            "httpStaticRoot": "static"
        }
        if ruleUpdate is not None:
            self.__config_json['rules'][0]['contains'] = ruleUpdate
        with open(self.__rootDir + 'config.json', 'w') as f:
            json.dump(self.__config_json, f)
        # (redundant f.close() inside the with-block removed)
        # NOTE(review): changes the process-wide cwd; any later relative
        # path in other tests is affected — confirm intended.
        os.chdir(self.__rootDir)
        with tarfile.open(self.__rootDir + "mytest.tar.gz", mode="w:gz") as f:
            # loop variable renamed so it no longer clobbers `name` above
            for member in ["config.json", "profiles", "static", "tasks", "templates", "workflows"]:
                f.add(member)

    def delete_skus(self):
        """Delete all SKUs before posting new ones with matching rules."""
        Api().skus_get()
        data = loads(self.__client.last_response.data)
        for item in data:
            Api().skus_id_delete(item.get("id"))

    def create_selEntries_file(self, numberOfEntries):
        """Write /tmp/selError.txt holding `numberOfEntries` identical SEL
        records, ready for `ipmitool sel add`."""
        singleEntry = "0x04 0x09 0x01 0x6f 0x00 0xff 0xff # Power Unit #0x01 Power off/down"
        # join() instead of repeated += (linear, not quadratic); max() keeps
        # the old behavior of always writing at least one entry
        entries = '\n'.join([singleEntry] * max(numberOfEntries, 1))
        with open('/tmp/selError.txt', 'w') as f:
            f.write(entries)

    def selInfoObj(self, node_ip, command):
        """Run an ipmitool command and parse its 'Key : value' output.

        Returns a dict mapping each key to the list of whitespace-split
        value tokens, or None for lines without a colon.
        NOTE(review): values containing ':' (e.g. timestamps) are truncated
        at the first colon by split(':') — none of the keys used by these
        tests are affected, but confirm before reusing.
        """
        selInfoUnprocessed = self.run_ipmitool_command(node_ip, command)
        selInfoArray = selInfoUnprocessed.split('\n')
        selInfoObj = {}
        for entry in selInfoArray:
            keyVal = entry.split(':')
            keyVal[0] = keyVal[0].rstrip()
            if (entry.find(':') != -1):
                keyVal[1] = keyVal[1].strip().split()
                selInfoObj[keyVal[0]] = keyVal[1]
            else:
                selInfoObj[keyVal[0]] = None
        return selInfoObj
| |
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import string
from pybuilder.terminal import print_text_line
# Python 2/3 compatibility shim: `raw_input` was renamed to `input` in
# Python 3, so bind whichever exists to a common name.
try:
    _input = raw_input
except NameError:
    _input = input
# Default project layout offered by the interactive scaffolding wizard.
DEFAULT_SOURCE_DIRECTORY = 'src/main/python'
DEFAULT_UNITTEST_DIRECTORY = 'src/unittest/python'
DEFAULT_SCRIPTS_DIRECTORY = 'src/main/scripts'
DEFAULT_DOCS_DIRECTORY = 'docs'
# Optional plugins the wizard proposes to enable, one by one.
PLUGINS_TO_SUGGEST = ['python.flake8', 'python.coverage', 'python.distutils']
def prompt_user(description, default):
    """Ask the user a question, showing *default*; return the raw reply.

    An empty reply (just pressing enter) returns '' — callers fall back
    to the default themselves.
    """
    return _input("%s (default: '%s') : " % (description, default))
def collect_project_information():
    """Interactively ask for project name and layout directories.

    Returns a PythonProjectScaffolding configured with the answers;
    every blank answer keeps the scaffolding's built-in default.
    """
    default_project_name = os.path.basename(os.getcwd())
    project_name = prompt_user('Project name', default_project_name) or default_project_name
    scaffolding = PythonProjectScaffolding(project_name)
    dir_source_main_python = prompt_user('Source directory', DEFAULT_SOURCE_DIRECTORY)
    dir_docs = prompt_user('Docs directory', DEFAULT_DOCS_DIRECTORY)
    dir_source_unittest_python = prompt_user(
        'Unittest directory', DEFAULT_UNITTEST_DIRECTORY)
    dir_source_main_scripts = prompt_user("Scripts directory", DEFAULT_SCRIPTS_DIRECTORY)
    plugins = suggest_plugins(PLUGINS_TO_SUGGEST)
    scaffolding.add_plugins(plugins)
    # Only override the scaffolding defaults when the user typed something.
    if dir_source_main_python:
        scaffolding.dir_source_main_python = dir_source_main_python
    if dir_source_unittest_python:
        scaffolding.dir_source_unittest_python = dir_source_unittest_python
    if dir_source_main_scripts:
        scaffolding.dir_source_main_scripts = dir_source_main_scripts
    if dir_docs:
        scaffolding.dir_docs = dir_docs
    return scaffolding
def suggest_plugins(plugins):
    """Offer each plugin to the user; return only the accepted ones."""
    return [plugin for plugin in (suggest(p) for p in plugins) if plugin]
def suggest(plugin):
    """Return *plugin* if the user accepts it (default is yes), else None."""
    answer = prompt_user('Use plugin %s (Y/n)?' % plugin, 'y')
    # Anything other than an empty reply or 'y'/'Y' counts as a rejection.
    if answer and answer.lower() != 'y':
        return None
    return plugin
def start_project():
    """Interactively scaffold a new project; return a shell exit code."""
    try:
        scaffolding = collect_project_information()
    except KeyboardInterrupt:
        # Ctrl-C during the questionnaire aborts without touching disk.
        print_text_line('\nCanceled.')
        return 1
    with open('build.py', 'w') as build_descriptor_file:
        build_descriptor_file.write(scaffolding.render_build_descriptor())
    scaffolding.set_up_project()
    _create_setup_file()
    return 0
def update_project():
    """Regenerate setup.py for an existing project; return exit code 0."""
    _create_setup_file()
    return 0
def _create_setup_file():
    """Write a pip-bootstrap `setup.py` into the cwd (asks before overwrite).

    The generated script installs PyBuilder on demand, builds the project,
    moves the dist artifacts next to itself and re-runs the produced
    setup.py — it is meant to be executed by `pip install`, never by hand.
    """
    setup_py_file_contents = '''#!/usr/bin/env python
#
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script allows to support installation via:
# pip install git+git://<project>@<branch>
#
# This script is designed to be used in combination with `pip install` ONLY
#
# DO NOT RUN MANUALLY
#
import os
import subprocess
import sys
import glob
import shutil

from sys import version_info
py3 = version_info[0] == 3
py2 = not py3
if py2:
    FileNotFoundError = OSError


def install_pyb():
    try:
        subprocess.check_call([sys.executable, "-m", "pip.__main__", "install", "pybuilder"])
    except subprocess.CalledProcessError as e:
        sys.exit(e.returncode)


script_dir = os.path.dirname(os.path.realpath(__file__))
exit_code = 0

try:
    subprocess.check_call(["pyb", "--version"])
except FileNotFoundError as e:
    if py3 or py2 and e.errno == 2:
        install_pyb()
    else:
        raise
except subprocess.CalledProcessError as e:
    if e.returncode == 127:
        install_pyb()
    else:
        sys.exit(e.returncode)

try:
    subprocess.check_call(["pyb", "clean", "install_build_dependencies", "package", "-o"])

    dist_dir = glob.glob(os.path.join(script_dir, "target", "dist", "*"))[0]
    for src_file in glob.glob(os.path.join(dist_dir, "*")):
        file_name = os.path.basename(src_file)
        target_file_name = os.path.join(script_dir, file_name)
        if os.path.exists(target_file_name):
            if os.path.isdir(target_file_name):
                shutil.rmtree(target_file_name)
            else:
                os.remove(target_file_name)
        shutil.move(src_file, script_dir)
    setup_args = sys.argv[1:]
    subprocess.check_call([sys.executable, "setup.py"] + setup_args, cwd=script_dir)
except subprocess.CalledProcessError as e:
    exit_code = e.returncode
sys.exit(exit_code)
'''
    # Never clobber an existing setup.py without an explicit yes.
    if os.path.exists("setup.py"):
        choice = prompt_user("Overwrite 'setup.py' (y/N)?", 'n')
        overwrite = not choice or choice.lower() == 'y'
        if not overwrite:
            return
        os.unlink("setup.py")
    with open('setup.py', 'w') as setup_descriptor_file:
        setup_descriptor_file.write(setup_py_file_contents)
    print_text_line("\nCreated 'setup.py'.")
class PythonProjectScaffolding(object):
    """Holds the questionnaire answers and renders/creates the project.

    NOTE(review): render_build_descriptor() mutates instance state
    (core_imports becomes a joined string, build_initializer appends
    'init' each call), so an instance is effectively single-use —
    confirm before calling it twice.
    """

    # Template for the generated build.py; $-placeholders are filled from
    # the instance __dict__ in render_build_descriptor().
    DESCRIPTOR_TEMPLATE = string.Template("""\
from pybuilder.core import $core_imports
$activated_plugins
name = "${project_name}"
default_task = "publish"
$initializer
""")

    # Header of the generated @init function; the body is appended below.
    INITIALIZER_HEAD = '''@init
def set_properties(project):
'''

    def __init__(self, project_name):
        self.project_name = project_name
        # layout defaults; overridden by collect_project_information()
        self.dir_source_main_python = DEFAULT_SOURCE_DIRECTORY
        self.dir_source_unittest_python = DEFAULT_UNITTEST_DIRECTORY
        self.dir_source_main_scripts = DEFAULT_SCRIPTS_DIRECTORY
        self.dir_docs = DEFAULT_DOCS_DIRECTORY
        self.core_imports = ['use_plugin']
        self.plugins = ['python.core', 'python.unittest', 'python.install_dependencies']
        self.initializer = ''

    def add_plugins(self, plugins):
        """Append extra plugins to the generated use_plugin() calls."""
        self.plugins.extend(plugins)

    def render_build_descriptor(self):
        """Return the full text of build.py for this configuration."""
        self.build_initializer()
        self.build_imports()
        self.core_imports = ', '.join(self.core_imports)
        return self.DESCRIPTOR_TEMPLATE.substitute(self.__dict__)

    def build_imports(self):
        """Render one use_plugin("...") line per configured plugin."""
        self.activated_plugins = '\n'.join(['use_plugin("%s")' % plugin for plugin in self.plugins])

    def build_initializer(self):
        """Render the @init block setting only the non-default directories."""
        self.core_imports.append('init')
        properties_to_set = []
        if not self.is_default_source_main_python:
            properties_to_set.append(('dir_source_main_python', self.dir_source_main_python))
        if not self.is_default_source_unittest_python:
            properties_to_set.append(('dir_source_unittest_python', self.dir_source_unittest_python))
        if not self.is_default_source_main_scripts:
            properties_to_set.append(('dir_source_main_scripts', self.dir_source_main_scripts))
        if not self.is_default_docs:
            properties_to_set.append(('dir_docs', self.dir_docs))
        initializer_body = self._build_initializer_body_with_properties(properties_to_set)
        self.initializer = self.INITIALIZER_HEAD + initializer_body

    @property
    def is_default_source_main_python(self):
        # True when the user kept the stock source directory
        return self.dir_source_main_python == DEFAULT_SOURCE_DIRECTORY

    @property
    def is_default_source_unittest_python(self):
        return self.dir_source_unittest_python == DEFAULT_UNITTEST_DIRECTORY

    @property
    def is_default_docs(self):
        return self.dir_docs == DEFAULT_DOCS_DIRECTORY

    @property
    def is_default_source_main_scripts(self):
        return self.dir_source_main_scripts == DEFAULT_SCRIPTS_DIRECTORY

    def set_up_project(self):
        """Create the configured directory layout on disk (idempotent)."""
        for needed_directory in (self.dir_source_main_python,
                                 self.dir_source_unittest_python,
                                 self.dir_docs,
                                 self.dir_source_main_scripts):
            if not os.path.exists(needed_directory):
                os.makedirs(needed_directory)

    @staticmethod
    def _build_initializer_body_with_properties(properties_to_set):
        """Render project.set_property lines, or a bare `pass` when empty."""
        initializer_body = ''
        initializer_body += '\n'.join(
            [' project.set_property("{0}", "{1}")'.format(k, v) for k, v in properties_to_set])
        if not initializer_body:
            initializer_body += ' pass'
        return initializer_body
| |
# encoding: utf-8
# -*- test-case-name: IPython.kernel.tests.test_engineservice -*-
"""A Twisted Service Representation of the IPython core.
The IPython Core exposed to the network is called the Engine. Its
representation in Twisted in the EngineService. Interfaces and adapters
are used to abstract out the details of the actual network protocol used.
The EngineService is an Engine that knows nothing about the actual protocol
used.
The EngineService is exposed with various network protocols in modules like:
enginepb.py
enginevanilla.py
As of 12/12/06 the classes in this module have been simplified greatly. It was
felt that we had over-engineered things. To improve the maintainability of the
code we have taken out the ICompleteEngine interface and the completeEngine
method that automatically added methods to engines.
"""
__docformat__ = "restructuredtext en"
# Tell nose to skip this module
__test__ = {}
#-------------------------------------------------------------------------------
# Copyright (C) 2008 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------------
import copy
import sys
import cPickle as pickle
from twisted.application import service
from twisted.internet import defer, reactor
from twisted.python import log, failure, components
import zope.interface as zi
from IPython.kernel.core.interpreter import Interpreter
from IPython.kernel import newserialized, error
#-------------------------------------------------------------------------------
# Interface specification for the Engine
#-------------------------------------------------------------------------------
class IEngineCore(zi.Interface):
    """The minimal required interface for the IPython Engine.

    This interface provides a formal specification of the IPython core.
    All these methods should return deferreds regardless of what side of a
    network connection they are on.

    In general, this class simply wraps a shell class and wraps its return
    values as Deferred objects.  If the underlying shell class method raises
    an exception, this class should convert it to a twisted.failure.Failure
    that will be propagated along the Deferred's errback chain.

    In addition, Failures are aggressive.  By this, we mean that if a method
    is performing multiple actions (like pulling multiple objects) if any
    single one fails, the entire method will fail with that Failure.  It is
    all or nothing.
    """

    id = zi.interface.Attribute("the id of the Engine object")
    properties = zi.interface.Attribute("A dict of properties of the Engine")

    def execute(lines):
        """Execute lines of Python code.

        Returns a dictionary with keys (id, number, stdin, stdout, stderr)
        upon success.

        Returns a failure object if the execution of lines raises an exception.
        """

    def push(namespace):
        """Push dict namespace into the user's namespace.

        Returns a deferred to None or a failure.
        """

    def pull(keys):
        """Pulls values out of the user's namespace by keys.

        Returns a deferred to a tuple of objects or a single object.

        Raises NameError if any one of objects does not exist.
        """

    def push_function(namespace):
        """Push a dict of key, function pairs into the user's namespace.

        Returns a deferred to None or a failure."""

    def pull_function(keys):
        """Pulls functions out of the user's namespace by keys.

        Returns a deferred to a tuple of functions or a single function.

        Raises NameError if any one of the functions does not exist.
        """

    def get_result(i=None):
        """Get the stdin/stdout/stderr of command i.

        Returns a deferred to a dict with keys
        (id, number, stdin, stdout, stderr).

        Raises IndexError if command i does not exist.
        Raises TypeError if i in not an int.
        """

    def reset():
        """Reset the shell.

        This clears the users namespace.  Won't cause modules to be
        reloaded.  Should also re-initialize certain variables like id.
        """

    def kill():
        """Kill the engine by stopping the reactor."""

    def keys():
        """Return the top level variables in the users namespace.

        Returns a deferred to a dict."""
class IEngineSerialized(zi.Interface):
    """Push/Pull methods that take Serialized objects.

    All methods should return deferreds.
    """

    def push_serialized(namespace):
        """Push a dict of keys and Serialized objects into the user's namespace."""

    def pull_serialized(keys):
        """Pull objects by key from the user's namespace as Serialized.

        Returns a list of or one Serialized.

        Raises NameError if any one of the objects does not exist.
        """
class IEngineProperties(zi.Interface):
    """Methods for access to the properties object of an Engine"""

    properties = zi.Attribute("A StrictDict object, containing the properties")

    def set_properties(properties):
        """set properties by key and value"""

    def get_properties(keys=None):
        """get a list of properties by `keys`, if no keys specified, get all"""

    def del_properties(keys):
        """delete properties by `keys`"""

    def has_properties(keys):
        """get a list of bool values for whether `properties` has `keys`"""

    def clear_properties():
        """clear the properties dict"""
class IEngineBase(IEngineCore, IEngineSerialized, IEngineProperties):
    """The basic engine interface that EngineService will implement.

    This exists so it is easy to specify adapters that adapt to and from the
    API that the basic EngineService implements.
    """
    pass
class IEngineQueued(IEngineBase):
    """Interface for adding a queue to an IEngineBase.

    This interface extends the IEngineBase interface to add methods for
    managing the engine's queue.  The implicit details of this interface are
    that the execution of all methods declared in IEngineBase should
    appropriately be put through a queue before execution.

    All methods should return deferreds.
    """

    def clear_queue():
        """Clear the queue."""

    def queue_status():
        """Get the queued and pending commands in the queue."""

    def register_failure_observer(obs):
        """Register an observer of pending Failures.

        The observer must implement IFailureObserver.
        """

    def unregister_failure_observer(obs):
        """Unregister an observer of pending Failures."""
class IEngineThreaded(zi.Interface):
    """A place holder for threaded commands.

    All methods should return deferreds.
    """
    pass
#-------------------------------------------------------------------------------
# Functions and classes to implement the EngineService
#-------------------------------------------------------------------------------
class StrictDict(dict):
    """This is a strict copying dictionary for use as the interface to the
    properties of an Engine.

    :IMPORTANT:
    This object copies the values you set to it, and returns copies to you
    when you request them.  The only way to change properties is explicitly
    through the setitem and getitem of the dictionary interface.

    Example:
    >>> e = get_engine(id)
    >>> L = [1,2,3]
    >>> e.properties['L'] = L
    >>> L == e.properties['L']
    True
    >>> L.append(99)
    >>> L == e.properties['L']
    False

    Note that getitem copies, so calls to methods of objects do not affect
    the properties, as seen here:

    >>> e.properties[1] = range(2)
    >>> print e.properties[1]
    [0, 1]
    >>> e.properties[1].append(2)
    >>> print e.properties[1]
    [0, 1]
    """

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # NOTE(review): starts out True even though nothing was modified
        # yet — presumably so a fresh dict is synced on first use; confirm.
        self.modified = True

    def __getitem__(self, key):
        # Hand back a deep copy so callers cannot mutate stored state.
        return copy.deepcopy(dict.__getitem__(self, key))

    def __setitem__(self, key, value):
        # check if this entry is valid for transport around the network
        # and copying
        try:
            pickle.dumps(key, 2)
            pickle.dumps(value, 2)
            newvalue = copy.deepcopy(value)
        except Exception:
            # was `except Exception, e` — Python-2-only syntax, and the
            # bound exception was never used
            raise error.InvalidProperty("can't be a value: %r" % value)
        dict.__setitem__(self, key, newvalue)
        self.modified = True

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self.modified = True

    def update(self, dikt):
        # items() instead of iteritems() keeps this working on Python 3
        # (and is equally valid on Python 2); routes through __setitem__
        # so the copy/pickle checks still apply.
        for k, v in dikt.items():
            self[k] = v

    def pop(self, key, *args):
        # Generalized (backward-compatibly) to accept dict.pop's optional
        # default argument: pop(key[, default]).
        self.modified = True
        return dict.pop(self, key, *args)

    def popitem(self):
        self.modified = True
        return dict.popitem(self)

    def clear(self):
        self.modified = True
        dict.clear(self)

    def subDict(self, *keys):
        """Return a plain dict holding (copies of) the requested keys."""
        d = {}
        for key in keys:
            d[key] = self[key]
        return d
class EngineAPI(object):
    """This is the object through which the user can edit the `properties`
    attribute of an Engine.

    The Engine Properties object copies all object in and out of itself.
    See the EngineProperties object for details.
    """

    # Class-level guard: while an instance still sees the class attribute
    # (False), __setattr__ permits assignment.  __init__ flips the instance's
    # own _fix to True as its last step, freezing the instance: any later
    # setattr (or any delattr at all) raises KernelError.
    _fix = False

    def __init__(self, id):
        self.id = id
        self.properties = StrictDict()
        # Must be the final assignment -- it disables further assignment.
        self._fix = True

    def __setattr__(self, k, v):
        if self._fix:
            raise error.KernelError("I am protected!")
        else:
            object.__setattr__(self, k, v)

    def __delattr__(self, key):
        # Attribute deletion is never allowed, even during __init__.
        raise error.KernelError("I am protected!")
# Registry of EngineAPI objects, keyed by engine id.
_apiDict = {}


def get_engine(id):
    """Get the EngineAPI object, which currently just provides the properties
    object, by ID.  The EngineAPI is created lazily on first request.

    (No `global` statement needed: the dict is mutated, never rebound.
    Membership test replaces the old `not _apiDict.get(id)` truthiness check.)
    """
    if id not in _apiDict:
        _apiDict[id] = EngineAPI(id)
    return _apiDict[id]
def drop_engine(id):
    """Remove an engine's EngineAPI entry from the registry (no-op if the
    id is unknown).

    (`in` replaces `has_key`, which was removed in Python 3; behavior is
    identical on Python 2.)
    """
    if id in _apiDict:
        del _apiDict[id]
class EngineService(object, service.Service):
    """Adapt a IPython shell into a IEngine implementing Twisted Service."""

    # Python 2 style zope.interface declaration.
    zi.implements(IEngineBase)
    name = 'EngineService'

    def __init__(self, shellClass=Interpreter, mpi=None):
        """Create an EngineService.

        shellClass: something that implements IInterpreter or core1
        mpi: an mpi module that has rank and size attributes
        """
        self.shellClass = shellClass
        self.shell = self.shellClass()
        self.mpi = mpi
        # Assigning id goes through the property setter (_setID below), which
        # also installs the per-engine properties dict and pushes 'id' into
        # the shell namespace; the explicit properties assignment that
        # follows is therefore redundant but harmless.
        self.id = None
        self.properties = get_engine(self.id).properties
        if self.mpi is not None:
            log.msg("MPI started with rank = %i and size = %i" %
                    (self.mpi.rank, self.mpi.size))
            # Under MPI the engine id is simply its MPI rank.
            self.id = self.mpi.rank
        self._seedNamespace()

    # Make id a property so that the shell can get the updated id
    def _setID(self, id):
        self._id = id
        self.properties = get_engine(id).properties
        self.shell.push({'id': id})

    def _getID(self):
        return self._id

    id = property(_getID, _setID)

    def _seedNamespace(self):
        # Expose the mpi module and this engine's id in the user namespace.
        self.shell.push({'mpi': self.mpi, 'id': self.id})

    def executeAndRaise(self, msg, callable, *args, **kwargs):
        """Call a method of self.shell and wrap any exception.

        Returns a Deferred that fires with the call's result, or errbacks
        with a Failure whose exception value has been annotated (via
        shell.formatTraceback) with the engine info in ``msg``.
        """
        d = defer.Deferred()
        try:
            result = callable(*args, **kwargs)
        except:
            # This gives the following:
            # et=exception class
            # ev=exception class instance
            # tb=traceback object
            et, ev, tb = sys.exc_info()
            # This call adds attributes to the exception value
            et, ev, tb = self.shell.formatTraceback(et, ev, tb, msg)
            # Add another attribute
            ev._ipython_engine_info = msg
            f = failure.Failure(ev, et, tb)
            d.errback(f)
        else:
            d.callback(result)
        return d

    # The IEngine methods.  See the interface for documentation.

    def execute(self, lines):
        msg = {'engineid': self.id,
               'method': 'execute',
               'args': [lines]}
        d = self.executeAndRaise(msg, self.shell.execute, lines)
        d.addCallback(self.addIDToResult)
        return d

    def addIDToResult(self, result):
        # Tag a result dict with the id of the engine that produced it.
        result['id'] = self.id
        return result

    def push(self, namespace):
        msg = {'engineid': self.id,
               'method': 'push',
               'args': [repr(namespace.keys())]}
        d = self.executeAndRaise(msg, self.shell.push, namespace)
        return d

    def pull(self, keys):
        msg = {'engineid': self.id,
               'method': 'pull',
               'args': [repr(keys)]}
        d = self.executeAndRaise(msg, self.shell.pull, keys)
        return d

    def push_function(self, namespace):
        msg = {'engineid': self.id,
               'method': 'push_function',
               'args': [repr(namespace.keys())]}
        d = self.executeAndRaise(msg, self.shell.push_function, namespace)
        return d

    def pull_function(self, keys):
        msg = {'engineid': self.id,
               'method': 'pull_function',
               'args': [repr(keys)]}
        d = self.executeAndRaise(msg, self.shell.pull_function, keys)
        return d

    def get_result(self, i=None):
        msg = {'engineid': self.id,
               'method': 'get_result',
               'args': [repr(i)]}
        d = self.executeAndRaise(msg, self.shell.getCommand, i)
        d.addCallback(self.addIDToResult)
        return d

    def reset(self):
        msg = {'engineid': self.id,
               'method': 'reset',
               'args': []}
        # Throw away the old shell entirely, start a fresh one and clear the
        # engine properties, then re-seed mpi/id into the new namespace.
        del self.shell
        self.shell = self.shellClass()
        self.properties.clear()
        d = self.executeAndRaise(msg, self._seedNamespace)
        return d

    def kill(self):
        # Deregister from the module-level engine registry, then stop the
        # reactor; a RuntimeError means it was not running.
        drop_engine(self.id)
        try:
            reactor.stop()
        except RuntimeError:
            log.msg('The reactor was not running apparently.')
            # defer.fail() with no argument wraps the current exception.
            return defer.fail()
        else:
            return defer.succeed(None)

    def keys(self):
        """Return a list of variables names in the users top level namespace.

        This used to return a dict of all the keys/repr(values) in the
        user's namespace.  This was too much info for the ControllerService
        to handle so it is now just a list of keys.
        """
        remotes = []
        # Filter out IPython's own bookkeeping names.
        for k in self.shell.user_ns.iterkeys():
            if k not in ['__name__', '_ih', '_oh', '__builtins__',
                         'In', 'Out', '_', '__', '___', '__IP',
                         'input', 'raw_input']:
                remotes.append(k)
        return defer.succeed(remotes)

    def set_properties(self, properties):
        msg = {'engineid': self.id,
               'method': 'set_properties',
               'args': [repr(properties.keys())]}
        return self.executeAndRaise(msg, self.properties.update, properties)

    def get_properties(self, keys=None):
        msg = {'engineid': self.id,
               'method': 'get_properties',
               'args': [repr(keys)]}
        # None means "all keys".
        if keys is None:
            keys = self.properties.keys()
        return self.executeAndRaise(msg, self.properties.subDict, *keys)

    def _doDel(self, keys):
        # Helper run through executeAndRaise so KeyErrors become Failures.
        for key in keys:
            del self.properties[key]

    def del_properties(self, keys):
        msg = {'engineid': self.id,
               'method': 'del_properties',
               'args': [repr(keys)]}
        return self.executeAndRaise(msg, self._doDel, keys)

    def _doHas(self, keys):
        # has_key is Python 2-only; kept as-is in this legacy module.
        return [self.properties.has_key(key) for key in keys]

    def has_properties(self, keys):
        msg = {'engineid': self.id,
               'method': 'has_properties',
               'args': [repr(keys)]}
        return self.executeAndRaise(msg, self._doHas, keys)

    def clear_properties(self):
        msg = {'engineid': self.id,
               'method': 'clear_properties',
               'args': []}
        return self.executeAndRaise(msg, self.properties.clear)

    def push_serialized(self, sNamespace):
        msg = {'engineid': self.id,
               'method': 'push_serialized',
               'args': [repr(sNamespace.keys())]}
        ns = {}
        # Unwrap each serialized value back into a live object before pushing
        # the whole dict into the shell namespace.
        for k, v in sNamespace.iteritems():
            try:
                unserialized = newserialized.IUnSerialized(v)
                ns[k] = unserialized.getObject()
            except:
                return defer.fail()
        return self.executeAndRaise(msg, self.shell.push, ns)

    def pull_serialized(self, keys):
        msg = {'engineid': self.id,
               'method': 'pull_serialized',
               'args': [repr(keys)]}
        if isinstance(keys, str):
            keys = [keys]
        if len(keys) == 1:
            d = self.executeAndRaise(msg, self.shell.pull, keys)
            d.addCallback(newserialized.serialize)
            return d
        elif len(keys) > 1:
            d = self.executeAndRaise(msg, self.shell.pull, keys)

            @d.addCallback
            def packThemUp(values):
                serials = []
                for v in values:
                    try:
                        serials.append(newserialized.serialize(v))
                    except:
                        return defer.fail(failure.Failure())
                return serials
            # Deferred.addCallback returns the deferred itself, so the name
            # packThemUp is actually bound to ``d`` here.  NOTE(review): an
            # empty ``keys`` list falls through and returns None -- presumably
            # callers never pass one; confirm.
            return packThemUp
def queue(methodToQueue):
    """Decorator that replaces a method body with queue submission.

    The wrapped method does not run its own body; instead it packages its
    call (method name plus arguments) into a Command and hands that to the
    instance's submitCommand, returning the resulting deferred.
    """
    def queuedMethod(this, *args, **kwargs):
        cmd = Command(methodToQueue.__name__, *args, **kwargs)
        return this.submitCommand(cmd)
    return queuedMethod
class QueuedEngine(object):
    """Adapt an IEngineBase to an IEngineQueued by wrapping it.

    The resulting object will implement IEngineQueued which extends
    IEngineCore which extends (IEngineBase, IEngineSerialized).

    This seems like the best way of handling it, but I am not sure. The
    other option is to have the various base interfaces be used like
    mix-in interfaces. The problem I have with this is adaptation is
    more difficult and complicated because there can be multiple
    original and final Interfaces.
    """

    zi.implements(IEngineQueued)

    def __init__(self, engine):
        """Create a QueuedEngine object from an engine.

        engine: An implementor of IEngineCore and IEngineSerialized
        """
        # This is the right way to do these tests rather than
        # IEngineCore in list(zi.providedBy(engine)) which will only
        # pick up the interfaces that are directly declared by engine.
        assert IEngineBase.providedBy(engine), \
            "engine passed to QueuedEngine doesn't provide IEngineBase"

        self.engine = engine
        self.id = engine.id
        self.queued = []          # Commands waiting to run.
        self.history = {}         # Cache of execute results by command number.
        self.engineStatus = {}
        self.currentCommand = None
        self.failureObservers = []

    def _get_properties(self):
        return self.engine.properties

    # Read-only view: the setter deliberately swallows assignment so the
    # wrapped engine's properties dict cannot be replaced via the wrapper.
    properties = property(_get_properties, lambda self, _: None)

    # Queue management methods.  You should not call these directly

    def submitCommand(self, cmd):
        """Submit command to queue.

        Returns the Deferred that will eventually fire with the command's
        result (or failure).
        """
        d = defer.Deferred()
        cmd.setDeferred(d)
        if self.currentCommand is not None:
            if self.currentCommand.finished:
                # log.msg("Running command immediately: %r" % cmd)
                self.currentCommand = cmd
                self.runCurrentCommand()
            else:  # command is still running
                # log.msg("Command is running: %r" % self.currentCommand)
                # log.msg("Queueing: %r" % cmd)
                self.queued.append(cmd)
        else:
            # log.msg("No current commands, running: %r" % cmd)
            self.currentCommand = cmd
            self.runCurrentCommand()
        return d

    def runCurrentCommand(self):
        """Run current command."""
        cmd = self.currentCommand
        f = getattr(self.engine, cmd.remoteMethod, None)
        if f:
            d = f(*cmd.args, **cmd.kwargs)
            # BUGFIX: was `cmd.remoteMethod is 'execute'` -- identity
            # comparison against a string literal is interpreter-dependent;
            # use equality.
            if cmd.remoteMethod == 'execute':
                # Only execute results carry a 'number' and get cached.
                d.addCallback(self.saveResult)
            d.addCallback(self.finishCommand)
            d.addErrback(self.abortCommand)
        else:
            return defer.fail(AttributeError(cmd.remoteMethod))

    def _flushQueue(self):
        """Pop next command in queue and run it."""
        if len(self.queued) > 0:
            self.currentCommand = self.queued.pop(0)
            self.runCurrentCommand()

    def saveResult(self, result):
        """Put the result in the history."""
        self.history[result['number']] = result
        return result

    def finishCommand(self, result):
        """Finish current command."""
        # The order of these commands is absolutely critical.
        self.currentCommand.handleResult(result)
        self.currentCommand.finished = True
        self._flushQueue()
        return result

    def abortCommand(self, reason):
        """Abort current command.

        This eats the Failure but first passes it onto the Deferred that the
        user has.

        It also clears out the queue so subsequent commands don't run.
        """
        # The order of these 3 commands is absolutely critical.  The
        # currentCommand must first be marked as finished BEFORE the queue is
        # cleared and before the current command is sent the failure.
        # Also, the queue must be cleared BEFORE the current command is sent
        # the Failure otherwise the errback chain could trigger new commands
        # to be added to the queue before we clear it.  We should clear ONLY
        # the commands that were in the queue when the error occurred.
        self.currentCommand.finished = True
        s = "%r %r %r" % (self.currentCommand.remoteMethod,
                          self.currentCommand.args,
                          self.currentCommand.kwargs)
        self.clear_queue(msg=s)
        self.currentCommand.handleError(reason)
        return None

    #---------------------------------------------------------------------------
    # IEngineCore methods
    #---------------------------------------------------------------------------

    @queue
    def execute(self, lines):
        pass

    @queue
    def push(self, namespace):
        pass

    @queue
    def pull(self, keys):
        pass

    @queue
    def push_function(self, namespace):
        pass

    @queue
    def pull_function(self, keys):
        pass

    def get_result(self, i=None):
        if i is None:
            # Default to the highest cached command number, or None when the
            # history is empty.  (Replaces the Python 2-only
            # `max(self.history.keys()+[None])`, which relied on None
            # comparing less than every int.)
            i = max(self.history) if self.history else None
        cmd = self.history.get(i, None)
        # Uncomment this line to disable caching of results
        #cmd = None
        if cmd is None:
            return self.submitCommand(Command('get_result', i))
        else:
            return defer.succeed(cmd)

    def reset(self):
        self.clear_queue()
        self.history = {}  # reset the cache - I am not sure we should do this
        return self.submitCommand(Command('reset'))

    def kill(self):
        self.clear_queue()
        return self.submitCommand(Command('kill'))

    @queue
    def keys(self):
        pass

    #---------------------------------------------------------------------------
    # IEngineSerialized methods
    #---------------------------------------------------------------------------

    @queue
    def push_serialized(self, namespace):
        pass

    @queue
    def pull_serialized(self, keys):
        pass

    #---------------------------------------------------------------------------
    # IEngineProperties methods
    #---------------------------------------------------------------------------

    @queue
    def set_properties(self, namespace):
        pass

    @queue
    def get_properties(self, keys=None):
        pass

    @queue
    def del_properties(self, keys):
        pass

    @queue
    def has_properties(self, keys):
        pass

    @queue
    def clear_properties(self):
        pass

    #---------------------------------------------------------------------------
    # IQueuedEngine methods
    #---------------------------------------------------------------------------

    def clear_queue(self, msg=''):
        """Clear the queue, but doesn't cancel the currently running command."""
        for cmd in self.queued:
            cmd.deferred.errback(failure.Failure(error.QueueCleared(msg)))
        self.queued = []
        return defer.succeed(None)

    def queue_status(self):
        """Return a dict with reprs of the queued and pending commands."""
        if self.currentCommand is not None:
            if self.currentCommand.finished:
                pending = repr(None)
            else:
                pending = repr(self.currentCommand)
        else:
            pending = repr(None)
        # List comprehension instead of map() so the value is a list on
        # Python 3 as well.
        dikt = {'queue': [repr(c) for c in self.queued], 'pending': pending}
        return defer.succeed(dikt)

    def register_failure_observer(self, obs):
        self.failureObservers.append(obs)

    def unregister_failure_observer(self, obs):
        self.failureObservers.remove(obs)
# Now register QueuedEngine as an adapter class that makes an IEngineBase into
# an IEngineQueued.
components.registerAdapter(QueuedEngine, IEngineBase, IEngineQueued)
class Command(object):
    """A command object that encapsulates queued commands.

    This class basically keeps track of a command that has been queued
    in a QueuedEngine.  It manages the deferreds and holds the method to be
    called and the arguments to that method.
    """

    def __init__(self, remoteMethod, *args, **kwargs):
        """Build a new Command object."""
        self.remoteMethod = remoteMethod
        self.args = args
        self.kwargs = kwargs
        # Flipped to True by the queue once this command's deferred fires.
        self.finished = False

    def setDeferred(self, d):
        """Sets the deferred attribute of the Command."""
        self.deferred = d

    def __repr__(self):
        # BUGFIX: the old implementation rendered positional args with
        # str(self.args)[1:-2], which chopped the last argument off for
        # multi-argument commands ("(1, 2)" -> "1, ").  Build the argument
        # list explicitly instead.
        parts = [repr(a) for a in self.args]
        parts.extend('%s=%r' % (k, v) for k, v in self.kwargs.items())
        return "%s(%s)" % (self.remoteMethod, ', '.join(parts))

    def handleResult(self, result):
        """When the result is ready, relay it to self.deferred."""
        self.deferred.callback(result)

    def handleError(self, reason):
        """When an error has occurred, relay it to self.deferred."""
        self.deferred.errback(reason)
class ThreadedEngineService(EngineService):
    """An EngineService subclass that defers execute commands to a separate
    thread.

    ThreadedEngineService uses twisted.internet.threads.deferToThread to
    defer execute requests to a separate thread. GUI frontends may want to
    use ThreadedEngineService as the engine in an
    IPython.frontend.frontendbase.FrontEndBase subclass to prevent
    block execution from blocking the GUI thread.
    """

    zi.implements(IEngineBase)

    def __init__(self, shellClass=Interpreter, mpi=None):
        EngineService.__init__(self, shellClass, mpi)

    def wrapped_execute(self, msg, lines):
        """Wrap self.shell.execute to add extra information to tracebacks.

        Runs in the worker thread; on failure re-raises a rebuilt exception
        of the same class carrying the formatted IPython traceback text and
        the engine info in ``msg``.
        """
        # BUGFIX: was `except Exception,e:` (Python 2-only syntax); the bound
        # exception was discarded immediately in favour of sys.exc_info().
        try:
            result = self.shell.execute(lines)
        except Exception:
            # This gives the following:
            # et=exception class
            # ev=exception class instance
            # tb=traceback object
            et, ev, tb = sys.exc_info()
            # This call adds attributes to the exception value
            et, ev, tb = self.shell.formatTraceback(et, ev, tb, msg)
            # Create a new exception with the new attributes and re-raise.
            new_exc = et(ev._ipython_traceback_text)
            new_exc._ipython_engine_info = msg
            raise new_exc
        return result

    def execute(self, lines):
        # Only import this if we are going to use this class
        from twisted.internet import threads

        msg = {'engineid': self.id,
               'method': 'execute',
               'args': [lines]}

        d = threads.deferToThread(self.wrapped_execute, msg, lines)
        d.addCallback(self.addIDToResult)
        return d
| |
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron_lib import constants
from neutron_lib.db import api as db_api
from neutron_lib.db import utils as db_utils
from neutron_lib.exceptions import metering as metering_exc
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_utils import uuidutils
from neutron.db import l3_dvr_db
from neutron.extensions import metering
from neutron.objects import base as base_obj
from neutron.objects import metering as metering_objs
from neutron.objects import router as l3_obj
LOG = logging.getLogger(__name__)
class MeteringDbMixin(metering.MeteringPluginBase):
    """Database-backed implementation of the metering plugin API."""

    @staticmethod
    def _make_metering_label_dict(metering_label, fields=None):
        """Build an API response dict for a metering label, optionally
        filtered down to ``fields``."""
        res = {'id': metering_label['id'],
               'name': metering_label['name'],
               'description': metering_label['description'],
               'shared': metering_label['shared'],
               'project_id': metering_label['project_id']}
        return db_utils.resource_fields(res, fields)

    def create_metering_label(self, context, metering_label):
        """Create a metering label from an API request body."""
        m = metering_label['metering_label']
        metering_obj = metering_objs.MeteringLabel(
            context, id=uuidutils.generate_uuid(),
            description=m['description'], project_id=m['project_id'],
            name=m['name'], shared=m['shared'])
        metering_obj.create()
        return self._make_metering_label_dict(metering_obj)

    def _get_metering_label(self, context, label_id):
        """Fetch a MeteringLabel object or raise MeteringLabelNotFound."""
        metering_label = metering_objs.MeteringLabel.get_object(context,
                                                                id=label_id)
        if not metering_label:
            raise metering_exc.MeteringLabelNotFound(label_id=label_id)
        return metering_label

    def delete_metering_label(self, context, label_id):
        """Delete a metering label; raise if no such label exists."""
        deleted = metering_objs.MeteringLabel.delete_objects(
            context, id=label_id)
        if not deleted:
            raise metering_exc.MeteringLabelNotFound(label_id=label_id)

    def get_metering_label(self, context, label_id, fields=None):
        """Return one metering label as an API dict."""
        return self._make_metering_label_dict(
            self._get_metering_label(context, label_id), fields)

    def get_metering_labels(self, context, filters=None, fields=None,
                            sorts=None, limit=None, marker=None,
                            page_reverse=False):
        """Return a (possibly paginated/filtered) list of label dicts."""
        filters = filters or {}
        pager = base_obj.Pager(sorts, limit, page_reverse, marker)
        metering_labels = metering_objs.MeteringLabel.get_objects(
            context, _pager=pager, **filters)
        return [self._make_metering_label_dict(ml) for ml in metering_labels]

    @staticmethod
    def _make_metering_label_rule_dict(metering_label_rule, fields=None):
        """Build an API response dict for a metering label rule."""
        res = {'id': metering_label_rule['id'],
               'metering_label_id': metering_label_rule['metering_label_id'],
               'direction': metering_label_rule['direction'],
               'remote_ip_prefix': metering_label_rule.get('remote_ip_prefix'),
               'source_ip_prefix': metering_label_rule.get('source_ip_prefix'),
               'destination_ip_prefix': metering_label_rule.get(
                   'destination_ip_prefix'),
               'excluded': metering_label_rule['excluded']}
        return db_utils.resource_fields(res, fields)

    def get_metering_label_rules(self, context, filters=None, fields=None,
                                 sorts=None, limit=None, marker=None,
                                 page_reverse=False):
        """Return a (possibly paginated/filtered) list of rule dicts."""
        filters = filters or {}
        pager = base_obj.Pager(sorts, limit, page_reverse, marker)
        metering_label_rules = metering_objs.MeteringLabelRule.get_objects(
            context, _pager=pager, **filters)
        return [self._make_metering_label_rule_dict(mlr)
                for mlr in metering_label_rules]

    def _get_metering_label_rule(self, context, rule_id):
        """Fetch a MeteringLabelRule or raise MeteringLabelRuleNotFound."""
        metering_label_rule = metering_objs.MeteringLabelRule.get_object(
            context, id=rule_id)
        if not metering_label_rule:
            raise metering_exc.MeteringLabelRuleNotFound(rule_id=rule_id)
        return metering_label_rule

    def get_metering_label_rule(self, context, rule_id, fields=None):
        """Return one metering label rule as an API dict."""
        return self._make_metering_label_rule_dict(
            self._get_metering_label_rule(context, rule_id), fields)

    def create_metering_label_rule(self, context, metering_label_rule):
        """Create a rule under an existing label.

        A DBReferenceError on the label foreign key is translated to
        MeteringLabelNotFound.
        """
        label_id = metering_label_rule['metering_label_id']
        try:
            with db_api.CONTEXT_WRITER.using(context):
                rule = metering_objs.MeteringLabelRule(
                    context, id=uuidutils.generate_uuid(),
                    metering_label_id=label_id,
                    direction=metering_label_rule['direction'],
                    excluded=metering_label_rule['excluded'],
                )
                # The IP prefixes are optional; normalize each one that is
                # present to a netaddr.IPNetwork.
                if metering_label_rule.get('remote_ip_prefix'):
                    rule.remote_ip_prefix = netaddr.IPNetwork(
                        metering_label_rule['remote_ip_prefix'])
                if metering_label_rule.get('source_ip_prefix'):
                    rule.source_ip_prefix = netaddr.IPNetwork(
                        metering_label_rule['source_ip_prefix'])
                if metering_label_rule.get('destination_ip_prefix'):
                    rule.destination_ip_prefix = netaddr.IPNetwork(
                        metering_label_rule['destination_ip_prefix'])
                rule.create()
            return self._make_metering_label_rule_dict(rule)
        except db_exc.DBReferenceError:
            raise metering_exc.MeteringLabelNotFound(label_id=label_id)

    def delete_metering_label_rule(self, context, rule_id):
        """Delete a rule and return its API dict."""
        with db_api.CONTEXT_WRITER.using(context):
            rule = self._get_metering_label_rule(context, rule_id)
            rule.delete()
        return self._make_metering_label_rule_dict(rule)

    def _get_metering_rules_dict(self, metering_label):
        """Return the label's rules as a list of API dicts."""
        return [self._make_metering_label_rule_dict(rule)
                for rule in metering_label.rules]

    def _make_router_dict(self, router):
        """Build the per-router skeleton dict used by the sync-data methods."""
        distributed = l3_dvr_db.is_distributed_router(router)
        res = {'id': router['id'],
               'name': router['name'],
               'project_id': router['project_id'],
               'admin_state_up': router['admin_state_up'],
               'status': router['status'],
               'gw_port_id': router['gw_port_id'],
               'distributed': distributed,
               constants.METERING_LABEL_KEY: []}
        return res

    def _process_sync_metering_data(self, context, labels):
        """Group the labels (with their rules) under the routers they apply
        to, skipping administratively-down routers.

        BUGFIX: the full router list is now cached in its own variable.
        Previously a single ``routers`` variable was reused, so a shared
        label processed after a non-shared one would wrongly reuse that
        label's *filtered* router list instead of fetching all routers.
        """
        all_routers = None
        routers_dict = {}
        for label in labels:
            if label.shared:
                # Shared labels apply to every router; fetch the full list
                # once and cache it.
                if all_routers is None:
                    all_routers = l3_obj.Router.get_objects(context)
                routers = all_routers
            else:
                filters = {
                    'id': [router.id for router in label.db_obj.routers]}
                routers = l3_obj.Router.get_objects(context, **filters)

            for router in routers:
                if not router['admin_state_up']:
                    continue
                router_dict = routers_dict.get(
                    router['id'],
                    self._make_router_dict(router))

                rules = self._get_metering_rules_dict(label)
                data = {'id': label['id'], 'rules': rules,
                        'shared': label['shared'], 'name': label['name']}
                router_dict[constants.METERING_LABEL_KEY].append(data)

                routers_dict[router['id']] = router_dict
        return list(routers_dict.values())

    def get_sync_data_for_rule(self, context, rule):
        """Return per-router sync data for a single rule."""
        label = metering_objs.MeteringLabel.get_object(
            context, id=rule['metering_label_id'])

        if label.shared:
            routers = l3_obj.Router.get_objects(context)
        else:
            filters = {'id': [router.id for router in label.db_obj.routers]}
            routers = l3_obj.Router.get_objects(context, **filters)

        routers_dict = {}
        for router in routers:
            router_dict = routers_dict.get(router['id'],
                                           self._make_router_dict(router))
            data = {'id': label['id'], 'rule': rule}
            router_dict[constants.METERING_LABEL_KEY].append(data)
            routers_dict[router['id']] = router_dict
        return list(routers_dict.values())

    def get_sync_data_metering(self, context, label_id=None):
        """Return sync data for one label, or for all labels when None."""
        filters = {'id': [label_id]} if label_id else {}
        labels = metering_objs.MeteringLabel.get_objects(
            context, **filters)
        return self._process_sync_metering_data(context, labels)
| |
""" Represent and manipulate text collections as a list of documents."""
from collections import defaultdict
import multiprocessing
from cort.analysis import data_structures
from cort.core import documents
from cort.core import spans
__author__ = 'smartschat'
def from_string(string):
    """Parse one CoNLL document from its string representation."""
    return documents.CoNLLDocument(string)
class Corpus:
    """Represents a text collection (a corpus) as a list of documents.

    Such a text collection can also be read from data, and be supplemented with
    antecedent information.

    Attributes:
        description(str): A human-readable description of the corpus.
        documents (list(Document)): A list of CoNLL documents.
    """

    def __init__(self, description, corpus_documents):
        """Construct a Corpus from a description and a list of documents.

        Args:
            description (str): A human-readable description of the corpus.
            corpus_documents (list(Document)): A list of documents.
        """
        self.description = description
        self.documents = corpus_documents

    def __iter__(self):
        """Return an iterator over documents in the corpus.

        Returns:
            An iterator over CoNLLDocuments.
        """
        return iter(self.documents)

    @staticmethod
    def from_file(description, coref_file):
        """Construct a new corpus from a description and a file.

        The file must contain documents in the format for the CoNLL shared
        tasks on coreference resolution, see
        http://conll.cemantix.org/2012/data.html.

        Args:
            description (str): A human-readable description of the corpus.
            coref_file (file): A text file of documents in the CoNLL format.

        Returns:
            Corpus: A corpus consisting of the documents described in
            coref_file.
        """
        # NOTE(review): returns a plain empty list rather than an empty
        # Corpus when no file is given -- callers seem to tolerate both;
        # confirm before changing.
        if coref_file is None:
            return []

        document_as_strings = []
        current_document = ""

        # A "#begin" header starts a new document and closes the previous
        # one (if any).
        for line in coref_file.readlines():
            if line.startswith("#begin") and current_document != "":
                document_as_strings.append(current_document)
                current_document = ""
            current_document += line

        document_as_strings.append(current_document)

        return Corpus(description, sorted([from_string(doc) for doc in
                                           document_as_strings]))

    def write_to_file(self, file):
        """Write a string representation of the corpus to a file.

        Args:
            file (file): The file the corpus should be written to.
        """
        for document in self.documents:
            file.write(document.get_string_representation())

    def write_antecedent_decisions_to_file(self, file):
        """Write antecedent decisions in the corpus to a file.

        For the format, have a look at the documentation for
        read_antecedent_decisions in this class.

        Args:
            file (file): The file the antecedent decisions should be written
                to.
        """
        for document in self.documents:
            document.write_antecedent_decisions_to_file(file)

    def read_antecedents(self, file):
        """Augment corpus with antecedent decisions read from a file.

        The attribute annotated_mentions is overwritten by mentions read in
        from the antecedents file. Input files should have one antecedent
        decision per line, where entries are separated by tabs. The format is

            doc_identifier  (anaphor_start, anaphor_end)  (ante_start, ante_end)

        where

        - doc_id is the identifier in the first line of an CoNLL document
          after #begin document, such as (bc/cctv/00/cctv_0000); part 000
        - anaphor_start is the position in the document where the anaphor
          begins (counting from 0),
        - anaphor_end is the position where the anaphor ends (inclusive),
        - ante_start, ante_end analogously for the antecedent.

        Args:
            file (file): The file the antecedent decisions should be read
                from.
        """
        doc_identifier_to_pairs = defaultdict(list)
        for line in file.readlines():
            splitted = line.split("\t")
            doc_id = splitted[0]
            span_anaphor = splitted[1]
            span_antecedent = splitted[2]
            doc_identifier_to_pairs[doc_id].append(
                (spans.Span.parse(span_anaphor), spans.Span.parse(
                    span_antecedent)))

        for doc in self.documents:
            pairs = sorted(doc_identifier_to_pairs[doc.identifier])
            doc.get_annotated_mentions_from_antecedent_decisions(pairs)

    def read_coref_decisions(self,
                             mention_entity_mapping,
                             antecedent_mapping=None):
        """Augment corpus with coreference and antecedent decisions.

        Set set_id attribute and antecedent information for system mentions.

        Args:
            mention_entity_mapping (dict(Mention, int)): A mapping of mentions
                to entity identifiers.
            antecedent_mapping (dict(Mention, Mention)): A mapping of mentions
                to their antecedent. Optional.
        """
        for doc in self.documents:
            for mention in doc.system_mentions:
                if mention in mention_entity_mapping:
                    mention.attributes["set_id"] = \
                        mention_entity_mapping[mention]

                    # Antecedents are only recorded for mentions that are
                    # part of some entity.
                    if antecedent_mapping and mention in antecedent_mapping:
                        antecedent = antecedent_mapping[mention]
                        mention.attributes['antecedent'] = antecedent
                        mention.document.antecedent_decisions[mention.span] = \
                            antecedent.span

    def get_antecedent_decisions(self, which_mentions="annotated"):
        """Get all antecedent decisions in this corpus.

        Args:
            which_mentions (str): Either "annotated" or "system". Defaults to
                "annotated". Signals whether to consider annotated mentions
                or system mentions.

        Returns:
            StructuredCoreferenceAnalysis: A StructuredCoreferenceAnalysis
            containing all antecedent decisions. Can be accessed like a
            dict. If this is assigned to a variable ``x``, the
            decisions can be accessed via ``x[self.description][
            "decisions"]["all"]``, where ``self.description`` is the
            ``description`` attribute of the corpus (e.g. ``x["pair"][
            "decisions"]["all"]``).
        """
        antecedent_decisions = {
            self.description: {
                "decisions": {
                    "all": {}
                }
            }
        }

        all_decisions = set()

        for doc in self.documents:
            doc_decisions = doc.get_antecedent_decisions(which_mentions)
            for ana, ante in doc_decisions.items():
                all_decisions.add((ana, ante))

        antecedent_decisions[self.description]["decisions"]["all"] = \
            data_structures.EnhancedSet(all_decisions)

        return data_structures.StructuredCoreferenceAnalysis(
            antecedent_decisions, {self.description: self}, None)

    def are_coreferent(self, m, n):
        """Compute whether two mentions are coreferent in this corpus.

        One use case of this function is when ``m`` and ``n`` belong to a
        different corpus object, but you are interested in whether they are
        coreferent according to the annotation present in this corpus.

        Args:
            m (Mention): A mention.
            n (Mention): Another mention.

        Returns:
            True if ``m`` and ``n`` are coreferent according to the annotation
            present in this corpus, False otherwise.
        """
        if m.document != n.document:
            return False
        elif m.document not in self.documents:
            return False
        else:
            # Look the mentions up in *this* corpus's copy of the document.
            doc = self.documents[self.documents.index(m.document)]

            if m.span not in doc.spans_to_annotated_mentions or \
                    n.span not in doc.spans_to_annotated_mentions:
                return False

            m_in_this_corpus = doc.spans_to_annotated_mentions[m.span]
            n_in_this_corpus = doc.spans_to_annotated_mentions[n.span]

            return m_in_this_corpus.is_coreferent_with(n_in_this_corpus)
| |
import numpy as np
from statsmodels.tools.testing import ParamsTableTestBunch
# Stata `gmm` reference output: additive-error model, one-step GMM,
# robust VCE.  The exact Stata command is recorded in est['cmdline'].
est = dict(
    rank=8,
    N=3629,
    Q=4.59536484786e-20,
    J=1.66765790329e-16,
    J_df=0,
    k_1=8,
    converged=1,
    has_xtinst=0,
    type=1,
    n_eq=1,
    k=8,
    n_moments=8,
    k_aux=8,
    k_eq_model=0,
    k_eq=8,
    cmdline="gmm ( docvis - exp({xb:private medicaid aget aget2 educyr actlim totchr}+{b0})), instruments(incomet ssiratio aget aget2 educyr actlim totchr) onestep vce(robust)",  # noqa:E501
    cmd="gmm",
    estat_cmd="gmm_estat",
    predict="gmm_p",
    marginsnotok="_ALL",
    eqnames="1",
    technique="gn",
    winit="Unadjusted",
    estimator="onestep",
    wmatrix="robust",
    vce="robust",
    vcetype="Robust",
    params="xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",  # noqa:E501
    inst_1="incomet ssiratio aget aget2 educyr actlim totchr _cons",
    params_1="xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",  # noqa:E501
    sexp_1="docvis - exp( ({xb_private} *private + {xb_medicaid} *medicaid + {xb_aget} *aget + {xb_aget2} *aget2 + {xb_educyr} *educyr + {xb_actlim} *actlim + {xb_totchr} *totchr) + {b0} )",  # noqa:E501
    properties="b V",
)

# One row per parameter (order given by est['params']); columns listed in
# params_table_colnames below.
params_table = np.array([
    .62093805844748, .35860052573857, 1.731559252928, .08335206643438,
    -.08190605683724, 1.3237821737322, np.nan, 1.9599639845401,
    0, .68895699568302, .43817618784254, 1.5723286997298,
    .11587434043505, -.1698525513714, 1.5477665427374, np.nan,
    1.9599639845401, 0, .25750627258076, .05009451793791,
    5.1404082358855, 2.741421857e-07, .15932282159956, .35568972356197,
    np.nan, 1.9599639845401, 0, -.05352997420414,
    .01103202674353, -4.8522339048464, 1.220785200e-06, -.07515234929795,
    -.03190759911034, np.nan, 1.9599639845401, 0,
    .03106248018916, .01032090201131, 3.0096671933432, .00261534090329,
    .01083388395902, .05129107641931, np.nan, 1.9599639845401,
    0, .14175365608301, .0494498280382, 2.8666157539212,
    .00414886404159, .04483377408643, .23867353807958, np.nan,
    1.9599639845401, 0, .23128095221422, .01565221628818,
    14.776243054406, 2.084750820e-49, .20060317201116, .26195873241727,
    np.nan, 1.9599639845401, 0, .34763567088735,
    .31615794015526, 1.0995633091379, .27152243570261, -.27202250524333,
    .96729384701803, np.nan, 1.9599639845401, 0
    ]).reshape(8, 9)

params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()

# Stata labels every row '_cons'; the true parameter names are in
# est['params'].
params_table_rownames = ['_cons'] * 8

# Robust covariance of the parameter estimates (8 x 8).
cov = np.array([
    .12859433705998, .13265896898444, .00910916927048, -.00144786113189,
    -.00037337560793, -.00152379041042, -.00336772308907, -.09899309651531,
    .13265896898444, .19199837159222, .00979636564963, -.00135323134276,
    .00180599814286, -.00930935415071, -.00460031335865, -.13429156867927,
    .00910916927048, .00979636564963, .00250946072743, -.00052373946978,
    5.155389870e-07, -.00016461502154, -.00025816911604, -.00869892550441,
    -.00144786113189, -.00135323134276, -.00052373946978, .00012170561407,
    8.334416260e-06, -.00002526568199, .00003797456789, .00131001446811,
    -.00037337560793, .00180599814286, 5.155389870e-07, 8.334416260e-06,
    .00010652101833, -.00026856403693, -.00003344387872, -.00122933496346,
    -.00152379041042, -.00930935415071, -.00016461502154, -.00002526568199,
    -.00026856403693, .00244528549301, .00003610001892, .00527355381855,
    -.00336772308907, -.00460031335865, -.00025816911604, .00003797456789,
    -.00003344387872, .00003610001892, .00024499187473, .00300075896709,
    -.09899309651531, -.13429156867927, -.00869892550441, .00131001446811,
    -.00122933496346, .00527355381855, .00300075896709, .09995584312322
    ]).reshape(8, 8)

cov_colnames = ['_cons'] * 8

cov_rownames = ['_cons'] * 8

results_addonestep = ParamsTableTestBunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
)
# Stata `gmm` reference output: additive-error model, two-step GMM,
# robust VCE.
est = dict(
    rank=8,
    N=3629,
    Q=6.09567389485e-33,
    J=2.21212005644e-29,
    J_df=0,
    k_1=8,
    converged=1,
    has_xtinst=0,
    type=1,
    n_eq=1,
    k=8,
    n_moments=8,
    k_aux=8,
    k_eq_model=0,
    k_eq=8,
    cmdline="gmm ( docvis - exp({xb:private medicaid aget aget2 educyr actlim totchr}+{b0})), instruments(incomet ssiratio aget aget2 educyr actlim totchr) twostep vce(robust)",  # noqa:E501
    cmd="gmm",
    estat_cmd="gmm_estat",
    predict="gmm_p",
    marginsnotok="_ALL",
    eqnames="1",
    technique="gn",
    winit="Unadjusted",
    estimator="twostep",
    wmatrix="robust",
    vce="robust",
    vcetype="Robust",
    params="xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",  # noqa:E501
    inst_1="incomet ssiratio aget aget2 educyr actlim totchr _cons",
    params_1="xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",  # noqa:E501
    sexp_1="docvis - exp( ({xb_private} *private + {xb_medicaid} *medicaid + {xb_aget} *aget + {xb_aget2} *aget2 + {xb_educyr} *educyr + {xb_actlim} *actlim + {xb_totchr} *totchr) + {b0} )",  # noqa:E501
    properties="b V",
)

# One row per parameter; columns listed in params_table_colnames below.
params_table = np.array([
    .6209380584426, .35860052570457, 1.7315592530786, .08335206640755,
    -.08190605677548, 1.3237821736607, np.nan, 1.9599639845401,
    0, .68895699501744, .43817618789764, 1.5723286980131,
    .11587434083298, -.16985255214498, 1.5477665421799, np.nan,
    1.9599639845401, 0, .25750627271754, .05009451794125,
    5.1404082382732, 2.741421823e-07, .15932282172979, .35568972370529,
    np.nan, 1.9599639845401, 0, -.05352997423123,
    .01103202674378, -4.8522339071944, 1.220785186e-06, -.07515234932551,
    -.03190759913694, np.nan, 1.9599639845401, 0,
    .03106248018903, .01032090201422, 3.0096671924822, .0026153409107,
    .01083388395319, .05129107642488, np.nan, 1.9599639845401,
    0, .14175365616691, .04944982804302, 2.8666157553386,
    .00414886402301, .04483377416089, .23867353817294, np.nan,
    1.9599639845401, 0, .23128095224221, .01565221628892,
    14.776243055497, 2.084750786e-49, .20060317203771, .26195873244672,
    np.nan, 1.9599639845401, 0, .34763567064032,
    .31615794015859, 1.099563308345, .27152243604826, -.27202250549689,
    .96729384677754, np.nan, 1.9599639845401, 0
    ]).reshape(8, 9)

params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()

params_table_rownames = ['_cons'] * 8

# Robust covariance of the parameter estimates (8 x 8).
cov = np.array([
    .12859433703559, .1326589689683, .00910916927021, -.00144786113188,
    -.00037337560766, -.00152379040753, -.00336772308885, -.09899309649807,
    .1326589689683, .1919983716405, .00979636565235, -.00135323134324,
    .00180599814488, -.00930935415256, -.00460031335946, -.13429156869395,
    .00910916927021, .00979636565235, .00250946072777, -.00052373946983,
    5.155391569e-07, -.00016461502162, -.00025816911611, -.00869892550672,
    -.00144786113188, -.00135323134324, -.00052373946983, .00012170561408,
    8.334416227e-06, -.00002526568198, .0000379745679, .00131001446858,
    -.00037337560766, .00180599814488, 5.155391569e-07, 8.334416227e-06,
    .00010652101839, -.00026856403706, -.00003344387875, -.00122933496459,
    -.00152379040753, -.00930935415256, -.00016461502162, -.00002526568198,
    -.00026856403706, .00244528549348, .00003610001887, .00527355381795,
    -.00336772308885, -.00460031335946, -.00025816911611, .0000379745679,
    -.00003344387875, .00003610001887, .00024499187475, .00300075896724,
    -.09899309649807, -.13429156869395, -.00869892550672, .00131001446858,
    -.00122933496459, .00527355381795, .00300075896724, .09995584312533
    ]).reshape(8, 8)

cov_colnames = ['_cons'] * 8

cov_rownames = ['_cons'] * 8

results_addtwostep = ParamsTableTestBunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
)
# Stata `gmm` reference output: multiplicative-error model
# (docvis / exp(xb) - 1 moment condition), one-step GMM, robust VCE.
# Over-identified: 9 moments for 8 parameters (J_df=1).
est = dict(
    rank=8,
    N=3629,
    Q=.0002538911897719,
    J=.9213711276820714,
    J_df=1,
    k_1=8,
    converged=1,
    has_xtinst=0,
    type=1,
    n_eq=1,
    k=8,
    n_moments=9,
    k_aux=8,
    k_eq_model=0,
    k_eq=8,
    cmdline="gmm ( (docvis / exp({xb:private medicaid aget aget2 educyr actlim totchr}+{b0})) - 1), instruments(income ssiratio medicaid aget aget2 educyr actlim totchr) onestep vce(robust)",  # noqa:E501
    cmd="gmm",
    estat_cmd="gmm_estat",
    predict="gmm_p",
    marginsnotok="_ALL",
    eqnames="1",
    technique="gn",
    winit="Unadjusted",
    estimator="onestep",
    wmatrix="robust",
    vce="robust",
    vcetype="Robust",
    params="xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",  # noqa:E501
    inst_1="income ssiratio medicaid aget aget2 educyr actlim totchr _cons",
    params_1="xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",  # noqa:E501
    sexp_1="(docvis / exp( ({xb_private} *private + {xb_medicaid} *medicaid + {xb_aget} *aget + {xb_aget2} *aget2 + {xb_educyr} *educyr + {xb_actlim} *actlim + {xb_totchr} *totchr) + {b0} )) - 1",  # noqa:E501
    properties="b V",
)

# One row per parameter; columns listed in params_table_colnames below.
params_table = np.array([
    .67045580921478, .25039046077656, 2.6776411814389, .00741425985435,
    .17969952402034, 1.1612120944092, np.nan, 1.9599639845401,
    0, .28551241628798, .10358919281318, 2.7561988710819,
    .00584774303307, .08248132918657, .4885435033894, np.nan,
    1.9599639845401, 0, .2672004738793, .05203985579809,
    5.1345352476769, 2.828420839e-07, .16520423075439, .36919671700421,
    np.nan, 1.9599639845401, 0, -.0560702624564,
    .01191485946838, -4.7059105149509, 2.527353692e-06, -.07942295789528,
    -.03271756701753, np.nan, 1.9599639845401, 0,
    .01448379701656, .00782559934942, 1.8508227127214, .06419506241955,
    -.00085409586574, .02982168989887, np.nan, 1.9599639845401,
    0, .18130374188096, .0382173439987, 4.7440173206998,
    2.095209222e-06, .10639912405874, .25620835970318, np.nan,
    1.9599639845401, 0, .28146161235562, .01380395117777,
    20.389931022715, 2.054354003e-92, .25440636520284, .30851685950839,
    np.nan, 1.9599639845401, 0, .51399857133918,
    .10262653035745, 5.0084375799215, 5.487366567e-07, .31285426798028,
    .71514287469808, np.nan, 1.9599639845401, 0
    ]).reshape(8, 9)

params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()

params_table_rownames = ['_cons'] * 8

# Robust covariance of the parameter estimates (8 x 8).
cov = np.array([
    .0626953828479, .02323594786658, .00535172023578, -.00103050587759,
    -.00154311442856, .00154515839603, -.00043159973572, -.01570852578318,
    .02323594786658, .01073072086769, .00207768328305, -.00039713375955,
    -.00049396171685, .00027652302157, -.00020408147523, -.00701276303887,
    .00535172023578, .00207768328305, .00270814659149, -.00059652725999,
    -.00012298559534, .00021079055266, -.00004341699196, -.0031278522429,
    -.00103050587759, -.00039713375955, -.00059652725999, .00014196387615,
    .00002481291175, -.00006035908648, .00001093157006, .00059187926133,
    -.00154311442856, -.00049396171685, -.00012298559534, .00002481291175,
    .00006124000518, -.00001857594061, .00001436652009, .00008106194688,
    .00154515839603, .00027652302157, .00021079055266, -.00006035908648,
    -.00001857594061, .00146056538231, -.00016708887634, -.00074321753343,
    -.00043159973572, -.00020408147523, -.00004341699196, .00001093157006,
    .00001436652009, -.00016708887634, .00019054906812, -.00028024031412,
    -.01570852578318, -.00701276303887, -.0031278522429, .00059187926133,
    .00008106194688, -.00074321753343, -.00028024031412, .01053220473321
    ]).reshape(8, 8)

cov_colnames = ['_cons'] * 8

cov_rownames = ['_cons'] * 8

results_multonestep = ParamsTableTestBunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
)
# Stata `gmm` reference output: multiplicative-error model, two-step GMM,
# robust VCE.  Over-identified: 9 moments for 8 parameters (J_df=1).
est = dict(
    rank=8,
    N=3629,
    Q=.0002589826272982,
    J=.9398479544653281,
    J_df=1,
    k_1=8,
    converged=1,
    has_xtinst=0,
    type=1,
    n_eq=1,
    k=8,
    n_moments=9,
    k_aux=8,
    k_eq_model=0,
    k_eq=8,
    cmdline="gmm ( (docvis / exp({xb:private medicaid aget aget2 educyr actlim totchr}+{b0})) - 1), instruments(income ssiratio medicaid aget aget2 educyr actlim totchr) twostep vce(robust)",  # noqa:E501
    cmd="gmm",
    estat_cmd="gmm_estat",
    predict="gmm_p",
    marginsnotok="_ALL",
    eqnames="1",
    technique="gn",
    winit="Unadjusted",
    estimator="twostep",
    wmatrix="robust",
    vce="robust",
    vcetype="Robust",
    params="xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",  # noqa:E501
    inst_1="income ssiratio medicaid aget aget2 educyr actlim totchr _cons",
    params_1="xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",  # noqa:E501
    sexp_1="(docvis / exp( ({xb_private} *private + {xb_medicaid} *medicaid + {xb_aget} *aget + {xb_aget2} *aget2 + {xb_educyr} *educyr + {xb_actlim} *actlim + {xb_totchr} *totchr) + {b0} )) - 1",  # noqa:E501
    properties="b V",
)

# One row per parameter; columns listed in params_table_colnames below.
params_table = np.array([
    .67815288158883, .25053953449054, 2.7067699433856, .00679413212727,
    .18710441728393, 1.1692013458937, np.nan, 1.9599639845401,
    0, .28872837589732, .1032733938985, 2.7957672833051,
    .00517766683505, .08631624329503, .49114050849961, np.nan,
    1.9599639845401, 0, .27067071818542, .05199695467114,
    5.2055109745809, 1.934635127e-07, .16875855972422, .37258287664662,
    np.nan, 1.9599639845401, 0, -.05690856524563,
    .01189861686254, -4.7827882772482, 1.728801925e-06, -.08022942576205,
    -.03358770472921, np.nan, 1.9599639845401, 0,
    .01438118999252, .00783219080428, 1.8361644081315, .06633334485657,
    -.00096962190392, .02973200188896, np.nan, 1.9599639845401,
    0, .18038262255626, .03826653224544, 4.7138481584715,
    2.430818311e-06, .10538159754195, .25538364757056, np.nan,
    1.9599639845401, 0, .28251027986119, .01378475918788,
    20.494393555287, 2.415775858e-93, .25549264831739, .30952791140498,
    np.nan, 1.9599639845401, 0, .5077134442587,
    .10235830367214, 4.9601588346456, 7.043556343e-07, .30709485554269,
    .7083320329747, np.nan, 1.9599639845401, 0
    ]).reshape(8, 9)

params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()

params_table_rownames = ['_cons'] * 8

# Robust covariance of the parameter estimates (8 x 8).
cov = np.array([
    .06277005834274, .02315710174743, .00533574120292, -.00102544979294,
    -.00154463417995, .0015508406274, -.00043796451278, -.01559999387335,
    .02315710174743, .01066539388732, .00206217803508, -.00039331197813,
    -.00049172930967, .00027603135609, -.00020644763374, -.00694810289238,
    .00533574120292, .00206217803508, .00270368329507, -.0005950942106,
    -.00012276584915, .00021462173623, -.00004681980342, -.00310767551047,
    -.00102544979294, -.00039331197813, -.0005950942106, .00014157708324,
    .00002474211336, -.00006134660609, .00001178280314, .00058658157366,
    -.00154463417995, -.00049172930967, -.00012276584915, .00002474211336,
    .00006134321279, -.00001855941375, .00001443470174, .0000776612477,
    .0015508406274, .00027603135609, .00021462173623, -.00006134660609,
    -.00001855941375, .00146432749009, -.00016643326394, -.00074847803836,
    -.00043796451278, -.00020644763374, -.00004681980342, .00001178280314,
    .00001443470174, -.00016643326394, .00019001958587, -.00027573517109,
    -.01559999387335, -.00694810289238, -.00310767551047, .00058658157366,
    .0000776612477, -.00074847803836, -.00027573517109, .01047722233064
    ]).reshape(8, 8)

cov_colnames = ['_cons'] * 8

cov_rownames = ['_cons'] * 8

results_multtwostep = ParamsTableTestBunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
)
# Stata `gmm` reference output: multiplicative-error model, two-step GMM
# with robust weight matrix, centered moments, and the default
# (unadjusted) VCE — note vce="unadjusted" and no vcetype below.
est = dict(
    rank=8,
    N=3629,
    Q=.0002590497181628,
    J=.940091427212973,
    J_df=1,
    k_1=8,
    converged=1,
    has_xtinst=0,
    type=1,
    n_eq=1,
    k=8,
    n_moments=9,
    k_aux=8,
    k_eq_model=0,
    k_eq=8,
    cmdline="gmm ( (docvis / exp({xb:private medicaid aget aget2 educyr actlim totchr}+{b0})) - 1), instruments(income ssiratio medicaid aget aget2 educyr actlim totchr) twostep wmatrix(robust) vce(unadjusted) center",  # noqa:E501
    cmd="gmm",
    estat_cmd="gmm_estat",
    predict="gmm_p",
    marginsnotok="_ALL",
    eqnames="1",
    technique="gn",
    winit="Unadjusted",
    estimator="twostep",
    wmatrix="robust",
    vce="unadjusted",
    params="xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",  # noqa:E501
    inst_1="income ssiratio medicaid aget aget2 educyr actlim totchr _cons",
    params_1="xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",  # noqa:E501
    sexp_1="(docvis / exp( ({xb_private} *private + {xb_medicaid} *medicaid + {xb_aget} *aget + {xb_aget2} *aget2 + {xb_educyr} *educyr + {xb_actlim} *actlim + {xb_totchr} *totchr) + {b0} )) - 1",  # noqa:E501
    properties="b V",
)

# One row per parameter; columns listed in params_table_colnames below.
params_table = np.array([
    .67815486150911, .25018082946574, 2.7106587781218, .00671496899138,
    .1878094461339, 1.1685002768843, np.nan, 1.9599639845401,
    0, .28872920226215, .10311429027815, 2.8000891193967,
    .00510884999633, .08662890702558, .49082949749873, np.nan,
    1.9599639845401, 0, .27067161407481, .0518802415232,
    5.2172388972735, 1.816099638e-07, .16898820918009, .37235501896953,
    np.nan, 1.9599639845401, 0, -.05690878166227,
    .0118728670827, -4.7931793783164, 1.641587211e-06, -.08017917353758,
    -.03363838978695, np.nan, 1.9599639845401, 0,
    .01438116368432, .00781887593806, 1.8392878718448, .0658728559523,
    -.00094355155385, .0297058789225, np.nan, 1.9599639845401,
    0, .18038238197017, .03819661477822, 4.7224703816696,
    2.329970297e-06, .10551839267351, .25524637126682, np.nan,
    1.9599639845401, 0, .28251055147828, .01376659609161,
    20.521452768591, 1.385109204e-93, .25552851894901, .30949258400755,
    np.nan, 1.9599639845401, 0, .50771182444237,
    .10208891085993, 4.9732318639284, 6.584582712e-07, .30762123593598,
    .70780241294876, np.nan, 1.9599639845401, 0
    ]).reshape(8, 9)

params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()

# Unlike the other result sets in this file, the explicit parameter names
# are used as row labels here.
params_table_rownames = ['xb_private', 'xb_medicaid', 'xb_aget', 'xb_aget2',
                         'xb_educyr', 'xb_actlim', 'xb_totchr', 'b0']

# Unadjusted covariance of the parameter estimates (8 x 8).
cov = np.array([
    .06259044743217, .02308524749042, .00531802921719, -.0010223122446,
    -.00154027662468, .00154945994717, -.00043816683551, -.01554486097815,
    .02308524749042, .01063255685957, .00205438168765, -.00039193802388,
    -.00049039628782, .0002760841411, -.0002064504141, -.00691934867666,
    .00531802921719, .00205438168765, .00269155946051, -.00059250696972,
    -.00012247118567, .00021403084056, -.00004749600121, -.00308951213731,
    -.0010223122446, -.00039193802388, -.00059250696972, .00014096497276,
    .00002468288871, -.00006115240604, .00001190303672, .00058327928125,
    -.00154027662468, -.00049039628782, -.00012247118567, .00002468288871,
    .00006113482093, -.00001854325518, .00001439868646, .00007784185009,
    .00154945994717, .0002760841411, .00021403084056, -.00006115240604,
    -.00001854325518, .00145898138052, -.00016596475072, -.00074697007542,
    -.00043816683551, -.0002064504141, -.00004749600121, .00001190303672,
    .00001439868646, -.00016596475072, .00018951916795, -.00027350320218,
    -.01554486097815, -.00691934867666, -.00308951213731, .00058327928125,
    .00007784185009, -.00074697007542, -.00027350320218, .01042214572057
    ]).reshape(8, 8)

cov_colnames = ['xb_private', 'xb_medicaid', 'xb_aget', 'xb_aget2',
                'xb_educyr', 'xb_actlim', 'xb_totchr', 'b0']

cov_rownames = ['xb_private', 'xb_medicaid', 'xb_aget', 'xb_aget2',
                'xb_educyr', 'xb_actlim', 'xb_totchr', 'b0']

results_multtwostepdefault = ParamsTableTestBunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
)
# Stata `gmm` reference output: multiplicative-error model, two-step GMM
# with robust weight matrix and centered moments, robust VCE.
est = dict(
    rank=8,
    N=3629,
    Q=.0002590497181628,
    J=.940091427212973,
    J_df=1,
    k_1=8,
    converged=1,
    has_xtinst=0,
    type=1,
    n_eq=1,
    k=8,
    n_moments=9,
    k_aux=8,
    k_eq_model=0,
    k_eq=8,
    cmdline="gmm ( (docvis / exp({xb:private medicaid aget aget2 educyr actlim totchr}+{b0})) - 1), instruments(income ssiratio medicaid aget aget2 educyr actlim totchr) twostep wmatrix(robust) center",  # noqa:E501
    cmd="gmm",
    estat_cmd="gmm_estat",
    predict="gmm_p",
    marginsnotok="_ALL",
    eqnames="1",
    technique="gn",
    winit="Unadjusted",
    estimator="twostep",
    wmatrix="robust",
    vce="robust",
    vcetype="Robust",
    params="xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",  # noqa:E501
    inst_1="income ssiratio medicaid aget aget2 educyr actlim totchr _cons",
    params_1="xb_private xb_medicaid xb_aget xb_aget2 xb_educyr xb_actlim xb_totchr b0",  # noqa:E501
    sexp_1="(docvis / exp( ({xb_private} *private + {xb_medicaid} *medicaid + {xb_aget} *aget + {xb_aget2} *aget2 + {xb_educyr} *educyr + {xb_actlim} *actlim + {xb_totchr} *totchr) + {b0} )) - 1",  # noqa:E501
    properties="b V",
)

# One row per parameter; columns listed in params_table_colnames below.
params_table = np.array([
    .67815486150911, .25053960844836, 2.7067770469869, .00679398676131,
    .18710625224955, 1.1692034707687, np.nan, 1.9599639845401,
    0, .28872920226215, .10327332768441, 2.7957770775479,
    .00517750993835, .08631719943712, .49114120508719, np.nan,
    1.9599639845401, 0, .27067161407481, .05199697557915,
    5.2055261110869, 1.934477426e-07, .16875941463467, .37258381351495,
    np.nan, 1.9599639845401, 0, -.05690878166227,
    .01189862079945, -4.7828048831437, 1.728659059e-06, -.08022964989488,
    -.03358791342965, np.nan, 1.9599639845401, 0,
    .01438116368432, .00783219272776, 1.8361605982125, .06633390816397,
    -.00096965198207, .02973197935072, np.nan, 1.9599639845401,
    0, .18038238197017, .03826654814775, 4.71383991244,
    2.430916736e-06, .10538132578791, .25538343815243, np.nan,
    1.9599639845401, 0, .28251055147828, .01378476509846,
    20.494404471929, 2.415234157e-93, .25549290834996, .3095281946066,
    np.nan, 1.9599639845401, 0, .50771182444237,
    .10235828870929, 4.960143734762, 7.044103886e-07, .307093265053,
    .70833038383174, np.nan, 1.9599639845401, 0
    ]).reshape(8, 9)

params_table_colnames = 'b se z pvalue ll ul df crit eform'.split()

params_table_rownames = ['_cons'] * 8

# Robust covariance of the parameter estimates (8 x 8).
cov = np.array([
    .06277009540146, .02315708886727, .00533574465012, -.0010254503134,
    -.00154463481696, .00155084007911, -.00043796389511, -.01559997980204,
    .02315708886727, .01066538021101, .00206217721135, -.00039331175814,
    -.00049172883672, .00027603038575, -.00020644729789, -.00694809209467,
    .00533574465012, .00206217721135, .00270368546938, -.00059509464294,
    -.000122765895, .00021462183651, -.00004681968717, -.003107676362,
    -.0010254503134, -.00039331175814, -.00059509464294, .00014157717693,
    .00002474211983, -.00006134664668, .00001178278294, .00058658166731,
    -.00154463481696, -.00049172883672, -.000122765895, .00002474211983,
    .00006134324292, -.00001855938213, .00001443468876, .00007766055925,
    .00155084007911, .00027603038575, .00021462183651, -.00006134664668,
    -.00001855938213, .00146432870714, -.00016643336248, -.00074847778305,
    -.00043796389511, -.00020644729789, -.00004681968717, .00001178278294,
    .00001443468876, -.00016643336248, .00019001974882, -.00027573582025,
    -.01559997980204, -.00694809209467, -.003107676362, .00058658166731,
    .00007766055925, -.00074847778305, -.00027573582025, .0104772192675
    ]).reshape(8, 8)

cov_colnames = ['_cons'] * 8

cov_rownames = ['_cons'] * 8

results_multtwostepcenter = ParamsTableTestBunch(
    params_table=params_table,
    params_table_colnames=params_table_colnames,
    params_table_rownames=params_table_rownames,
    cov=cov,
    cov_colnames=cov_colnames,
    cov_rownames=cov_rownames,
    **est
)
| |
#
# Module implementing synchronization primitives
#
# multiprocessing/synchronize.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
__all__ = [
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event',
]
import errno
import sys
import tempfile
import threading
from . import context
from . import process
from . import util
from ._ext import _billiard, ensure_SemLock
from .five import range, monotonic
# Try to import the mp.synchronize module cleanly, if it fails
# raise ImportError for platforms lacking a working sem_open implementation.
# See issue 3770
ensure_SemLock()
#
# Constants
#
RECURSIVE_MUTEX, SEMAPHORE = list(range(2))
SEM_VALUE_MAX = _billiard.SemLock.SEM_VALUE_MAX
# Resolve ``sem_unlink`` (removes a named semaphore): prefer the one from
# _billiard, fall back to CPython's _multiprocessing (Py3.4+ uses named
# semaphores), and finally to None on platforms without named semaphores.
try:
    sem_unlink = _billiard.SemLock.sem_unlink
except AttributeError:  # pragma: no cover
    try:
        # Py3.4+ implements sem_unlink and the semaphore must be named
        from _multiprocessing import sem_unlink  # noqa
    except ImportError:
        sem_unlink = None  # noqa
#
# Base class for semaphores and mutexes; wraps `_billiard.SemLock`
#
def _semname(sl):
try:
return sl.name
except AttributeError:
pass
class SemLock(object):
    """Base wrapper around ``_billiard.SemLock`` for the semaphore and
    mutex primitives defined in this module."""

    # Process-wide generator of random suffixes for semaphore names.
    _rand = tempfile._RandomNameSequence()

    def __init__(self, kind, value, maxvalue, ctx=None):
        # kind: RECURSIVE_MUTEX or SEMAPHORE; value: initial count;
        # maxvalue: upper bound enforced by the underlying semlock.
        if ctx is None:
            ctx = context._default_context.get_context()
        name = ctx.get_start_method()
        # On win32, or when children are created by fork, the OS-level
        # name can be unlinked immediately after creation.
        unlink_now = sys.platform == 'win32' or name == 'fork'
        if sem_unlink:
            # Named semaphores: retry on name collisions (EEXIST) with
            # fresh random names; give up after 100 attempts.
            for i in range(100):
                try:
                    sl = self._semlock = _billiard.SemLock(
                        kind, value, maxvalue, self._make_name(), unlink_now,
                    )
                except (OSError, IOError) as exc:
                    if getattr(exc, 'errno', None) != errno.EEXIST:
                        raise
                else:
                    break
            else:
                exc = IOError('cannot find file for semaphore')
                exc.errno = errno.EEXIST
                raise exc
        else:
            # Platform without sem_unlink: unnamed semlock.
            sl = self._semlock = _billiard.SemLock(kind, value, maxvalue)
        util.debug('created semlock with handle %s', sl.handle)
        self._make_methods()
        if sem_unlink:
            if sys.platform != 'win32':
                # After a fork the child must fix up its copy of the
                # semlock state.
                def _after_fork(obj):
                    obj._semlock._after_fork()
                util.register_after_fork(self, _after_fork)
            if _semname(self._semlock) is not None:
                # We only get here if we are on Unix with forking
                # disabled. When the object is garbage collected or the
                # process shuts down we unlink the semaphore name
                from .semaphore_tracker import register
                register(self._semlock.name)
                util.Finalize(self, SemLock._cleanup, (self._semlock.name,),
                              exitpriority=0)

    @staticmethod
    def _cleanup(name):
        # Unlink the OS-level semaphore and drop it from the tracker.
        from .semaphore_tracker import unregister
        sem_unlink(name)
        unregister(name)

    def _make_methods(self):
        # Expose the raw semlock's acquire/release directly on self.
        self.acquire = self._semlock.acquire
        self.release = self._semlock.release

    def __enter__(self):
        return self._semlock.__enter__()

    def __exit__(self, *args):
        return self._semlock.__exit__(*args)

    def __getstate__(self):
        # Picklable only while spawning a child process.
        context.assert_spawning(self)
        sl = self._semlock
        if sys.platform == 'win32':
            h = context.get_spawning_popen().duplicate_for_child(sl.handle)
        else:
            h = sl.handle
        state = (h, sl.kind, sl.maxvalue)
        try:
            # Named semlocks also transfer their name.
            state += (sl.name, )
        except AttributeError:
            pass
        return state

    def __setstate__(self, state):
        self._semlock = _billiard.SemLock._rebuild(*state)
        util.debug('recreated blocker with handle %r', state[0])
        self._make_methods()

    @staticmethod
    def _make_name():
        # Unique, process-prefixed name for a new named semaphore.
        return '%s-%s' % (process.current_process()._config['semprefix'],
                          next(SemLock._rand))
class Semaphore(SemLock):
    """A counting semaphore backed by a shared ``_billiard.SemLock``."""

    def __init__(self, value=1, ctx=None):
        """Create a semaphore with initial counter ``value``."""
        SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx)

    def get_value(self):
        """Return the current value of the underlying semaphore."""
        return self._semlock._get_value()

    def __repr__(self):
        try:
            current = self._semlock._get_value()
        except Exception:
            current = 'unknown'
        return '<%s(value=%s)>' % (self.__class__.__name__, current)
class BoundedSemaphore(Semaphore):
    """Semaphore whose value can never exceed its initial value."""

    def __init__(self, value=1, ctx=None):
        # maxvalue == initial value gives the bounded behaviour.
        SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx)

    def __repr__(self):
        try:
            current = self._semlock._get_value()
        except Exception:
            current = 'unknown'
        return '<%s(value=%s, maxvalue=%s)>' % (
            self.__class__.__name__, current, self._semlock.maxvalue)
class Lock(SemLock):
    '''
    Non-recursive lock.
    '''

    def __init__(self, ctx=None):
        # A lock is a semaphore with both initial and maximum value 1.
        SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx)

    def __repr__(self):
        # Best-effort description of the current owner.
        try:
            sl = self._semlock
            if sl._is_mine():
                owner = process.current_process().name
                tname = threading.current_thread().name
                if tname != 'MainThread':
                    owner += '|' + tname
            elif sl._get_value() == 1:
                owner = 'None'
            elif sl._count() > 0:
                owner = 'SomeOtherThread'
            else:
                owner = 'SomeOtherProcess'
        except Exception:
            owner = 'unknown'
        return '<%s(owner=%s)>' % (self.__class__.__name__, owner)
class RLock(SemLock):
    '''
    Recursive lock
    '''

    def __init__(self, ctx=None):
        SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx)

    def __repr__(self):
        # Best-effort description of owner and recursion depth.
        try:
            sl = self._semlock
            if sl._is_mine():
                owner = process.current_process().name
                tname = threading.current_thread().name
                if tname != 'MainThread':
                    owner += '|' + tname
                depth = sl._count()
            elif sl._get_value() == 1:
                owner, depth = 'None', 0
            elif sl._count() > 0:
                owner, depth = 'SomeOtherThread', 'nonzero'
            else:
                owner, depth = 'SomeOtherProcess', 'nonzero'
        except Exception:
            owner, depth = 'unknown', 'unknown'
        return '<%s(%s, %s)>' % (self.__class__.__name__, owner, depth)
class Condition(object):
    '''
    Condition variable built from a lock and three counting semaphores.
    '''

    def __init__(self, lock=None, ctx=None):
        assert ctx
        self._lock = lock or ctx.RLock()
        # Number of threads/processes currently blocked in wait().
        self._sleeping_count = ctx.Semaphore(0)
        # Number of sleepers that have been woken by notify*().
        self._woken_count = ctx.Semaphore(0)
        # Released once per sleeper to be woken.
        self._wait_semaphore = ctx.Semaphore(0)
        self._make_methods()

    def __getstate__(self):
        # Picklable only while spawning a child process.
        context.assert_spawning(self)
        return (self._lock, self._sleeping_count,
                self._woken_count, self._wait_semaphore)

    def __setstate__(self, state):
        (self._lock, self._sleeping_count,
         self._woken_count, self._wait_semaphore) = state
        self._make_methods()

    def __enter__(self):
        return self._lock.__enter__()

    def __exit__(self, *args):
        return self._lock.__exit__(*args)

    def _make_methods(self):
        self.acquire = self._lock.acquire
        self.release = self._lock.release

    def __repr__(self):
        try:
            num_waiters = (self._sleeping_count._semlock._get_value() -
                           self._woken_count._semlock._get_value())
        except Exception:
            # BUGFIX: fallback string used to be misspelled 'unkown'.
            num_waiters = 'unknown'
        return '<%s(%s, %s)>' % (
            self.__class__.__name__, self._lock, num_waiters)

    def wait(self, timeout=None):
        """Release the lock, block until notified (or timeout), then
        reacquire the lock.  Returns the result of the inner acquire."""
        assert self._lock._semlock._is_mine(), \
            'must acquire() condition before using wait()'

        # indicate that this thread is going to sleep
        self._sleeping_count.release()

        # release lock (completely, even if recursively held)
        count = self._lock._semlock._count()
        for i in range(count):
            self._lock.release()

        try:
            # wait for notification or timeout
            return self._wait_semaphore.acquire(True, timeout)
        finally:
            # indicate that this thread has woken
            self._woken_count.release()

            # reacquire lock to the previous recursion level
            for i in range(count):
                self._lock.acquire()

    def notify(self):
        """Wake up one waiter, if any.  The lock must be held."""
        assert self._lock._semlock._is_mine(), 'lock is not owned'
        assert not self._wait_semaphore.acquire(False)

        # to take account of timeouts since last notify() we subtract
        # woken_count from sleeping_count and rezero woken_count
        while self._woken_count.acquire(False):
            res = self._sleeping_count.acquire(False)
            assert res

        if self._sleeping_count.acquire(False):  # try grabbing a sleeper
            self._wait_semaphore.release()       # wake up one sleeper
            self._woken_count.acquire()          # wait for sleeper to wake

            # rezero _wait_semaphore in case a timeout just happened
            self._wait_semaphore.acquire(False)

    def notify_all(self):
        """Wake up all waiters.  The lock must be held."""
        assert self._lock._semlock._is_mine(), 'lock is not owned'
        assert not self._wait_semaphore.acquire(False)

        # to take account of timeouts since last notify*() we subtract
        # woken_count from sleeping_count and rezero woken_count
        while self._woken_count.acquire(False):
            res = self._sleeping_count.acquire(False)
            assert res

        sleepers = 0
        while self._sleeping_count.acquire(False):
            self._wait_semaphore.release()       # wake up one sleeper
            sleepers += 1

        if sleepers:
            for i in range(sleepers):
                self._woken_count.acquire()      # wait for a sleeper to wake

            # rezero wait_semaphore in case some timeouts just happened
            while self._wait_semaphore.acquire(False):
                pass

    def wait_for(self, predicate, timeout=None):
        """Wait until ``predicate()`` is truthy or *timeout* elapses;
        return the last value of the predicate."""
        result = predicate()
        if result:
            return result
        if timeout is not None:
            endtime = monotonic() + timeout
        else:
            endtime = None
            waittime = None
        while not result:
            if endtime is not None:
                waittime = endtime - monotonic()
                if waittime <= 0:
                    break
            self.wait(waittime)
            result = predicate()
        return result
class Event(object):
    """Event built from a condition variable and a one-token semaphore.

    ``_flag`` holds exactly one token while the event is set and no
    token while it is clear.
    """

    def __init__(self, ctx=None):
        assert ctx
        self._cond = ctx.Condition(ctx.Lock())
        self._flag = ctx.Semaphore(0)

    def is_set(self):
        """Return True if the event is set."""
        with self._cond:
            if not self._flag.acquire(False):
                return False
            self._flag.release()
            return True

    def set(self):
        """Set the event and wake all waiters."""
        with self._cond:
            # acquire-then-release keeps the token count at exactly one.
            self._flag.acquire(False)
            self._flag.release()
            self._cond.notify_all()

    def clear(self):
        """Clear the event (remove the token, if present)."""
        with self._cond:
            self._flag.acquire(False)

    def wait(self, timeout=None):
        """Block until the event is set or *timeout* elapses; return the
        final set-state."""
        with self._cond:
            if self._flag.acquire(False):
                self._flag.release()
            else:
                self._cond.wait(timeout)

            if not self._flag.acquire(False):
                return False
            self._flag.release()
            return True
#
# Barrier
#
if hasattr(threading, 'Barrier'):

    class Barrier(threading.Barrier):
        """Process-shared barrier: reuses ``threading.Barrier`` logic but
        stores its mutable state (state, count) in shared memory and uses
        a process-shared condition variable."""

        def __init__(self, parties, action=None, timeout=None, ctx=None):
            assert ctx
            import struct
            from .heap import BufferWrapper
            # Shared buffer holding two C ints: _state and _count.
            wrapper = BufferWrapper(struct.calcsize('i') * 2)
            cond = ctx.Condition()
            self.__setstate__((parties, action, timeout, cond, wrapper))
            self._state = 0
            self._count = 0

        def __setstate__(self, state):
            (self._parties, self._action, self._timeout,
             self._cond, self._wrapper) = state
            # View of the shared buffer as an array of ints.
            self._array = self._wrapper.create_memoryview().cast('i')

        def __getstate__(self):
            return (self._parties, self._action, self._timeout,
                    self._cond, self._wrapper)

        # _state and _count shadow the plain attributes used by
        # threading.Barrier, redirecting them into shared memory so all
        # processes see the same values.
        @property
        def _state(self):
            return self._array[0]

        @_state.setter
        def _state(self, value):  # noqa
            self._array[0] = value

        @property
        def _count(self):
            return self._array[1]

        @_count.setter
        def _count(self, value):  # noqa
            self._array[1] = value
else:
    class Barrier(object):  # noqa
        """Placeholder: barriers require Python 3's threading.Barrier."""

        def __init__(self, *args, **kwargs):
            raise NotImplementedError('Barrier only supported on Py3')
| |
# -*- coding: utf-8 -*-
"""
This module implements the GenNetwork class, which implements generic network
logic. Also implements Population and GenConnection classes
@author: DanielM
"""
from neuron import h
import random
import numpy as np
import matplotlib.pyplot as plt
import math
import time
import os
import shelve
import scipy.stats as stats
class GenConnection(object):
    """Base class for synaptic connections between populations.

    Subclasses are expected to set ``pre_pop``, ``post_pop``,
    ``init_parameters`` and ``pre_cell_targets`` in their ``__init__``.
    """

    def __init__(self):
        pass

    def get_description(self):
        """Return a descriptive string: the connection name followed by one
        line per presynaptic cell listing its target cells."""
        name = self.pre_pop.name + ' to ' + self.post_pop.name + '\n'
        pre_cell_targets = '\n'.join([str(x) for x in self.pre_cell_targets])
        return name + pre_cell_targets

    def get_name(self):
        """Return a short ``'pre to post'`` name for the connection."""
        # pre_pop may be a plain string (e.g. 'Implicit' for patterned
        # stimulation) rather than a Population instance.
        if isinstance(self.pre_pop, str):
            return self.pre_pop + ' to ' + str(self.post_pop)
        else:
            return str(self.pre_pop) + ' to ' + str(self.post_pop)

    def get_properties(self):
        """Return the connection properties in a picklable form."""
        properties = {'name': self.get_name(),
                      'init_parameters': self.init_parameters,
                      'pre_cell_targets': self.pre_cell_targets}
        # locals() captured in __init__ holds live objects; stringify the
        # ones that are not picklable.
        properties['init_parameters']['post_pop'] = str(properties['init_parameters']['post_pop'])
        properties['init_parameters']['self'] = str(properties['init_parameters']['self'])
        try:
            properties['init_parameters']['pre_pop'] = str(properties['init_parameters']['pre_pop'])
        except KeyError:
            # Implicit-stimulation connections have no pre_pop parameter.
            pass
        return {self.get_name(): properties}
class tmgsynConnection(GenConnection):
    """Proximity-based connection using the tmgsyn dynamic synapse model,
    with per-synapse conductance recording."""

    def __init__(self, pre_pop, post_pop,
                 target_pool, target_segs, divergence,
                 tau_1, tau_facil, U, tau_rec, e, thr, delay, weight):
        """Create a connection with tmgsyn as published by Tsodyks, Pawelzik &
        Markram, 1998.

        The tmgsyn is a dynamic three state implicit resource synapse model.
        The response onset is instantaneous and the decay is exponential.
        It combines a frequency dependent depression and a facilitation
        mechanism that both depend on independent time constants.
        The synaptic targets are chosen by proximity, that is the target pool
        are the cells in closest proximity.

        Parameters
        ----------
        pre_pop - gennetwork.Population
            The presynaptic population
        post_pop - gennetwork.Population
            the postsynaptic population
        target_pool - int
            the number of cells in the target pool
        target_segs - str
            the name of the segments that are possible synaptic targets at the
            postsynaptic population
        divergence - int
            divergence in absolute terms, that is the number of synapses each
            presynaptic cell forms
        tau_1 - numeric
            the time constant of synaptic decay. conforms to the transition
            from the active to the inactive resource state. units of time as
            in neuron standard units
        tau_facil - numeric
            the time constant of facilitation decay. this essentially creates
            the frequency dependence. set to 0 for no facilitation.
        U - numeric
            maximum of postsynaptic response. has to be considered together
            with the weight from the netcon.
        tau_rec - numeric
            time constant of recovery from inactive for recovered state.
            gives depression since inactive resources do not contribute to
            postsynaptic signal. set to 0 for no depression.
        e - numeric
            equilibrium potential of the postsynaptic conductance
        thr - numeric
            threshold for synaptic event at the presynaptic source
        delay - numeric
            delay between presynaptic signal and onset of postsynaptic signal
        weight - numeric
            weight for the netcon object connecting source and target

        Returns
        -------
        None

        Use Cases
        ---------
        >>> tmgsynConnection(nw.population[0], nw.population[1],
                             3, 'prox', 1, 6.0, 0, 0.04, 0, 0, 10, 3, 0)
        A non-facilitating, non-depressing excitatory connection.
        """
        # locals() captures every constructor argument (including self) so
        # the parametrization can be reported later by get_properties().
        self.init_parameters = locals()
        self.pre_pop = pre_pop
        self.post_pop = post_pop
        pre_pop.add_connection(self)
        post_pop.add_connection(self)
        # Lay both populations out evenly on a unit circle; proximity between
        # cells is then the euclidean distance between their circle points.
        pre_pop_rad = (np.arange(pre_pop.get_cell_number(), dtype=float) /
                       pre_pop.get_cell_number()) * (2*np.pi)
        post_pop_rad = (np.arange(post_pop.get_cell_number(), dtype=float) /
                        post_pop.get_cell_number()) * (2*np.pi)
        pre_pop_pos = pos(pre_pop_rad)
        post_pop_pos = pos(post_pop_rad)
        pre_cell_target = []
        synapses = []
        netcons = []
        conductances = []
        for idx, curr_cell_pos in enumerate(pre_pop_pos):
            # Rank all postsynaptic cells by distance to this presynaptic
            # cell and draw `divergence` distinct targets from the
            # `target_pool` closest ones.
            curr_dist = []
            for post_cell_pos in post_pop_pos:
                curr_dist.append(euclidian_dist(curr_cell_pos, post_cell_pos))
            sort_idc = np.argsort(curr_dist)
            closest_cells = sort_idc[0:target_pool]
            picked_cells = np.random.choice(closest_cells,
                                            divergence,
                                            replace=False)
            pre_cell_target.append(picked_cells)
            for tar_c in picked_cells:
                curr_syns = []
                curr_netcons = []
                curr_conductances = []
                curr_seg_pool = post_pop[tar_c].get_segs_by_name(target_segs)
                chosen_seg = np.random.choice(curr_seg_pool)
                # NOTE(review): `seg` is never used in this loop -- every
                # iteration places a synapse at chosen_seg(0.5). Confirm
                # whether one synapse per segment of the pool was intended.
                for seg in chosen_seg:
                    curr_syn = h.tmgsyn(chosen_seg(0.5))
                    curr_syn.tau_1 = tau_1
                    curr_syn.tau_facil = tau_facil
                    curr_syn.U = U
                    curr_syn.e = e
                    curr_syn.tau_rec = tau_rec
                    curr_syns.append(curr_syn)
                    curr_netcon = h.NetCon(pre_pop[idx].soma(0.5)._ref_v,
                                           curr_syn, thr, delay,
                                           weight, sec=pre_pop[idx].soma)
                    # Record the synaptic conductance for later analysis.
                    curr_gvec = h.Vector()
                    curr_gvec.record(curr_syn._ref_g)
                    curr_conductances.append(curr_gvec)
                    curr_netcons.append(curr_netcon)
                netcons.append(curr_netcons)
                synapses.append(curr_syns)
                conductances.append(curr_conductances)
        self.conductances = conductances
        self.netcons = netcons
        self.pre_cell_targets = np.array(pre_cell_target)
        self.synapses = synapses
class tmgsynConnectionExponentialProb(GenConnection):
    """tmgsyn connection whose targets are drawn with a probability that
    decays exponentially with distance rank (see ``scale``)."""

    def __init__(self, pre_pop, post_pop,
                 scale, target_segs, divergence,
                 tau_1, tau_facil, U, tau_rec, e, thr, delay, weight):
        """Create a connection with tmgsyn as published by Tsodyks, Pawelzik &
        Markram, 1998.

        The tmgsyn is a dynamic three state implicit resource synapse model.
        The response onset is instantaneous and the decay is exponential.
        It combines a frequency dependent depression and a facilitation
        mechanism that both depend on independent time constants.
        Targets are drawn (with replacement) with a probability that decays
        exponentially with the distance rank of the postsynaptic cell.

        Parameters
        ----------
        pre_pop - gennetwork.Population
            The presynaptic population
        post_pop - gennetwork.Population
            the postsynaptic population
        scale - numeric
            scale parameter of the exponential distribution that governs the
            target-choice probability as a function of distance rank
        target_segs - str
            the name of the segments that are possible synaptic targets at the
            postsynaptic population
        divergence - int
            divergence in absolute terms, that is the number of synapses each
            presynaptic cell forms
        tau_1 - numeric
            the time constant of synaptic decay. conforms to the transition
            from the active to the inactive resource state. units of time as
            in neuron standard units
        tau_facil - numeric
            the time constant of facilitation decay. this essentially creates
            the frequency dependence. set to 0 for no facilitation.
        U - numeric
            maximum of postsynaptic response. has to be considered together
            with the weight from the netcon.
        tau_rec - numeric
            time constant of recovery from inactive for recovered state.
            gives depression since inactive resources do not contribute to
            postsynaptic signal. set to 0 for no depression.
        e - numeric
            equilibrium potential of the postsynaptic conductance
        thr - numeric
            threshold for synaptic event at the presynaptic source
        delay - numeric
            delay between presynaptic signal and onset of postsynaptic signal
        weight - numeric
            weight for the netcon object connecting source and target

        Returns
        -------
        None

        Use Cases
        ---------
        >>> tmgsynConnection(nw.population[0], nw.population[1],
                             3, 'prox', 1, 6.0, 0, 0.04, 0, 0, 10, 3, 0)
        A non-facilitating, non-depressing excitatory connection.
        """
        # locals() captures every constructor argument (including self) so
        # the parametrization can be reported later by get_properties().
        self.init_parameters = locals()
        self.pre_pop = pre_pop
        self.post_pop = post_pop
        pre_pop.add_connection(self)
        post_pop.add_connection(self)
        # Lay both populations out evenly on a unit circle; proximity between
        # cells is then the euclidean distance between their circle points.
        pre_pop_rad = (np.arange(pre_pop.get_cell_number(), dtype=float) /
                       pre_pop.get_cell_number()) * (2*np.pi)
        post_pop_rad = (np.arange(post_pop.get_cell_number(), dtype=float) /
                        post_pop.get_cell_number()) * (2*np.pi)
        pre_pop_pos = pos(pre_pop_rad)
        post_pop_pos = pos(post_pop_rad)
        pre_cell_target = []
        synapses = []
        netcons = []
        # Set up the exponential distribution over distance-ranked targets.
        # (The original comment said "Gaussian", but stats.expon is used.)
        loc = post_pop.get_cell_number() / 2  # NOTE(review): unused -- leftover from a Gaussian variant?
        gauss = stats.expon(loc=0, scale=scale)
        pdf = gauss.pdf(np.arange(post_pop.get_cell_number()))
        pdf = pdf/pdf.sum()  # normalize into a probability vector
        for idx, curr_cell_pos in enumerate(pre_pop_pos):
            curr_dist = []
            for post_cell_pos in post_pop_pos:
                curr_dist.append(euclidian_dist(curr_cell_pos, post_cell_pos))
            sort_idc = np.argsort(curr_dist)
            # Draw targets by distance rank with exponentially decaying
            # probability; replace=True allows repeated pre/post pairs.
            picked_cells = np.random.choice(sort_idc, divergence,
                                            replace=True, p=pdf)
            pre_cell_target.append(picked_cells)
            for target_cell in picked_cells:
                curr_syns = []
                curr_netcons = []
                curr_seg_pool = post_pop[target_cell].get_segs_by_name(target_segs)
                chosen_seg = np.random.choice(curr_seg_pool)
                # NOTE(review): `seg` is never used in this loop -- every
                # iteration places a synapse at chosen_seg(0.5). Confirm
                # whether one synapse per segment of the pool was intended.
                for seg in chosen_seg:
                    curr_syn = h.tmgsyn(chosen_seg(0.5))
                    curr_syn.tau_1 = tau_1
                    curr_syn.tau_facil = tau_facil
                    curr_syn.U = U
                    curr_syn.e = e
                    curr_syn.tau_rec = tau_rec
                    curr_syns.append(curr_syn)
                    curr_netcon = h.NetCon(pre_pop[idx].soma(0.5)._ref_v,
                                           curr_syn, thr, delay, weight,
                                           sec=pre_pop[idx].soma)
                    curr_netcons.append(curr_netcon)
                netcons.append(curr_netcons)
                synapses.append(curr_syns)
        self.netcons = netcons
        self.pre_cell_targets = np.array(pre_cell_target)
        self.synapses = synapses
class tmgsynConnection_old(GenConnection):
    """Previous revision of the proximity-based tmgsyn connection.

    NOTE(review): differs from tmgsynConnection only in that it does not
    record the synaptic conductances -- presumably superseded; confirm
    before removing.
    """

    def __init__(self, pre_pop, post_pop,
                 target_pool, target_segs, divergence,
                 tau_1, tau_facil, U, tau_rec, e, thr, delay, weight):
        """Create a connection with tmgsyn as published by Tsodyks, Pawelzik &
        Markram, 1998.

        The tmgsyn is a dynamic three state implicit resource synapse model.
        The response onset is instantaneous and the decay is exponential.
        It combines a frequency dependent depression and a facilitation
        mechanism that both depend on independent time constants.
        The synaptic targets are chosen by proximity, that is the target pool
        are the cells in closest proximity.

        Parameters
        ----------
        pre_pop - gennetwork.Population
            The presynaptic population
        post_pop - gennetwork.Population
            the postsynaptic population
        target_pool - int
            the number of cells in the target pool
        target_segs - str
            the name of the segments that are possible synaptic targets at the
            postsynaptic population
        divergence - int
            divergence in absolute terms, that is the number of synapses each
            presynaptic cell forms
        tau_1 - numeric
            the time constant of synaptic decay. conforms to the transition
            from the active to the inactive resource state. units of time as
            in neuron standard units
        tau_facil - numeric
            the time constant of facilitation decay. this essentially creates
            the frequency dependence. set to 0 for no facilitation.
        U - numeric
            maximum of postsynaptic response. has to be considered together
            with the weight from the netcon.
        tau_rec - numeric
            time constant of recovery from inactive for recovered state.
            gives depression since inactive resources do not contribute to
            postsynaptic signal. set to 0 for no depression.
        e - numeric
            equilibrium potential of the postsynaptic conductance
        thr - numeric
            threshold for synaptic event at the presynaptic source
        delay - numeric
            delay between presynaptic signal and onset of postsynaptic signal
        weight - numeric
            weight for the netcon object connecting source and target

        Returns
        -------
        None

        Use Cases
        ---------
        >>> tmgsynConnection(nw.population[0], nw.population[1],
                             3, 'prox', 1, 6.0, 0, 0.04, 0, 0, 10, 3, 0)
        A non-facilitating, non-depressing excitatory connection.
        """
        # locals() captures every constructor argument (including self) so
        # the parametrization can be reported later by get_properties().
        self.init_parameters = locals()
        self.pre_pop = pre_pop
        self.post_pop = post_pop
        pre_pop.add_connection(self)
        post_pop.add_connection(self)
        # Lay both populations out evenly on a unit circle; proximity between
        # cells is then the euclidean distance between their circle points.
        pre_pop_rad = (np.arange(pre_pop.get_cell_number(), dtype=float) /
                       pre_pop.get_cell_number()) * (2*np.pi)
        post_pop_rad = (np.arange(post_pop.get_cell_number(), dtype=float) /
                        post_pop.get_cell_number()) * (2*np.pi)
        pre_pop_pos = pos(pre_pop_rad)
        post_pop_pos = pos(post_pop_rad)
        pre_cell_target = []
        synapses = []
        netcons = []
        for idx, curr_cell_pos in enumerate(pre_pop_pos):
            # Rank all postsynaptic cells by distance and draw `divergence`
            # distinct targets from the `target_pool` closest ones.
            curr_dist = []
            for post_cell_pos in post_pop_pos:
                curr_dist.append(euclidian_dist(curr_cell_pos, post_cell_pos))
            sort_idc = np.argsort(curr_dist)
            closest_cells = sort_idc[0:target_pool]
            picked_cells = np.random.choice(closest_cells,
                                            divergence,
                                            replace=False)
            pre_cell_target.append(picked_cells)
            for tar_c in picked_cells:
                curr_syns = []
                curr_netcons = []
                curr_seg_pool = post_pop[tar_c].get_segs_by_name(target_segs)
                chosen_seg = np.random.choice(curr_seg_pool)
                # NOTE(review): `seg` is never used in this loop -- every
                # iteration places a synapse at chosen_seg(0.5).
                for seg in chosen_seg:
                    curr_syn = h.tmgsyn(chosen_seg(0.5))
                    curr_syn.tau_1 = tau_1
                    curr_syn.tau_facil = tau_facil
                    curr_syn.U = U
                    curr_syn.e = e
                    curr_syn.tau_rec = tau_rec
                    curr_syns.append(curr_syn)
                    curr_netcon = h.NetCon(pre_pop[idx].soma(0.5)._ref_v,
                                           curr_syn, thr, delay, weight,
                                           sec=pre_pop[idx].soma)
                    curr_netcons.append(curr_netcon)
                netcons.append(curr_netcons)
                synapses.append(curr_syns)
        self.netcons = netcons
        self.pre_cell_targets = np.array(pre_cell_target)
        self.synapses = synapses
class Exp2SynConnection(GenConnection):
    """
    This class connects a pre and a post synaptic population with a Exp2Syn
    synapse, choosing targets by proximity on a unit circle.
    """

    def __init__(self, pre_pop, post_pop, target_pool, target_segs, divergence,
                 tau1, tau2, e, thr, delay, weight):
        """Connect pre_pop to post_pop with Exp2Syn synapses.

        Parameters
        ----------
        pre_pop, post_pop - gennetwork.Population
            the pre- and postsynaptic populations
        target_pool - int
            number of closest postsynaptic cells eligible as targets
        target_segs - str
            name of the candidate target segments on the postsynaptic cells
        divergence - int
            number of postsynaptic cells each presynaptic cell contacts
        tau1, tau2 - numeric
            rise and decay time constants of the Exp2Syn conductance
        e - numeric
            reversal potential of the synaptic conductance
        thr - numeric
            presynaptic spike detection threshold for the NetCon
        delay - numeric
            transmission delay of the NetCon
        weight - numeric
            weight of the NetCon
        """
        # locals() captures every constructor argument (including self) so
        # the parametrization can be reported later by get_properties().
        self.init_parameters = locals()
        self.pre_pop = pre_pop
        self.post_pop = post_pop
        pre_pop.add_connection(self)
        post_pop.add_connection(self)
        # Lay both populations out evenly on a unit circle; proximity between
        # cells is then the euclidean distance between their circle points.
        pre_pop_rad = (np.arange(pre_pop.get_cell_number(), dtype=float) /
                       pre_pop.get_cell_number()) * (2*np.pi)
        post_pop_rad = (np.arange(post_pop.get_cell_number(), dtype=float) /
                        post_pop.get_cell_number()) * (2*np.pi)
        self.pre_pop_rad = pre_pop_rad
        self.post_pop_rad = post_pop_rad
        pre_pop_pos = pos(pre_pop_rad)
        post_pop_pos = pos(post_pop_rad)
        pre_cell_target = []
        synapses = []
        netcons = []
        for idx, curr_cell_pos in enumerate(pre_pop_pos):
            # Rank all postsynaptic cells by distance and draw `divergence`
            # distinct targets from the `target_pool` closest ones.
            curr_dist = []
            for post_cell_pos in post_pop_pos:
                curr_dist.append(euclidian_dist(curr_cell_pos, post_cell_pos))
            sort_idc = np.argsort(curr_dist)
            closest_cells = sort_idc[0:target_pool]
            picked_cells = np.random.choice(closest_cells,
                                            divergence,
                                            replace=False)
            pre_cell_target.append(picked_cells)
            for tar_c in picked_cells:
                curr_syns = []
                curr_netcons = []
                curr_seg_pool = post_pop[tar_c].get_segs_by_name(target_segs)
                chosen_seg = np.random.choice(curr_seg_pool)
                # NOTE(review): `seg` is never used in this loop -- every
                # iteration places a synapse at chosen_seg(0.5).
                for seg in chosen_seg:
                    curr_syn = h.Exp2Syn(chosen_seg(0.5))
                    curr_syn.tau1 = tau1
                    curr_syn.tau2 = tau2
                    curr_syn.e = e
                    curr_syns.append(curr_syn)
                    curr_netcon = h.NetCon(pre_pop[idx].soma(0.5)._ref_v,
                                           curr_syn, thr, delay, weight,
                                           sec=pre_pop[idx].soma)
                    curr_netcons.append(curr_netcon)
                netcons.append(curr_netcons)
                synapses.append(curr_syns)
        self.netcons = netcons
        self.pre_cell_targets = np.array(pre_cell_target)
        self.synapses = synapses
class PerforantPathStimulation(object):
    """
    Connects a stimulation source (passed directly to h.NetCon, e.g. a
    NetStim) to cells of a postsynaptic population with Exp2Syn synapses.
    """

    def __init__(self, stim, post_pop, n_targets, target_segs,
                 tau1, tau2, e, thr, delay, weight):
        """Connect *stim* to cells of *post_pop* with Exp2Syn synapses.

        Parameters
        ----------
        stim - NetCon-compatible event source
            the stimulation source
        post_pop - gennetwork.Population
            the postsynaptic population
        n_targets - int or index sequence
            an int draws that many random target cells; otherwise used to
            index post_pop.cells directly
        target_segs - str
            name of the target segments on the postsynaptic cells; one
            synapse is made on every segment of that pool
        tau1, tau2 - numeric
            rise and decay time constants of the Exp2Syn conductance
        e - numeric
            reversal potential of the synaptic conductance
        thr, delay, weight - numeric
            NetCon threshold, delay and weight
        """
        self.pre_pop = stim
        self.post_pop = post_pop
        post_pop.add_connection(self)
        synapses = []
        netcons = []
        if type(n_targets) == int:
            # Select n_targets from post_pop
            target_cells = np.random.choice(post_pop.cells, n_targets,
                                            replace=False)
        else:
            target_cells = post_pop.cells[n_targets]
        for curr_cell in target_cells:
            curr_seg_pool = curr_cell.get_segs_by_name(target_segs)
            # One synapse per segment in the pool (unlike the proximity
            # connections above, which synapse on a single chosen segment).
            for seg in curr_seg_pool:
                curr_syn = h.Exp2Syn(seg(0.5))
                curr_syn.tau1 = tau1
                curr_syn.tau2 = tau2
                curr_syn.e = e
                curr_netcon = h.NetCon(stim, curr_syn, thr, delay, weight)
                netcons.append(curr_netcon)
                synapses.append(curr_syn)
        self.netcons = netcons
        self.pre_cell_targets = np.array(target_cells)
        self.synapses = synapses
class PerforantPathPoissonStimulation(object):
    """
    Patterned Perforant Path stimulation as in Yim et al. 2015.
    uses vecevent.mod -> h.VecStim
    """

    def __init__(self, post_pop, t_pattern, spat_pattern, target_segs,
                 tau1, tau2, e, weight):
        """Stimulate a spatial subset of post_pop with a temporal spike
        pattern played through a VecStim.

        Parameters
        ----------
        post_pop - gennetwork.Population
            the postsynaptic population
        t_pattern - sequence of numeric
            spike times to replay through the VecStim
        spat_pattern - index sequence
            indices of the postsynaptic cells to stimulate
        target_segs - str
            name of the target segments; one synapse per segment in the pool
        tau1, tau2, e - numeric
            Exp2Syn rise/decay time constants and reversal potential
        weight - numeric
            NetCon weight (set on weight[0])
        """
        post_pop.add_connection(self)
        synapses = []
        netcons = []
        conductances = []
        target_cells = post_pop.cells[spat_pattern]
        # No presynaptic population: the source is the VecStim below.
        self.pre_pop = "Implicit"
        self.vecstim = h.VecStim()
        self.pattern_vec = h.Vector(t_pattern)
        self.vecstim.play(self.pattern_vec)
        for curr_cell in target_cells:
            curr_seg_pool = curr_cell.get_segs_by_name(target_segs)
            curr_conductances = []
            for seg in curr_seg_pool:
                curr_syn = h.Exp2Syn(seg(0.5))
                curr_syn.tau1 = tau1
                curr_syn.tau2 = tau2
                curr_syn.e = e
                curr_netcon = h.NetCon(self.vecstim, curr_syn)
                # Record the synaptic conductance for later analysis.
                curr_gvec = h.Vector()
                curr_gvec.record(curr_syn._ref_g)
                curr_conductances.append(curr_gvec)
                curr_netcon.weight[0] = weight
                netcons.append(curr_netcon)
                synapses.append(curr_syn)
                """for event in pattern:
                curr_netcon.event(event)"""
            conductances.append(curr_conductances)
        self.netcons = netcons
        self.pre_cell_targets = np.array(target_cells)
        self.synapses = synapses
        self.conductances = conductances
class PerforantPathPoissonTmgsyn(GenConnection):
    """
    Patterned Perforant Path simulation as in Yim et al. 2015.
    uses vecevent.mod -> h.VecStim
    """

    def __init__(self, post_pop, t_pattern, spat_pattern, target_segs,
                 tau_1, tau_facil, U, tau_rec, e, weight):
        """Stimulate a spatial subset of post_pop with a temporal pattern
        through tmgsyn (dynamic) synapses.

        Parameters
        ----------
        post_pop - gennetwork.Population
            the postsynaptic population
        t_pattern - sequence of numeric
            spike times to replay through the VecStim
        spat_pattern - index sequence
            indices of the postsynaptic cells to stimulate
        target_segs - str
            name of the target segments; one synapse per segment in the pool
        tau_1, tau_facil, U, tau_rec, e - numeric
            tmgsyn parameters (decay, facilitation, utilization, recovery,
            reversal potential)
        weight - numeric
            NetCon weight (set on weight[0])
        """
        # locals() captures the arguments (no pre_pop here -- see
        # GenConnection.get_properties, which tolerates the missing key).
        self.init_parameters = locals()
        post_pop.add_connection(self)
        synapses = []
        netcons = []
        t_pattern = list(t_pattern)  # nrn does not like np.ndarrays?
        target_cells = post_pop[spat_pattern]
        # No presynaptic population: the source is the VecStim below.
        self.pre_pop = 'Implicit'
        self.post_pop = post_pop
        self.vecstim = h.VecStim()
        self.pattern_vec = h.Vector(t_pattern)
        self.vecstim.play(self.pattern_vec)
        conductances = []
        for curr_cell in target_cells:
            curr_seg_pool = curr_cell.get_segs_by_name(target_segs)
            curr_conductances = []
            for seg in curr_seg_pool:
                curr_syn = h.tmgsyn(seg(0.5))
                curr_syn.tau_1 = tau_1
                curr_syn.tau_facil = tau_facil
                curr_syn.U = U
                curr_syn.tau_rec = tau_rec
                curr_syn.e = e
                curr_netcon = h.NetCon(self.vecstim, curr_syn)
                # Record the synaptic conductance for later analysis.
                curr_gvec = h.Vector()
                curr_gvec.record(curr_syn._ref_g)
                curr_conductances.append(curr_gvec)
                curr_netcon.weight[0] = weight
                netcons.append(curr_netcon)
                synapses.append(curr_syn)
            conductances.append(curr_conductances)
        self.conductances = conductances
        self.netcons = netcons
        self.pre_cell_targets = np.array(spat_pattern)
        self.synapses = synapses
"""Population ONLY REMAINS IN gennetwork TO KEEP pyDentate RUNNING. THE NEW
IMPLEMENTATION OF POPULATION IS IN genpopulation"""
class Population(object):
    """This is the model of a generic population.

    A population is a number of cells of a specific type derived from
    genneuron.GenNeuron. The Population object keeps track of all
    incoming and outgoing connections. It is recommended to create
    Populations through the GenNetwork.mk_population interface of a network
    the population is part of.

    Attributes
    ----------
    parent_network - gennetwork.GenNetwork or derived instances
        The network the population takes part in
    cell_type - genneuron.GenNeuron class or subclass thereof
        The cell type making up the population
    cells - list of genneuron.GenNeuron instances
        A list of cells that currently exist within the population
    connections - list of Connection objects
        A list of outgoing and incoming connections

    Use cases
    ---------
    >>> nw = GenNetwork()
    >>> nw.mk_population(GranuleCell, 500)
    Create an empty network and create a population of 500 granule cells in
    the network.
    """

    def __init__(self, cell_type=None, n_cells=None, parent_network=None):
        self.parent_network = parent_network
        self.cell_type = cell_type
        self.cells = []
        self.connections = []
        self.VClamps = []
        self.VClamps_i = []
        self.VRecords = []
        if cell_type and n_cells:
            self.make_cells(cell_type, n_cells)
        self.i = 0  # cursor for the (non-reentrant) iterator protocol

    def SEClamp(self, cells, dur1=200, amp1=0, rs=0.001):
        """Attach a single-electrode voltage clamp to each cell index in
        *cells* and record the clamp current."""
        for x in cells:
            clamp = self.cells[x]._SEClamp(dur1=dur1, amp1=amp1, rs=rs)
            self.VClamps.append(clamp)
            curr_vec = h.Vector()
            curr_vec.record(clamp._ref_i)
            self.VClamps_i.append(curr_vec)

    def voltage_recording(self, cells):
        """Set up a somatic voltage recording for each cell index in
        *cells*."""
        for x in cells:
            record = self.cells[x]._voltage_recording()
            self.VRecords.append(record)

    def make_cells(self, cell_type, n_cells):
        """Create cells of a certain type.

        Parameters
        ----------
        cell_type - genneuron.GenNeuron class or subclass thereof
            the type of the cells to be created
        n_cells - numeric
            number of cells to be created

        Raises
        ------
        TypeError - if the population already holds a different cell type

        Use Cases
        ---------
        >>> popul = Population(parent_network=nw)
        >>> popul.make_cells(GranuleCell, 500)
        Create an empty population within nw and then create 500 granule
        cells
        """
        # A population is homogeneous: once the cell type is fixed only more
        # cells of that type may be added. A cell_type of None (the
        # constructor default) counts as "not fixed yet" -- the old code
        # raised TypeError in that case, breaking the documented use case.
        if getattr(self, 'cell_type', None) is not None:
            if self.cell_type != cell_type:
                raise TypeError("cell_type inconsistent with population")
        else:
            self.cell_type = cell_type

        if not hasattr(self, 'cells'):
            self.cells = []

        # list() tolerates self.cells already being an ndarray (the old code
        # crashed on a second make_cells call because ndarray has no append).
        new_cells = [cell_type() for _ in range(n_cells)]
        self.cells = np.array(list(self.cells) + new_cells, dtype=object)

    def get_cell_number(self):
        """Return the number of cells."""
        return len(self.cells)

    def record_aps(self):
        """Attach an AP counter to every cell; returns the counters (also
        stored as self.ap_counters)."""
        counters = []
        for cell in self.cells:
            counters.append(cell._AP_counter())
        self.ap_counters = counters
        return counters

    def plot_aps(self, color='k'):
        """Raster-plot the recorded AP time stamps with plt.eventplot."""
        cells = []
        for x in self.ap_counters:
            # as_numpy() doesn't work on windows 10 ???
            try:
                cells.append(x[0].as_numpy())
            except Exception:
                cells.append(np.array(x[0]))

        # Workaround for matplotlib bug. plt.eventplot throws error when
        # first element empty
        if not np.array(cells[0]).any():
            cells[0] = np.array([0], dtype=float)

        plt.eventplot(cells, linewidth=2, color=color)

    def write_aps(self, directory='', fname=''):
        """Save the recorded AP time stamps to an .npz archive.

        If *fname* is empty a name is derived from the network, cell type
        and the current time; if *directory* is empty the current working
        directory is used (and created if missing).
        """
        if not fname:
            time_tup = time.gmtime()
            time_str = time.asctime(time_tup)
            time_str = '_'.join(time_str.split(' '))
            # NOTE(review): assumes both the network class and the cell type
            # define a `name` attribute -- confirm against GenNetwork.
            nw_name = self.parent_network.__class__.name
            pop_name = self.cell_type.name
            fname = nw_name + '_' + pop_name + '_' + time_str
            fname = fname.replace(':', '-')
        if not directory:
            directory = os.getcwd()
        if not os.path.isdir(directory):
            os.mkdir(directory)
        # os.path.join keeps the path portable (the old code hard-coded a
        # Windows '\\' separator).
        path = os.path.join(directory, fname + '.npz')
        try:
            ap_list = [x[0].as_numpy() for x in self.ap_counters]
        except Exception:
            ap_list = [np.array(x[0]) for x in self.ap_counters]
        np.savez(path, *ap_list)

    def perc_active_cells(self):
        """Return the percentage of cells that fired at least one AP."""
        try:
            # as_numpy doesn't work on windows 10 ???
            timing_arrays = [x[0].as_numpy() for x in self.ap_counters]
        except Exception:
            timing_arrays = [np.array(x[0]) for x in self.ap_counters]
        active_counter = 0
        for x in timing_arrays:
            if x.size != 0:
                active_counter = active_counter + 1
        return (active_counter / float(self.get_cell_number())) * 100

    def mk_current_clamp(self, cells, amp=0.3, dur=5, delays=3):
        """Current-clamp the soma of the given cells.

        Parameters
        ----------
        cells - iterable of int or int
            cell indices to clamp; an int n picks n random cells
        amp, dur - numeric
            amplitude and duration of each pulse
        delays - iterable of numeric or numeric
            pulse onset time(s); one pulse per delay
        """
        if not hasattr(cells, '__iter__'):
            cells = np.random.choice(self.get_cell_number(), cells,
                                     replace=False)
        if not hasattr(delays, '__iter__'):
            # atleast_1d, because iterating the 0-d array that np.array()
            # makes from a scalar raises TypeError.
            delays = np.atleast_1d(delays)
        for cell in cells:
            for delay in delays:
                self.cells[cell]._current_clamp_soma(amp=amp, dur=dur,
                                                     delay=delay)

    def current_clamp_rnd(self, n_cells, amp=0.3, dur=5, delay=3):
        """DEPRECATE: current-clamp n_cells randomly chosen cells."""
        if not hasattr(delay, '__iter__'):
            delay = [delay]  # the old code crashed on the scalar default
        chosen_cells = np.random.choice(self.cells, n_cells, replace=False)
        for x in chosen_cells:
            for y in delay:
                x._current_clamp_soma(amp=amp, dur=dur, delay=y)
        return chosen_cells

    def current_clamp_range(self, n_cells, amp=0.3, dur=5, delay=3):
        """DEPRECATE: current-clamp the first n_cells (or the given cell
        indices)."""
        if type(n_cells) == int:
            n_cells = range(n_cells)
        for cell in n_cells:
            self.cells[cell]._current_clamp_soma(amp=amp, dur=dur, delay=delay)

    def add_connection(self, conn):
        """Register an incoming or outgoing connection."""
        self.connections.append(conn)

    def get_properties(self):
        """Return a picklable dict describing the population and its
        recordings."""
        try:
            ap_time_stamps = [x[0].as_numpy() for x in self.ap_counters]
        except Exception:
            ap_time_stamps = [np.array(x[0]) for x in self.ap_counters]
        ap_numbers = [x[1].n for x in self.ap_counters]
        try:
            v_rec = [x.as_numpy() for x in self.VRecords]
            vclamp_i = [x.as_numpy() for x in self.VClamps_i]
        except Exception:
            v_rec = [np.array(x) for x in self.VRecords]
            vclamp_i = [np.array(x) for x in self.VClamps_i]
        properties = {'parent_network': str(self.parent_network),
                      'cell_type': self.cell_type.name,
                      'cell_number': self.get_cell_number(),
                      'connections': [conn.get_properties()
                                      for conn in self.connections],
                      'ap_time_stamps': ap_time_stamps,
                      'ap_number': ap_numbers,
                      'v_records': v_rec,
                      'VClamps_i': vclamp_i}
        return properties

    def __str__(self):
        return self.cell_type.name + 'Population'

    def __iter__(self):
        return self

    def __getitem__(self, item):
        return self.cells[item]

    def __next__(self):
        if self.i < (len(self.cells)):
            i = self.i
            self.i += 1
            return self.cells[i]
        else:
            self.i = 0
            raise StopIteration()

    def next(self):
        """Python 2 alias for __next__."""
        return self.__next__()
# HELPERS
def pos(rad):
    """Map angles (radians) to (x, y) points on the unit circle.

    The circle is centred at the origin with radius 1, so
    x = cos(rad) and y = sin(rad).  Returns a list with one
    (x, y) tuple per input angle.
    """
    return list(zip(np.cos(rad), np.sin(rad)))
def euclidian_dist(p1, p2):
    """Euclidean distance between 2D points p1 = (x1, y1) and p2 = (x2, y2)."""
    return math.hypot(p1[0] - p2[0], p1[1] - p2[1])
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .common import *
# NAMESPACES = {'xmlns': 'http://www.w3.org/1999/xhtml'}
# https://blockly-demo.appspot.com/static/demos/code/index.html
class BlocklyXmlBuilderConstantTest(TestCase):
    """Checks XML serialization of the constant block types."""

    def _constant_test(self, statement, block_type, field_name, value=None):
        """Build a one-statement tree around *statement* and verify the
        resulting Blockly block and its single field."""
        root = Node.add_root()
        node = root.add_child(content_object=statement)
        document = etree.parse(StringIO(BlocklyXmlBuilder().build(node)))
        blocks = document.xpath('/xml/block')
        self.assertEqual(1, len(blocks))
        only_block = blocks[0]
        self.assertEqual(block_type, only_block.get('type'))
        self.assertEqual(str(node.id), only_block.get('id'))
        field = only_block.find('field')
        self.assertIsNotNone(field)
        self.assertEqual(field_name, field.get('name'))
        # When no explicit expected value is given, the field must carry
        # the stringified constant value.
        expected = str(statement.value) if value is None else value
        self.assertEqual(expected, field.text)

    def test_number_constant(self):
        # https://blockly-demo.appspot.com/static/demos/code/index.html#zv7x7e
        self._constant_test(NumberConstant(value=1.11456), 'math_number', 'NUM')

    def test_string_constant(self):
        # https://blockly-demo.appspot.com/static/demos/code/index.html#94euw4
        self._constant_test(StringConstant(value='hello'), 'text', 'TEXT')

    def test_boolean_constant_true(self):
        # https://blockly-demo.appspot.com/static/demos/code/index.html#fdboqf
        self._constant_test(BooleanConstant(value=True), 'logic_boolean', 'BOOL', 'TRUE')

    def test_boolean_constant_false(self):
        # https://blockly-demo.appspot.com/static/demos/code/index.html#fksno2
        self._constant_test(BooleanConstant(value=False), 'logic_boolean', 'BOOL', 'FALSE')
class BlocklyXmlBuilderReferenceConstantTest(TestCase):
    """Checks XML serialization of a reference constant pointing at a model."""

    def test_reference_constant(self):
        root = Node.add_root()
        constant = ReferenceConstant.objects.create()
        referenced = Model.objects.create()
        node = root.add_child(content_object=constant)
        node.add_child(content_object=referenced)
        # Re-fetch so the freshly added child is visible on the node.
        node = Node.objects.get(id=node.id)
        document = etree.parse(StringIO(BlocklyXmlBuilder().build(node)))
        blocks = document.xpath('/xml/block')
        self.assertEqual(1, len(blocks))
        reference_block = blocks[0]
        self.assertEqual('business_logic_reference', reference_block.get('type'))
        self.assertEqual(str(node.id), reference_block.get('id'))
        fields = reference_block.findall('field')
        self.assertEqual(2, len(fields))
        type_field, value_field = fields
        # TYPE carries the dotted model label, VALUE the referenced pk.
        self.assertEqual('TYPE', type_field.get('name'))
        self.assertEqual('test_app.Model', type_field.text)
        self.assertEqual('VALUE', value_field.get('name'))
        self.assertEqual(str(referenced.id), value_field.text)
class BlocklyXmlBuilderAssignmentTest(TestCase):
    """Checks XML serialization of a variable assignment."""

    def test_assignment(self):
        # https://blockly-demo.appspot.com/static/demos/code/index.html#b7driq
        entry_point = variable_assign_value()
        assign_node = entry_point.get_children()[1]
        document = etree.parse(StringIO(BlocklyXmlBuilder().build(assign_node)))
        blocks = document.xpath('/xml/block')
        self.assertEqual(1, len(blocks))
        set_block = blocks[0]
        self.assertEqual('variables_set', set_block.get('type'))
        self.assertEqual(str(assign_node.id), set_block.get('id'))
        # The set block holds the variable field and the assigned value.
        var_field, value_elem = set_block.getchildren()
        self.assertEqual('field', var_field.tag)
        self.assertEqual('VAR', var_field.get('name'))
        self.assertEqual('A', var_field.text)
        self.assertEqual('value', value_elem.tag)
        self.assertEqual('VALUE', value_elem.get('name'))
        inner_block, = value_elem.getchildren()
        self.assertEqual('block', inner_block.tag)
        self.assertEqual('math_number', inner_block.get('type'))
class BlocklyXmlBuilderBlockTest(TestCase):
    """Checks serialization of statement sequences into chained
    <next><block> XML structures."""

    def test_block(self):
        # https://blockly-demo.appspot.com/static/demos/code/index.html#h333qt
        root = Node.add_root()
        vars = ('A', 'B', 'C', 'D')
        var_defs = {}
        for var in vars:
            var_def = VariableDefinition(name=var)
            var_defs[var] = var_def
            root.add_child(content_object=var_def)
        # Re-fetch so the freshly added children are visible on the node.
        root = Node.objects.get(id=root.id)
        for i, var in enumerate(vars, 1):
            assignment_node = root.add_child(content_object=Assignment())
            assignment_node.add_child(content_object=Variable(definition=var_defs[var]))
            assignment_node.add_child(content_object=NumberConstant(value=i))
        root = Node.objects.get(id=root.id)
        xml_str = BlocklyXmlBuilder().build(root)
        xml = etree.parse(StringIO(xml_str))

        for i, var in enumerate(vars):
            var_value = i + 1.0
            # Each subsequent assignment is nested one level deeper in
            # /next/block chains.
            variables_set_block_xpath = '/xml/block' + '/next/block' * i
            block = xml.xpath(variables_set_block_xpath)
            self.assertEqual(1, len(block))
            block = block[0]
            self.assertEqual('variables_set', block.get('type'))
            field = block.find('field')
            self.assertEqual('VAR', field.get('name'))
            self.assertEqual(var, field.text)
            value = block.find('value')
            self.assertEqual('VALUE', value.get('name'))
            math_number, = value.getchildren()
            self.assertEqual('math_number', math_number.get('type'))
            field, = math_number.getchildren()
            self.assertEqual('NUM', field.get('name'))
            self.assertEqual(str(var_value), field.text)

    def test_block_if_sequence(self):
        """Two consecutive if statements end up chained through <next>."""
        root = Node.add_root()
        for i in range(2):
            root, var_defs = create_if_statement(2, root=root)
        xml_str = BlocklyXmlBuilder().build(root)
        xml = etree.parse(StringIO(xml_str))
        block = xml.xpath('/xml/block/next/block')
        self.assertEqual(1, len(block))
        block = block[0]
        self.assertEqual('controls_if', block.get('type'))
class BlocklyXmlDateTest(TestCase):
    """Checks XML serialization of a date constant."""

    def test_block_date(self):
        today = datetime.date.today()
        root = Node.add_root(content_object=DateConstant(value=today))
        document = etree.parse(StringIO(BlocklyXmlBuilder().build(root)))
        blocks = document.xpath('/xml/block')
        self.assertEqual(1, len(blocks))
        date_block = blocks[0]
        self.assertEqual('business_logic_date', date_block.get('type'))
        date_field = date_block.find('field')
        # The field value uses the ISO-like YYYY-MM-DD formatting.
        self.assertEqual('DATE', date_field.get('name'))
        self.assertEqual(today.strftime('%Y-%m-%d'), date_field.text)
class BlocklyXmlBuilderBinaryOperatorTest(TestCase):
    """Checks XML serialization of binary operators (math and logic)."""

    def _test_math_binary_operator(self, operator, block_type, operator_field_value):
        """Serialize ``1.0 <operator> 2.0`` and verify the block type, the
        OP field value and both numeric operand sub-blocks."""
        root = Node.add_root(content_object=BinaryOperator(operator=operator))
        for i in (1.0, 2.0):
            root.add_child(content_object=NumberConstant(value=i))
        # Re-fetch so the operand children are visible on the node.
        root = Node.objects.get(id=root.id)
        xml_str = BlocklyXmlBuilder().build(root)
        xml = etree.parse(StringIO(xml_str))
        block = xml.xpath('/xml/block')
        self.assertEqual(1, len(block))
        block = block[0]
        self.assertEqual(block_type, block.get('type'))
        field = xml.xpath('/xml/block/field')[0]
        self.assertEqual('OP', field.get('name'))
        self.assertEqual(operator_field_value, field.text)
        # Operand A must carry 1.0 and operand B 2.0.
        for field_value, field_name in enumerate(('A', 'B'), 1):
            value = xml.xpath('/xml/block/value[@name="{}"]'.format(field_name))[0]
            math_number = value.find('block')
            self.assertEqual('math_number', math_number.get('type'))
            field, = math_number.getchildren()
            self.assertEqual('NUM', field.get('name'))
            self.assertEqual(str(float(field_value)), field.text)
def _test_logic_binary_operator(self, operator, block_type, operator_field_value):
root = Node.add_root(content_object=BinaryOperator(operator=operator))
for i in (True, False):
root.add_child(content_object=BooleanConstant(value=i))
root = Node.objects.get(id=root.id)
xml_str = BlocklyXmlBuilder().build(root)
xml = etree.parse(StringIO(xml_str))
block = xml.xpath('/xml/block')
self.assertEqual(1, len(block))
block = block[0]
self.assertEqual(block_type, block.get('type'))
field = xml.xpath('/xml/block/field')[0]
self.assertEqual('OP', field.get('name'))
self.assertEqual(operator_field_value, field.text)
for field_value, field_name in ((True, 'A'), (False, 'B')):
value = xml.xpath('/xml/block/value[@name="{}"]'.format(field_name))[0]
math_number = value.find('block')
self.assertEqual('logic_boolean', math_number.get('type'))
field, = math_number.getchildren()
self.assertEqual('BOOL', field.get('name'))
self.assertEqual(str(field_value).upper(), field.text)
def test_operator_add(self):
# https://blockly-demo.appspot.com/static/demos/code/index.html#mzvwas
self._test_math_binary_operator('+', 'math_arithmetic', 'ADD')
def test_operator_minus(self):
# https://blockly-demo.appspot.com/static/demos/code/index.html#ec7z5c
self._test_math_binary_operator('-', 'math_arithmetic', 'MINUS')
def test_operator_mul(self):
# https://blockly-demo.appspot.com/static/demos/code/index.html#nzq3w5
self._test_math_binary_operator('*', 'math_arithmetic', 'MULTIPLY')
def test_operator_div(self):
# https://blockly-demo.appspot.com/static/demos/code/index.html#qzqt69
self._test_math_binary_operator('/', 'math_arithmetic', 'DIVIDE')
def test_operator_pow(self):
# https://blockly-demo.appspot.com/static/demos/code/index.html#skakny
self._test_math_binary_operator('^', 'math_arithmetic', 'POWER')
def test_operator_eq(self):
# https://blockly-demo.appspot.com/static/demos/code/index.html#rzsfmb
self._test_math_binary_operator('==', 'logic_compare', 'EQ')
def test_operator_ne(self):
# https://blockly-demo.appspot.com/static/demos/code/index.html#xj34cw
self._test_math_binary_operator('!=', 'logic_compare', 'NEQ')
def test_operator_lt(self):
# https://blockly-demo.appspot.com/static/demos/code/index.html#oap9ne
self._test_math_binary_operator('<', 'logic_compare', 'LT')
def test_operator_lte(self):
# https://blockly-demo.appspot.com/static/demos/code/index.html#qkk5jx
self._test_math_binary_operator('<=', 'logic_compare', 'LTE')
def test_operator_gt(self):
# https://blockly-demo.appspot.com/static/demos/code/index.html#qxdp8u
self._test_math_binary_operator('>', 'logic_compare', 'GT')
def test_operator_gte(self):
# https://blockly-demo.appspot.com/static/demos/code/index.html#kggfq9
self._test_math_binary_operator('>=', 'logic_compare', 'GTE')
def test_operator_and(self):
# https://blockly-demo.appspot.com/static/demos/code/index.html#n2uf8x
self._test_logic_binary_operator('&', 'logic_operation', 'AND')
def test_operator_or(self):
# https://blockly-demo.appspot.com/static/demos/code/index.html#baz5xq
self._test_logic_binary_operator('|', 'logic_operation', 'OR')
class BlocklyXmlBuilderIfStatementTest(TestCase):
    """Exact-XML tests for controls_if serialization.

    ``create_if_statement(n)`` builds an if-statement tree with ``n``
    branches; the helpers below verify the generated <mutation>, condition
    <value> and branch <statement> elements in document order.
    """

    def _test_statement(self, xml, statement_name, variable_name):
        """Verify a branch <statement> that assigns TRUE to ``variable_name``."""
        self.assertEqual('statement', xml.tag)
        self.assertEqual(statement_name, xml.get('name'))
        # The branch body is a single variables_set block ...
        variables_set_block, = xml.getchildren()
        self.assertEqual('variables_set', variables_set_block.get('type'))
        field, value = variables_set_block.getchildren()
        # ... whose VAR field names the variable being set ...
        self.assertEqual('field', field.tag)
        self.assertEqual('VAR', field.get('name'))
        self.assertEqual(variable_name, field.text)
        # ... and whose VALUE slot holds the boolean constant TRUE.
        self.assertEqual('value', value.tag)
        self.assertEqual('VALUE', value.get('name'))
        block_value, = value.getchildren()
        self.assertEqual('block', block_value.tag)
        self.assertEqual('logic_boolean', block_value.get('type'))
        field, = block_value.getchildren()
        self.assertEqual('field', field.tag)
        self.assertEqual('BOOL', field.get('name'))
        self.assertEqual('TRUE', field.text)

    def _test_condition(self, xml, condition_name, variable_name, use_binary_operator=False):
        """Verify a condition <value> (IF0, IF1, ...) of a controls_if block."""
        self.assertEqual('value', xml.tag)
        self.assertEqual(condition_name, xml.get('name'))
        if_value_block, = xml.getchildren()
        self.assertEqual('block', if_value_block.tag)
        if use_binary_operator:
            # Condition built from a boolean operation rather than a variable.
            self.assertEqual('logic_operation', if_value_block.get('type'))
        else:
            # Condition is a bare variable read: variables_get with VAR field.
            self.assertEqual('variables_get', if_value_block.get('type'))
            if_condition_var_field, = if_value_block.getchildren()
            self.assertEqual('field', if_condition_var_field.tag)
            self.assertEqual('VAR', if_condition_var_field.get('name'))
            self.assertEqual(variable_name, if_condition_var_field.text)

    def _test_block(self, xml):
        """Verify that ``xml`` is a controls_if block element."""
        self.assertEqual('block', xml.tag)
        self.assertEqual('controls_if', xml.get('type'))

    def test_if(self):
        # https://blockly-demo.appspot.com/static/demos/code/index.html#k5ygcz
        node, _ = create_if_statement(2)
        xml_str = BlocklyXmlBuilder().build(node)
        xml = etree.parse(StringIO(xml_str))
        block, = xml.getroot().getchildren()
        self._test_block(block)
        children = block.getchildren()
        # Plain if: one condition and one branch, no <mutation> element.
        self.assertEqual(2, len(children))
        if_value = children[0]
        self._test_condition(if_value, 'IF0', 'IfCondition')
        if_statement = children[1]
        self._test_statement(if_statement, 'DO0', 'IfEnter')

    def test_if_not_variable_condition(self):
        # https://blockly-demo.appspot.com/static/demos/code/index.html#hzuarv
        node, _ = create_if_statement(2, use_binary_operator=True)
        xml_str = BlocklyXmlBuilder().build(node)
        xml = etree.parse(StringIO(xml_str))
        block, = xml.getroot().getchildren()
        self._test_block(block)
        children = block.getchildren()
        self.assertEqual(2, len(children))
        if_value = children[0]
        self._test_condition(if_value, 'IF0', 'IfCondition', use_binary_operator=True)
        if_statement = children[1]
        self._test_statement(if_statement, 'DO0', 'IfEnter')

    def test_if_else(self):
        # https://blockly-demo.appspot.com/static/demos/code/index.html#9yax5x
        node, _ = create_if_statement(3)
        xml_str = BlocklyXmlBuilder().build(node)
        xml = etree.parse(StringIO(xml_str))
        block, = xml.getroot().getchildren()
        self._test_block(block)
        children = block.getchildren()
        # if/else: mutation + one condition + two branches.
        self.assertEqual(4, len(children))
        mutation = children[0]
        self.assertEqual('mutation', mutation.tag)
        self.assertEqual('1', mutation.get('else'))
        self.assertEqual(None, mutation.get('elseif'))
        self._test_condition(children[1], 'IF0', 'IfCondition')
        self._test_statement(children[2], 'DO0', 'IfEnter')
        self._test_statement(children[3], 'ELSE', 'ElseEnter')

    def test_elif(self):
        # https://blockly-demo.appspot.com/static/demos/code/index.html#tqx7e5
        node, _ = create_if_statement(4)
        xml_str = BlocklyXmlBuilder().build(node)
        xml = etree.parse(StringIO(xml_str))
        block, = xml.getroot().getchildren()
        self._test_block(block)
        children = block.getchildren()
        # if/elif: mutation + two condition/branch pairs, no else branch.
        self.assertEqual(5, len(children))
        mutation = children[0]
        self.assertEqual('mutation', mutation.tag)
        self.assertEqual(None, mutation.get('else'))
        self.assertEqual('1', mutation.get('elseif'))
        self._test_condition(children[1], 'IF0', 'IfCondition')
        self._test_statement(children[2], 'DO0', 'IfEnter')
        self._test_condition(children[3], 'IF1', 'ElseIfCondition1')
        self._test_statement(children[4], 'DO1', 'ElseIfEnter1')

    def test_elif_else(self):
        # https://blockly-demo.appspot.com/static/demos/code/index.html#y8nw8p
        node, _ = create_if_statement(5)
        xml_str = BlocklyXmlBuilder().build(node)
        xml = etree.parse(StringIO(xml_str))
        block, = xml.getroot().getchildren()
        self._test_block(block)
        children = block.getchildren()
        # if/elif/else: mutation + two condition/branch pairs + else branch.
        self.assertEqual(6, len(children))
        mutation = children[0]
        self.assertEqual('mutation', mutation.tag)
        self.assertEqual('1', mutation.get('else'))
        self.assertEqual('1', mutation.get('elseif'))
        self._test_condition(children[1], 'IF0', 'IfCondition')
        self._test_statement(children[2], 'DO0', 'IfEnter')
        self._test_condition(children[3], 'IF1', 'ElseIfCondition1')
        self._test_statement(children[4], 'DO1', 'ElseIfEnter1')
        self._test_statement(children[5], 'ELSE', 'ElseEnter')

    def test_elif_2(self):
        # https://blockly-demo.appspot.com/static/demos/code/index.html#7iucn3
        node, _ = create_if_statement(6)
        xml_str = BlocklyXmlBuilder().build(node)
        xml = etree.parse(StringIO(xml_str))
        block, = xml.getroot().getchildren()
        self._test_block(block)
        children = block.getchildren()
        # if/elif/elif: mutation + three condition/branch pairs.
        self.assertEqual(7, len(children))
        mutation = children[0]
        self.assertEqual('mutation', mutation.tag)
        self.assertEqual(None, mutation.get('else'))
        self.assertEqual('2', mutation.get('elseif'))
        self._test_condition(children[1], 'IF0', 'IfCondition')
        self._test_statement(children[2], 'DO0', 'IfEnter')
        self._test_condition(children[3], 'IF1', 'ElseIfCondition1')
        self._test_statement(children[4], 'DO1', 'ElseIfEnter1')
        self._test_condition(children[5], 'IF2', 'ElseIfCondition2')
        self._test_statement(children[6], 'DO2', 'ElseIfEnter2')
class BlocklyXmlBuilderArgumentFieldTest(TestCase):
    """Block types emitted for dotted (argument field) variable names."""

    def test_argument_field_set(self):
        tree_root = variable_assign_value(variable_name='argument.field')
        document = etree.parse(StringIO(BlocklyXmlBuilder().build(tree_root)))
        self.assertEqual(
            'business_logic_argument_field_set',
            document.find('/block').get('type'))

    def test_argument_field_get(self):
        definition = VariableDefinition.objects.create(name='argument.field')
        tree_root = Node.add_root(
            content_object=Variable.objects.create(definition=definition))
        document = etree.parse(StringIO(BlocklyXmlBuilder().build(tree_root)))
        self.assertEqual(
            'business_logic_argument_field_get',
            document.find('/block').get('type'))
class BlocklyXmlBuilderFunctionTest(TestCase):
    """Serialization of Function nodes, with and without arguments."""

    def _build_function_block(self, tree_root):
        # Serialize the tree and return the single business_logic_function block.
        document = etree.parse(StringIO(BlocklyXmlBuilder().build(tree_root)))
        function_block = document.find('/block')
        self.assertEqual('business_logic_function', function_block.get('type'))
        return function_block

    def test_function_without_args(self):
        definition = PythonCodeFunctionDefinition.objects.create(title='xxx')
        tree_root = Node.add_root(content_object=Function(definition=definition))
        function_block = self._build_function_block(tree_root)
        children = function_block.getchildren()
        self.assertEqual(2, len(children))
        mutation, name_field = children
        self.assertEqual('true', mutation.get('args'))
        self.assertEqual('FUNC', name_field.get('name'))
        self.assertEqual(definition.title, name_field.text)

    def test_function_with_args(self):
        definition = PythonCodeFunctionDefinition.objects.create(title='xxx')
        tree_root = Node.add_root(content_object=Function(definition=definition))
        tree_root.add_child(content_object=NumberConstant(value=3))
        tree_root = Node.objects.get(id=tree_root.id)
        function_block = self._build_function_block(tree_root)
        children = function_block.getchildren()
        self.assertEqual(3, len(children))
        mutation, name_field, arg0_value = children
        self.assertEqual('true', mutation.get('args'))
        self.assertEqual('FUNC', name_field.get('name'))
        self.assertEqual(definition.title, name_field.text)
        # The single argument appears as an ARG0 value slot wrapping a
        # math_number block.
        self.assertEqual('value', arg0_value.tag)
        self.assertEqual('ARG0', arg0_value.get('name'))
        slot_children = arg0_value.getchildren()
        self.assertEqual(1, len(slot_children))
        self.assertEqual('block', slot_children[0].tag)
        self.assertEqual('math_number', slot_children[0].get('type'))
| |
try:
from urllib.parse import urlparse, urlunparse
except ImportError: # Python 2
from urlparse import urlparse, urlunparse
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, QueryDict
from django.template.response import TemplateResponse
from django.utils.http import base36_to_int
from django.utils.translation import ugettext as _
from django.shortcuts import resolve_url
from django.views.decorators.debug import sensitive_post_parameters
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
# Avoid shadowing the login() and logout() views below.
from django.contrib.auth import REDIRECT_FIELD_NAME, login as auth_login, logout as auth_logout, get_user_model
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm, PasswordResetForm, SetPasswordForm, PasswordChangeForm
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import get_current_site
@sensitive_post_parameters()
@csrf_protect
@never_cache
def login(request, template_name='registration/login.html',
          redirect_field_name=REDIRECT_FIELD_NAME,
          authentication_form=AuthenticationForm,
          current_app=None, extra_context=None):
    """
    Displays the login form and handles the login action.
    """
    # NOTE(review): request.REQUEST merges GET and POST and is deprecated in
    # later Django versions -- prefer explicit request.POST/request.GET.
    redirect_to = request.REQUEST.get(redirect_field_name, '')

    if request.method == "POST":
        form = authentication_form(data=request.POST)
        if form.is_valid():
            # Use default setting if redirect_to is empty
            if not redirect_to:
                redirect_to = settings.LOGIN_REDIRECT_URL
            redirect_to = resolve_url(redirect_to)

            netloc = urlparse(redirect_to)[1]

            # Heavier security check -- don't allow redirection to a different
            # host.
            # NOTE(review): this compares only the network location; it does
            # not reject unsafe schemes (e.g. javascript:) the way newer
            # Django's is_safe_url() does -- confirm this is acceptable here.
            if netloc and netloc != request.get_host():
                redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)

            # Okay, security checks complete. Log the user in.
            auth_login(request, form.get_user())

            if request.session.test_cookie_worked():
                request.session.delete_test_cookie()

            return HttpResponseRedirect(redirect_to)
    else:
        form = authentication_form(request)

    # The test cookie verifies the browser accepts cookies before login POSTs.
    request.session.set_test_cookie()

    current_site = get_current_site(request)

    context = {
        'form': form,
        redirect_field_name: redirect_to,
        'site': current_site,
        'site_name': current_site.name,
    }
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context,
                            current_app=current_app)
def logout(request, next_page=None,
           template_name='registration/logged_out.html',
           redirect_field_name=REDIRECT_FIELD_NAME,
           current_app=None, extra_context=None):
    """
    Logs out the user and displays 'You are logged out' message.
    """
    auth_logout(request)

    requested_target = request.REQUEST.get(redirect_field_name, '')
    if requested_target:
        target_host = urlparse(requested_target)[1]
        # Security check -- only follow the redirect when the target has no
        # explicit host or names this server.
        if not target_host or target_host == request.get_host():
            return HttpResponseRedirect(requested_target)

    if next_page is not None:
        # Redirect to this page until the session has been cleared.
        return HttpResponseRedirect(next_page or request.path)

    current_site = get_current_site(request)
    context = {
        'site': current_site,
        'site_name': current_site.name,
        'title': _('Logged out'),
    }
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context,
                            current_app=current_app)
def logout_then_login(request, login_url=None, current_app=None, extra_context=None):
    """
    Logs out the user if he is logged in. Then redirects to the log-in page.
    """
    # Fall back to the configured login URL when none was supplied.
    target = resolve_url(login_url or settings.LOGIN_URL)
    return logout(request, target,
                  current_app=current_app, extra_context=extra_context)
def redirect_to_login(next, login_url=None,
                      redirect_field_name=REDIRECT_FIELD_NAME):
    """
    Redirects the user to the login page, passing the given 'next' page
    """
    parts = list(urlparse(resolve_url(login_url or settings.LOGIN_URL)))
    if redirect_field_name:
        # Merge the redirect target into the login URL's query string.
        query = QueryDict(parts[4], mutable=True)
        query[redirect_field_name] = next
        parts[4] = query.urlencode(safe='/')
    return HttpResponseRedirect(urlunparse(parts))
# 4 views for password reset:
# - password_reset sends the mail
# - password_reset_done shows a success message for the above
# - password_reset_confirm checks the link the user clicked and
# prompts for a new password
# - password_reset_complete shows a success message for the above
@csrf_protect
def password_reset(request, is_admin_site=False,
                   template_name='registration/password_reset_form.html',
                   email_template_name='registration/password_reset_email.html',
                   subject_template_name='registration/password_reset_subject.txt',
                   password_reset_form=PasswordResetForm,
                   token_generator=default_token_generator,
                   post_reset_redirect=None,
                   from_email=None,
                   current_app=None,
                   extra_context=None):
    """
    Display the password-reset form and send the reset email on valid POST.

    On success, redirects to ``post_reset_redirect`` (the
    password_reset_done view by default).
    """
    if post_reset_redirect is None:
        post_reset_redirect = reverse('django.contrib.auth.views.password_reset_done')
    if request.method == "POST":
        form = password_reset_form(request.POST)
        if form.is_valid():
            # Options forwarded to the form's save(), which renders and
            # sends the reset email.
            opts = {
                'use_https': request.is_secure(),
                'token_generator': token_generator,
                'from_email': from_email,
                'email_template_name': email_template_name,
                'subject_template_name': subject_template_name,
                'request': request,
            }
            if is_admin_site:
                # Admin-site resets build links against the admin's host.
                opts = dict(opts, domain_override=request.META['HTTP_HOST'])
            form.save(**opts)
            return HttpResponseRedirect(post_reset_redirect)
    else:
        form = password_reset_form()
    context = {
        'form': form,
    }
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context,
                            current_app=current_app)
def password_reset_done(request,
                        template_name='registration/password_reset_done.html',
                        current_app=None, extra_context=None):
    """Render the "reset email sent" confirmation page."""
    context = dict(extra_context) if extra_context is not None else {}
    return TemplateResponse(request, template_name, context,
                            current_app=current_app)
# Doesn't need csrf_protect since no-one can guess the URL
@sensitive_post_parameters()
@never_cache
def password_reset_confirm(request, uidb36=None, token=None,
                           template_name='registration/password_reset_confirm.html',
                           token_generator=default_token_generator,
                           set_password_form=SetPasswordForm,
                           post_reset_redirect=None,
                           current_app=None, extra_context=None):
    """
    View that checks the hash in a password reset link and presents a
    form for entering a new password.
    """
    UserModel = get_user_model()
    assert uidb36 is not None and token is not None  # checked by URLconf
    if post_reset_redirect is None:
        post_reset_redirect = reverse('django.contrib.auth.views.password_reset_complete')
    try:
        # uidb36 is the user's primary key, base36-encoded in the reset URL.
        uid_int = base36_to_int(uidb36)
        user = UserModel.objects.get(id=uid_int)
    except (ValueError, OverflowError, UserModel.DoesNotExist):
        user = None
    if user is not None and token_generator.check_token(user, token):
        validlink = True
        if request.method == 'POST':
            form = set_password_form(user, request.POST)
            if form.is_valid():
                form.save()
                return HttpResponseRedirect(post_reset_redirect)
        else:
            # NOTE(review): the unbound form is constructed with user=None
            # here; later Django versions pass ``user`` -- confirm no custom
            # form logic depends on it.
            form = set_password_form(None)
    else:
        # Invalid or expired link: render the template with no form.
        validlink = False
        form = None
    context = {
        'form': form,
        'validlink': validlink,
    }
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context,
                            current_app=current_app)
def password_reset_complete(request,
                            template_name='registration/password_reset_complete.html',
                            current_app=None, extra_context=None):
    """Render the "password reset complete" page with a link to log in."""
    base_context = {'login_url': resolve_url(settings.LOGIN_URL)}
    if extra_context is not None:
        base_context.update(extra_context)
    return TemplateResponse(request, template_name, base_context,
                            current_app=current_app)
@sensitive_post_parameters()
@csrf_protect
@login_required
def password_change(request,
                    template_name='registration/password_change_form.html',
                    post_change_redirect=None,
                    password_change_form=PasswordChangeForm,
                    current_app=None, extra_context=None):
    """Display and process the logged-in user's password-change form."""
    if post_change_redirect is None:
        post_change_redirect = reverse('django.contrib.auth.views.password_change_done')

    if request.method == "POST":
        # Bound form: validate the submitted passwords and save on success.
        form = password_change_form(user=request.user, data=request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(post_change_redirect)
    else:
        # Unbound form for the initial GET.
        form = password_change_form(user=request.user)

    context = {'form': form}
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context,
                            current_app=current_app)
@login_required
def password_change_done(request,
                         template_name='registration/password_change_done.html',
                         current_app=None, extra_context=None):
    """Render the "password changed" confirmation page."""
    context = dict(extra_context) if extra_context is not None else {}
    return TemplateResponse(request, template_name, context,
                            current_app=current_app)
| |
"""
A GUI to automatically create the twist structure for a forearm.
To use this tool, select one or more wrist joints and enter the desired data
into the option fields before pressing either the Create or Apply button.
To skin to the model, the forearm mesh should be skinned in segments for each
twist joint, plus one additional for the elbow joint, where the segment at the
proximal end of the elbow is skinned to the elbow joint and the final segment
before the hand is skinned to the final twist joint.
\par Setup Forearm Options:
- \b Suffix \b of \b New \b Twist \b Joints:
Specifies the base naming suffix to apply to the newly created joints. Their
prefix will match the elbow on which they are twisting and they will also be
numbered from 1 to n.
- \b Number \b of \b Twist \b Joints:
Specifies the number of twist joints to create for each forearm. You must
create at least one, and the joint nearest the wrist will always have the aim
constraint applied to it.
\par Aim Constraint Options:
- \b Elbow \b Aim \b Axis:
Corresponds to the axis in the forearm's local space that aims toward the hand
joint.
- \b Elbow \b Front \b Axis:
Corresponds to the axis in the forearm's local space that points toward the
character's front.
- \b Hand \b Front \b Axis:
Corresponds to the axis in the hand's local space that points toward the front
side of the hand (the side with the thumb on it).
\b Creation \b Info:
\b Donations: http://adammechtley.com/donations/
\b License: The MIT License
Copyright (c) 2011 Adam Mechtley (http://adammechtley.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the 'Software'), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
\namespace amTools.rigging.forearmSetup
"""
import sys
import maya.cmds as cmds
import amTools.utilities as utils
import amTools.utilities.ui as amui
## name of the options window (used to find/delete an existing instance)
kSetupOptionsWindow = 'am_setupForearmOptionsWindow'
## human-readable name of the tool, used in window titles and help menus
kToolName = 'Setup Forearm'
## current version of the tool
kVersionNumber = '1.03'
## date of current version
kVersionDate = '2011.03.27'
def menuItem(*args):
    """Menu-item callback; simply opens the options window."""
    optionsWindow()
def optionsWindow():
    """This function creates an options window for creating the forearm twist
    structure. When executing it, select the wrists in the arms you are setting
    up, then press Create or Apply."""
    # create the main interface; delete any stale copy of the window first
    if cmds.window(kSetupOptionsWindow, q=True, ex=True):
        cmds.deleteUI(kSetupOptionsWindow)
    mainWindow = cmds.window(kSetupOptionsWindow, title='%s Options'%kToolName, menuBar=True, wh=(545,350))

    # build the menu bar
    cmds.menu(label='Help')
    amui.helpMenuItem(kToolName, __file__)
    amui.aboutMenuItem(kToolName, kVersionNumber, kVersionDate)

    mainForm = cmds.formLayout(nd=100)

    # build the section to get information about the new twist joints
    if_suffixName = cmds.textFieldGrp(text='_Twist', label='Suffix of New Twist Joints:')
    if_numberTwistJoints = cmds.intSliderGrp(v=3, min=1, max=10, fmn=1, fmx=100, label='Number of Twist Joints:', field=True)

    # position the input fields for the twist joints
    cmds.formLayout(mainForm, edit=True, attachForm=[(if_suffixName, 'left', 30), (if_suffixName, 'top', 5)], attachNone=[(if_suffixName, 'right'), (if_suffixName, 'bottom')])
    cmds.formLayout(mainForm, edit=True, attachForm=[(if_numberTwistJoints, 'left', 30)], attachNone=[(if_numberTwistJoints, 'right'), (if_numberTwistJoints, 'bottom')], attachControl=[(if_numberTwistJoints, 'top', 5, if_suffixName)])

    # build the section to get information for the aim constraint
    # NOTE(review): eval() splices amui.__frameAlignCenter__ (a code fragment
    # for version-dependent frameLayout flags) into the call -- confirm its
    # contents are trusted; a direct cmds.frameLayout(**kwargs) would be safer.
    constraintFrame = eval('cmds.frameLayout(collapsable=True, label="Aim Constraint Options:" %s)'%amui.__frameAlignCenter__)
    constraintForm = cmds.formLayout(nd=100)

    # axis fields default to +X aim, +Z front (elbow and hand)
    if_elbowAimAxis = cmds.floatFieldGrp(v1=1, v2=0, v3=0, nf=3, pre=4, label='Elbow Aim Axis:')
    if_elbowFrontAxis = cmds.floatFieldGrp(v1=0, v2=0, v3=1, nf=3, pre=4, label='Elbow Front Axis:')
    if_handFrontAxis = cmds.floatFieldGrp(v1=0, v2=0, v3=1, nf=3, pre=4, label='Hand Front Axis:')

    # position the input fields for the aim constraint
    cmds.formLayout(constraintForm, edit=True, attachForm=[(if_elbowAimAxis, 'left', 30), (if_elbowAimAxis, 'top', 5)], attachNone=[(if_elbowAimAxis, 'right'), (if_elbowAimAxis, 'bottom')])
    cmds.formLayout(constraintForm, edit=True, attachForm=[(if_elbowFrontAxis, 'left', 30)], attachNone=[(if_elbowFrontAxis, 'right'), (if_elbowFrontAxis, 'bottom')], attachControl=[(if_elbowFrontAxis, 'top', 5, if_elbowAimAxis)])
    cmds.formLayout(constraintForm, edit=True, attachForm=[(if_handFrontAxis, 'left', 30)], attachNone=[(if_handFrontAxis, 'right'), (if_handFrontAxis, 'bottom')], attachControl=[(if_handFrontAxis, 'top', 5, if_elbowFrontAxis)])

    cmds.setParent('..') # go up to constraintForm
    cmds.setParent('..') # go up to mainForm

    # position the frame for the aim constraint
    cmds.formLayout(mainForm, edit=True, attachPosition=[(constraintFrame, 'left', -1, 0), (constraintFrame, 'right', -1, 100)], attachControl=[(constraintFrame, 'top', 5, if_numberTwistJoints)], attachNone=[(constraintFrame, 'bottom')])

    # create the buttons to execute the script; the command string embeds the
    # control names so doOptions can query their values when clicked
    cmd_create='amTools.rigging.forearmSetup.doOptions ("%s", "%s", "%s", "%s", "%s")'%(
        if_suffixName,
        if_numberTwistJoints,
        if_elbowAimAxis,
        if_elbowFrontAxis,
        if_handFrontAxis)
    utils.ui.threeButtonLayout(mainForm, mainWindow, cmd_create)

    cmds.showWindow(mainWindow)
def doOptions(input_suffix, input_numberTwistJoints, input_elbowAimAxis, input_elbowFrontAxis, input_handFrontAxis):
    """Specifies the function called when the apply or create button is clicked.

    Reads the option-field values, validates the current selection, builds the
    twist chain for every selected wrist, and leaves the new joints selected.

    input_* -- names of the option-window controls to query.
    """
    # IMPROVEMENT: the original wrapped this body in ``try: ... except: raise``,
    # a no-op that only obscured tracebacks; errors now propagate naturally.

    # validate selection: one or more wrist transforms must be selected
    selection = utils.dg.validateSelection(type='transform', name='wrist joint objects', min=1)

    # validate the suffix used to name the new joints
    suffix = cmds.textFieldGrp(input_suffix, q=True, tx=True)
    utils.dg.validateAffix(suffix)

    numberTwistJoints = cmds.intSliderGrp(input_numberTwistJoints, q=True, v=True)
    newSelection = []

    # perform setup for each wrist in the selection
    for wrist in selection:
        elbow = cmds.listRelatives(wrist, p=True, f=True)  # full path
        elbowShort = cmds.listRelatives(wrist, p=True)     # short name for prefixing
        newJoints = doSetup(
            elbowShort[0] + suffix,
            numberTwistJoints,
            wrist,
            elbow[0],
            cmds.floatFieldGrp(input_elbowAimAxis, q=True, v=True),
            cmds.floatFieldGrp(input_elbowFrontAxis, q=True, v=True),
            cmds.floatFieldGrp(input_handFrontAxis, q=True, v=True))
        newSelection += newJoints

    # select the newly created joints for easy editing
    cmds.select(newSelection)
def doSetup(baseName, numberTwistJoints, wrist, elbow, elbowAimAxis, elbowFrontAxis, handFrontAxis):
    """Create the forearm twist joints and return a list of their names.

    baseName -- name prefix for the new joints, numbered 1..numberTwistJoints
    numberTwistJoints -- how many twist joints to create between elbow and wrist
    wrist, elbow -- names of the existing wrist and elbow nodes
    elbowAimAxis, elbowFrontAxis, handFrontAxis -- axis triples for the aim
        constraint applied to the joint nearest the wrist

    BUGFIX: the final-joint check used ``i is numberTwistJoints`` -- an
    identity comparison that only works because CPython caches small ints --
    now a proper equality test. The pointless ``try: ... except: raise``
    wrapper was also removed.
    """
    # validate baseName
    utils.dg.validateNodeName(baseName)
    # validate incoming object names
    utils.dg.verifyNode(wrist)
    utils.dg.verifyNode(elbow)

    # get the translation values for the wrist
    wristTranslate = cmds.getAttr('%s.translate'%wrist)
    # see if there is a side label
    bodySide = cmds.getAttr('%s.side'%elbow)
    # find out what rotate order the elbow is using
    rotateOrder = cmds.getAttr('%s.rotateOrder'%elbow)

    # create the twist joints, starting at the wrist end (i == numberTwistJoints)
    twistJoints = []
    for ctr in range(numberTwistJoints):
        i = numberTwistJoints - ctr
        cmds.select(cl=True)
        newJoint = cmds.joint(name='%s%s'%(baseName, i))
        cmds.parent('|' + newJoint, elbow)
        newJoint = (elbow + '|' + newJoint)

        # if the elbow object is a joint, then derive the radius from it
        if cmds.objectType(elbow, isType='joint'):
            jointRadius = cmds.getAttr('%s.radius'%elbow) * 0.5
        else:
            jointRadius = 1.0
        cmds.setAttr('%s.radius'%newJoint, jointRadius)
        cmds.setAttr('%s.jointOrient'%newJoint, 0, 0, 0)
        # space the joint evenly along the elbow-to-wrist translation vector
        cmds.setAttr(
            '%s.translate'%newJoint,
            wristTranslate[0][0]/(numberTwistJoints+1)*i,
            wristTranslate[0][1]/(numberTwistJoints+1)*i,
            wristTranslate[0][2]/(numberTwistJoints+1)*i)

        if i == numberTwistJoints:
            # final joint (nearest the wrist): aim at the wrist and twist with
            # the hand via an objectrotation world-up
            cmds.aimConstraint(
                [wrist, newJoint],
                aimVector=[elbowAimAxis[0], elbowAimAxis[1], elbowAimAxis[2]],
                upVector=[elbowFrontAxis[0], elbowFrontAxis[1], elbowFrontAxis[2]],
                worldUpVector=[handFrontAxis[0], handFrontAxis[1], handFrontAxis[2]],
                worldUpObject=wrist,
                worldUpType='objectrotation')
        else:
            # intermediate joints: blend orientation between the elbow and the
            # aim-constrained final joint, weighted by position along the chain
            orientConstraint = cmds.orientConstraint(elbow, twistJoints[0], newJoint)
            # NOTE(review): this query passes no constraint object, so it acts
            # on the current selection -- confirm it targets the constraint
            # just created; passing orientConstraint[0] explicitly would be safer.
            targetWeights = cmds.orientConstraint(q=True, weightAliasList=True)
            cmds.setAttr('%s.%s'%(orientConstraint[0], targetWeights[0]), numberTwistJoints - i)
            cmds.setAttr('%s.%s'%(orientConstraint[0], targetWeights[1]), i)
            cmds.setAttr('%s.interpType'%orientConstraint[0], 1)

        # copy the side label and rotate order from the elbow
        cmds.setAttr('%s.side'%newJoint, bodySide)
        cmds.setAttr('%s.type'%newJoint, 18)
        cmds.setAttr('%s.otherType'%newJoint, 'Forearm Twist %s'%(i + 1), type='string')
        cmds.setAttr('%s.rotateOrder'%newJoint, rotateOrder)

        # add the new joint to the list to return
        twistJoints.append(newJoint)
    return twistJoints
| |
# Copyright (c) 2013 Marion Zepf
# Copyright (c) 2014 Walter Bender
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import ast
from gettext import gettext as _
from math import sqrt
from random import uniform
import traceback
import inspect
# from ast_pprint import * # only used for debugging, safe to comment out
from .tablock import Media
from .tacanvas import TurtleGraphics
from .taconstants import (Color, CONSTANTS, ColorObj, Vector)
from .talogo import (LogoCode, logoerror, NegativeRootError)
from .taturtle import (Turtle, Turtles)
from TurtleArt.tatype import (TYPE_CHAR, TYPE_INT, TYPE_FLOAT, TYPE_OBJECT,
TYPE_MEDIA, TYPE_COLOR, BOX_AST, ACTION_AST,
TYPE_VECTOR,
Type, TypeDisjunction, TATypeError, get_type,
TypedSubscript, TypedName, is_bound_method,
is_instancemethod, is_staticmethod,
identity, get_converter, convert, get_call_ast)
from .tautils import debug_output
from .tawindow import (TurtleArtWindow, global_objects, plugins_in_use)
from .util import ast_extensions
class PyExportError(BaseException):
    """Raised when converting the block tree to Python code fails.

    NOTE: deliberately kept as a BaseException subclass to preserve the
    original catch semantics (a plain ``except Exception`` will not catch it).
    """

    def __init__(self, message, block=None):
        """message -- the error message
        block -- the block where the error occurred, if known"""
        self.message = message
        self.block = block

    def __str__(self):
        prefix = (_("error in highlighted block")
                  if self.block is not None
                  else _("error"))
        return prefix + ": " + str(self.message)
class Primitive(object):
    """ Something that can be called when the block code is executed in TA,
    but that can also be transformed into a Python AST."""

    # set to True for verbose tracing of slot filling and AST export
    _DEBUG = False

    # map the names of the operator staticmethods below to their ast
    # operator classes; 2-tuple entries are (unary op, binary op)
    # alternatives, chosen in get_ast() by the number of arguments
    STANDARD_OPERATORS = {'plus': (ast.UAdd, ast.Add),
                          'minus': (ast.USub, ast.Sub),
                          'multiply': ast.Mult,
                          'divide': ast.Div,
                          'modulo': ast.Mod,
                          'power': ast.Pow,
                          'and_': ast.And,
                          'or_': ast.Or,
                          'not_': ast.Not,
                          'equals': ast.Eq,
                          'less': ast.Lt,
                          'greater': ast.Gt}
def __init__(self, func, return_type=TYPE_OBJECT, arg_descs=None,
kwarg_descs=None, call_afterwards=None, export_me=True):
""" return_type -- the type (from the type hierarchy) that this
Primitive will return
arg_descs, kwarg_descs -- a list of argument descriptions and
a dictionary of keyword argument descriptions. An argument
description can be either an ArgSlot or a ConstantArg.
call_afterwards -- Code to call after this Primitive has been called
(e.g., for updating labels in LogoCode) (not used for creating
AST)
export_me -- True iff this Primitive should be exported to Python
code (the default case) """
self.func = func
self.return_type = return_type
if arg_descs is None:
self.arg_descs = []
else:
self.arg_descs = arg_descs
if kwarg_descs is None:
self.kwarg_descs = {}
else:
self.kwarg_descs = kwarg_descs
self.call_afterwards = call_afterwards
self.export_me = export_me
def copy(self):
""" Return a Primitive object with the same attributes as this one.
Shallow-copy the arg_descs and kwarg_descs attributes. """
arg_descs_copy = self.arg_descs[:]
if isinstance(self.arg_descs, ArgListDisjunction):
arg_descs_copy = ArgListDisjunction(arg_descs_copy)
return Primitive(self.func,
return_type=self.return_type,
arg_descs=arg_descs_copy,
kwarg_descs=self.kwarg_descs.copy(),
call_afterwards=self.call_afterwards,
export_me=self.export_me)
def __repr__(self):
return "Primitive(%s -> %s)" % (repr(self.func), str(self.return_type))
    @property
    def __name__(self):
        # expose the wrapped function's name as this Primitive's own name
        return self.func.__name__
def get_name_for_export(self):
""" Return the expression (as a string) that represents this Primitive
in the exported Python code, e.g., 'turtle.forward'. """
func_name = ""
if self.wants_turtle():
func_name = "turtle."
elif self.wants_turtles():
func_name = "turtles."
elif self.wants_canvas():
func_name = "canvas."
elif self.wants_logocode():
func_name = "logo."
elif self.wants_heap():
func_name = "logo.heap."
elif self.wants_tawindow():
func_name = "tw."
else:
results, plugin = self.wants_plugin()
if results:
for k in list(global_objects.keys()):
if k == plugin:
if k not in plugins_in_use:
plugins_in_use.append(k)
func_name = k.lower() + '.'
break
# get the name of the function directly from the function itself
func_name += self.func.__name__
return func_name
def are_slots_filled(self):
""" Return True iff none of the arg_descs or kwarg_descs is an
ArgSlot. """
for arg_desc in self.arg_descs:
if isinstance(arg_desc, ArgSlot):
return False
for key in self.kwarg_descs:
if isinstance(self.kwarg_descs[key], ArgSlot):
return False
return True
    def fill_slots(self, arguments=None, keywords=None, convert_to_ast=False,
                   call_my_args=True):
        """ Return a copy of this Primitive whose ArgSlots are filled with
        the given arguments, turned into ConstantArgs. Call the arguments,
        apply their wrappers, and check their types as appropriate.

        arguments, keywords -- the runtime arguments to fill in
        convert_to_ast -- fill with ASTs instead of runtime values
        call_my_args -- if False, pass callables through uncalled
        Raises TATypeError when no alternative argument list accepts the
        arguments. """
        if arguments is None:
            arguments = []
        if keywords is None:
            keywords = {}
        new_prim = self.copy()
        # an ArgListDisjunction offers several alternative signatures;
        # otherwise there is exactly one candidate slot list
        if isinstance(new_prim.arg_descs, ArgListDisjunction):
            slot_list_alternatives = list(new_prim.arg_descs)
        else:
            slot_list_alternatives = [new_prim.arg_descs]
        # arguments
        error = None
        filler = None
        for slot_list in slot_list_alternatives:
            error = None
            new_slot_list = []
            # consume a fresh copy of the arguments per alternative
            filler_list = list(arguments[:])
            for slot in slot_list:
                if isinstance(slot, ArgSlot):
                    filler = filler_list.pop(0)
                    try:
                        const = slot.fill(filler,
                                          convert_to_ast=convert_to_ast,
                                          call_my_args=call_my_args)
                    except TATypeError as e:
                        # remember the failure and try the next alternative
                        error = e
                        if Primitive._DEBUG:
                            traceback.print_exc()
                        break
                    else:
                        new_slot_list.append(const)
                else:
                    # already-constant descriptions are kept as-is
                    new_slot_list.append(slot)
            if error is None:
                new_prim.arg_descs = new_slot_list
                break
        # all alternatives failed: re-raise the last type error
        if error is not None:
            raise error
        # keyword arguments
        for key in keywords:
            kwarg_desc = new_prim.kwarg_descs[key]
            if isinstance(kwarg_desc, ArgSlot):
                const = kwarg_desc.fill(keywords[key],
                                        convert_to_ast=convert_to_ast,
                                        call_my_args=call_my_args)
                new_prim.kwarg_descs[key] = const
        return new_prim
    def get_values_of_filled_slots(self, exportable_only=False):
        """ Return the values of all filled argument slots as a list, and
        the values of all filled keyword argument slots as a dictionary.
        Ignore all un-filled (keyword) argument slots.
        exportable_only -- return only exportable values and convert values
        to ASTs instead of calling them """
        new_args = []
        for c_arg in self.arg_descs:
            # only ConstantArgs count as 'filled'; ArgSlots are skipped
            if isinstance(c_arg, ConstantArg) and \
                    (not exportable_only or export_me(c_arg.value)):
                new_args.append(c_arg.get(convert_to_ast=exportable_only))
        new_kwargs = {}
        for key in self.kwarg_descs:
            if isinstance(self.kwarg_descs[key], ConstantArg) and \
                    (not exportable_only or export_me(
                        self.kwarg_descs[key].value)):
                new_kwargs[key] = self.kwarg_descs[key].get(
                    convert_to_ast=exportable_only)
        return new_args, new_kwargs
def allow_call_args(self, recursive=False):
""" Set call_args attribute of all argument descriptions to True
recursive -- recursively call allow_call_args on all constant args
that are Primitives """
for arg_desc in self.arg_descs:
arg_desc.call_arg = True
if recursive and isinstance(arg_desc, ConstantArg) and \
isinstance(arg_desc.value, Primitive):
arg_desc.value.allow_call_args(recursive=True)
for kwarg_desc in self.kwarg_descs:
kwarg_desc.call_arg = True
if recursive and isinstance(kwarg_desc, ConstantArg) and \
isinstance(kwarg_desc.value, Primitive):
kwarg_desc.value.allow_call_args(recursive=True)
    def __call__(self, *runtime_args, **runtime_kwargs):
        """ Execute the function, passing it the arguments received at
        runtime. Also call the function in self.call_afterwards and pass it
        all runtime_args and runtime_kwargs.
        If the very first argument is a LogoCode instance, it is removed.
        The active turtle, the Turtles object, the canvas, the LogoCode
        object, or the TurtleArtWindow object will be prepended to the
        arguments (depending on what this Primitive wants). """
        # remove the first argument if it is a LogoCode instance
        if runtime_args and isinstance(runtime_args[0], LogoCode):
            runtime_args = runtime_args[1:]
        if Primitive._DEBUG:
            debug_output(repr(self))
            debug_output(" runtime_args: " + repr(runtime_args))
        # fill the ArgSlots with the runtime arguments
        new_prim = self.fill_slots(runtime_args, runtime_kwargs,
                                   convert_to_ast=False)
        if not new_prim.are_slots_filled():
            raise logoerror("#syntaxerror")
        if Primitive._DEBUG:
            debug_output(" new_prim.arg_descs: " + repr(new_prim.arg_descs))
        # extract the actual values from the (now constant) arguments
        (new_args, new_kwargs) = new_prim.get_values_of_filled_slots()
        if Primitive._DEBUG:
            debug_output(" new_args: " + repr(new_args))
            debug_output("end " + repr(self))
        # what does this primitive want as its first argument?
        first_arg = None
        # bound methods already carry their receiver; nothing to prepend
        if not is_bound_method(new_prim.func):
            if new_prim.wants_turtle():
                first_arg = global_objects["turtles"].get_active_turtle()
            elif new_prim.wants_turtles():
                first_arg = global_objects["turtles"]
            elif new_prim.wants_canvas():
                first_arg = global_objects["canvas"]
            elif new_prim.wants_logocode():
                first_arg = global_objects["logo"]
            elif new_prim.wants_heap():
                first_arg = global_objects["logo"].heap
            elif new_prim.wants_tawindow():
                first_arg = global_objects["window"]
            else:
                result, plugin = new_prim.wants_plugin()
                if result:
                    # NOTE(review): wants_plugin() returns the plugin's key
                    # in global_objects, so the key (not the instance) is
                    # passed as first_arg here -- verify this is intended
                    first_arg = plugin
        # execute the actual function
        if first_arg is None:
            return_value = new_prim.func(*new_args, **new_kwargs)
        else:
            return_value = new_prim.func(first_arg, *new_args, **new_kwargs)
        if new_prim.call_afterwards is not None:
            new_prim.call_afterwards(*new_args, **new_kwargs)
        return return_value
    def get_ast(self, *arg_asts, **kwarg_asts):
        """Transform this object into a Python AST. When serialized and
        executed, the AST will do exactly the same as calling this
        object.

        arg_asts, kwarg_asts -- ASTs (or raw values) that are filled into
        this Primitive's argument slots before export.
        Several block kinds (loops, conditionals, boxes, stacks, standard
        operators, ...) get special-cased ASTs; everything else becomes a
        plain function call."""
        if Primitive._DEBUG:
            debug_output(repr(self))
            debug_output(" arg_asts: " + repr(arg_asts))
        new_prim = self.fill_slots(arg_asts, kwarg_asts, convert_to_ast=True)
        if not new_prim.are_slots_filled():
            raise PyExportError("not enough arguments")
        if Primitive._DEBUG:
            debug_output(" new_prim.arg_descs: " + repr(new_prim.arg_descs))
        # extract the actual values from the (now constant) arguments
        (new_arg_asts, new_kwarg_asts) = new_prim.get_values_of_filled_slots(
            exportable_only=True)
        if Primitive._DEBUG:
            debug_output(" new_arg_asts: " + repr(new_arg_asts))
            debug_output("end " + repr(self))
        # SPECIAL HANDLING #
        # loops
        if self == LogoCode.prim_loop:
            controller = self._get_loop_controller()
            if controller == Primitive.controller_repeat:
                # 'repeat' loop
                num_repetitions = new_arg_asts[0]
                # unwrap the controller call to get the bare repetition count
                if num_repetitions.func.id == 'controller_repeat':
                    num_repetitions = num_repetitions.args[0]
                repeat_iter = get_call_ast("range", [num_repetitions])
                # TODO use new variable name in nested loops
                loop_ast = ast.For(target=ast.Name(id="i", ctx=ast.Store),
                                   iter=repeat_iter,
                                   body=new_arg_asts[1],
                                   orelse=[])
                return loop_ast
            else:
                if controller == Primitive.controller_forever:
                    condition_ast = ast.Name(id="True", ctx=ast.Load)
                elif controller == Primitive.controller_while:
                    condition_ast = new_arg_asts[0].args[0]
                elif controller == Primitive.controller_until:
                    # negate the condition: while not <cond>
                    pos_cond_ast = new_arg_asts[0].args[0]
                    condition_ast = ast.UnaryOp(op=ast.Not,
                                                operand=pos_cond_ast)
                else:
                    raise PyExportError("unknown loop controller: " + repr(
                        controller))
                loop_ast = ast.While(test=condition_ast,
                                     body=new_arg_asts[1],
                                     orelse=[])
                # Until always executes its body once.
                if controller == Primitive.controller_until:
                    loop_list = []
                    for arg_ast in new_arg_asts[1]:
                        loop_list.append(arg_ast)
                    loop_list.append(loop_ast)
                    return loop_list
                else:
                    return loop_ast
        # conditionals
        elif self in (LogoCode.prim_if, LogoCode.prim_ifelse):
            test = new_arg_asts[0]
            body = new_arg_asts[1]
            if len(new_arg_asts) > 2:
                orelse = new_arg_asts[2]
            else:
                orelse = []
            if_ast = ast.If(test=test, body=body, orelse=orelse)
            return if_ast
        # boxes
        elif self == LogoCode.prim_set_box:
            target_ast = ast.Subscript(value=BOX_AST,
                                       slice=ast.Index(value=new_arg_asts[0]),
                                       ctx=ast.Store)
            return ast.Assign(targets=[target_ast], value=new_arg_asts[1])
        elif self == LogoCode.prim_get_box:
            return ast.Subscript(value=BOX_AST,
                                 slice=ast.Index(value=new_arg_asts[0]),
                                 ctx=ast.Load)
        # action stacks
        elif self == LogoCode.prim_define_stack:
            # stack definitions produce no inline code
            return
        elif self == LogoCode.prim_invoke_stack:
            stack_func = ast.Subscript(
                value=ACTION_AST,
                slice=ast.Index(value=new_arg_asts[0]), ctx=ast.Load)
            call_ast = get_call_ast('logo.icall', [stack_func])
            return [call_ast, ast_yield_true()]
        elif self == LogoCode.prim_invoke_return_stack:
            # FIXME: Need to return value
            stack_func = ast.Subscript(
                value=ACTION_AST,
                slice=ast.Index(value=new_arg_asts[0]), ctx=ast.Load)
            call_ast = get_call_ast('logo.icall', [stack_func])
            return [call_ast, ast_yield_true()]
        # stop stack
        elif self == LogoCode.prim_stop_stack:
            return ast.Return()
        # sleep/ wait
        elif self == LogoCode.prim_wait:
            return [get_call_ast('sleep', new_arg_asts), ast_yield_true()]
        # standard operators
        elif self.func.__name__ in Primitive.STANDARD_OPERATORS:
            op = Primitive.STANDARD_OPERATORS[self.func.__name__]
            # 'divide': prevent unwanted integer division
            if self == Primitive.divide:
                def _is_float(x):
                    return get_type(x)[0] == TYPE_FLOAT
                if not _is_float(new_arg_asts[0]) and \
                        not _is_float(new_arg_asts[1]):
                    new_arg_asts[0] = get_call_ast('float', [new_arg_asts[0]],
                                                   return_type=TYPE_FLOAT)
            if len(new_arg_asts) == 1:
                # one argument: use the unary variant of the operator
                if isinstance(op, tuple):
                    op = op[0]
                return ast.UnaryOp(op=op, operand=new_arg_asts[0])
            elif len(new_arg_asts) == 2:
                if isinstance(op, tuple):
                    op = op[1]
                (left, right) = new_arg_asts
                if issubclass(op, ast.boolop):
                    return ast.BoolOp(op=op, values=[left, right])
                elif issubclass(op, ast.cmpop):
                    return ast.Compare(left=left, ops=[op],
                                       comparators=[right])
                else:
                    return ast.BinOp(op=op, left=left, right=right)
        # f(x)
        elif self == LogoCode.prim_myfunction:
            param_asts = []
            # one parameter (x, y, z) per argument after the body string
            for id_ in ['x', 'y', 'z'][:len(new_arg_asts) - 1]:
                param_asts.append(ast.Name(id=id_, ctx=ast.Param))
            func_ast = ast_extensions.LambdaWithStrBody(
                body_str=new_arg_asts[0].s, args=param_asts)
            return get_call_ast(func_ast, new_arg_asts[1:],
                                return_type=self.return_type)
        # square root
        elif self == Primitive.square_root:
            return get_call_ast('sqrt', new_arg_asts, new_kwarg_asts,
                                return_type=self.return_type)
        # random
        elif self in (Primitive.random_char, Primitive.random_int):
            # int(round(uniform(lower, upper), 0)), optionally chr()'d
            uniform_ast = get_call_ast('uniform', new_arg_asts)
            round_ast = get_call_ast('round', [uniform_ast, ast.Num(n=0)])
            int_ast = get_call_ast('int', [round_ast], return_type=TYPE_INT)
            if self == Primitive.random_char:
                chr_ast = get_call_ast('chr', [int_ast], return_type=TYPE_CHAR)
                return chr_ast
            else:
                return int_ast
        # identity
        elif self == Primitive.identity:
            return new_arg_asts[0]
        # constant
        elif self == CONSTANTS.get:
            return TypedSubscript(value=ast.Name(id='CONSTANTS', ctx=ast.Load),
                                  slice_=ast.Index(value=new_arg_asts[0]),
                                  return_type=self.return_type)
        # group of Primitives or sandwich-clamp block
        elif self in (Primitive.group, LogoCode.prim_clamp):
            ast_list = []
            for prim in new_arg_asts[0]:
                if export_me(prim):
                    new_ast = value_to_ast(prim)
                    if isinstance(new_ast, ast.AST):
                        ast_list.append(new_ast)
            return ast_list
        # set turtle
        elif self == LogoCode.prim_turtle:
            text = 'turtle = turtles.get_active_turtle()'
            return [get_call_ast('logo.prim_turtle', new_arg_asts),
                    ast_extensions.ExtraCode(text)]
        elif self == LogoCode.active_turtle:
            text = 'turtle = turtles.get_active_turtle()'
            return ast_extensions.ExtraCode(text)
        # comment
        elif self == Primitive.comment:
            if isinstance(new_arg_asts[0], ast.Str):
                text = ' ' + str(new_arg_asts[0].s)
            else:
                text = ' ' + str(new_arg_asts[0])
            return ast_extensions.Comment(text)
        # print
        elif self == TurtleArtWindow.print_:
            func_name = self.get_name_for_export()
            call_ast = get_call_ast(func_name, new_arg_asts)
            # NOTE(review): ast.Print exists only on Python 2; on Python 3
            # this would need an ast.Call to print() -- confirm runtime
            print_ast = ast.Print(values=new_arg_asts[:1], dest=None, nl=True)
            return [call_ast, print_ast]
        # heap
        elif self == LogoCode.get_heap:
            return TypedName(id_='logo.heap', return_type=self.return_type)
        elif self == LogoCode.reset_heap:
            target_ast = ast.Name(id='logo.heap', ctx=ast.Store)
            value_ast = ast.List(elts=[], ctx=ast.Load)
            return ast.Assign(targets=[target_ast], value=value_ast)
        # NORMAL FUNCTION CALL #
        else:
            func_name = self.get_name_for_export()
            return get_call_ast(func_name, new_arg_asts, new_kwarg_asts,
                                return_type=self.return_type)
def __eq__(self, other):
""" Two Primitives are equal iff their all their properties are equal.
Consider bound and unbound methods equal. """
# other is a Primitive
if isinstance(other, Primitive):
return self == other.func and \
self.return_type == other.return_type and \
self.arg_descs == other.arg_descs and \
self.kwarg_descs == other.kwarg_descs and \
self.call_afterwards == other.call_afterwards and \
self.export_me == other.export_me
# other is a callable
elif callable(other):
if is_instancemethod(self.func) != is_instancemethod(other):
return False
elif is_instancemethod(self.func): # and is_instancemethod(other):
return self.func.__self__.__class__ == \
other.__self__.__class__ and \
self.func.__func__ == other.__func__
else:
return self.func == other
elif is_staticmethod(other):
return self.func == other.__func__
# other is neither a Primitive nor a callable
else:
return False
    def wants_turtle(self):
        """Does this Primitive want to get the active turtle as its first
        argument?"""
        return self._wants(Turtle)
    def wants_turtles(self):
        """ Does this Primitive want to get the Turtles instance as its
        first argument? """
        return self._wants(Turtles)
    def wants_canvas(self):
        """ Does this Primitive want to get the canvas (TurtleGraphics) as
        its first argument? """
        return self._wants(TurtleGraphics)
    def wants_logocode(self):
        """ Does this Primitive want to get the LogoCode instance as its
        first argument? """
        # lambdas are treated as always wanting the LogoCode instance
        return self.func.__name__ == '<lambda>' or self._wants(LogoCode)
    def wants_heap(self):
        """ Does this Primitive want to get the heap as its first argument? """
        # true for methods bound to a list instance (the heap is a list)
        # and for unbound list methods such as list.append
        return (hasattr(self.func, '__self__'
                        ) and isinstance(self.func.__self__, list)) or \
            self.func in list(list.__dict__.values())
    def wants_tawindow(self):
        """ Does this Primitive want to get the TurtleArtWindow instance
        as its first argument? """
        return self._wants(TurtleArtWindow)
def wants_plugin(self):
"""Does this Primitive want to get a plugin instance as its first
argument? """
for obj in list(global_objects.keys()):
if self._wants(global_objects[obj].__class__):
return True, obj
return False, None
    def wants_nothing(self):
        """Does this Primitive want nothing as its first argument? I.e. does
        it want to be passed all the arguments of the block and
        nothing else?"""
        return not is_instancemethod(self.func)
    def _wants(self, theClass):
        # True iff theClass itself defines self.func's name as a plain
        # function (getattr_static avoids triggering descriptors)
        try:
            return inspect.getattr_static(
                theClass, self.func.__name__).__class__.__name__ == 'function'
        except AttributeError:
            return False
    # treat the following methods in a special way when converting the
    # Primitive to an AST
    @staticmethod
    def controller_repeat(num):
        """ Loop controller for the 'repeat' block
        num -- number of iterations; yields True once per iteration,
        then False to end the loop """
        for i in range(num):
            yield True
        yield False
    @staticmethod
    def controller_forever():
        """ Loop controller for the 'forever' block (never yields False) """
        while True:
            yield True
    @staticmethod
    def controller_while(condition):
        """ Loop controller for the 'while' block
        condition -- Primitive that is evaluated every time through the
        loop """
        # make the condition Primitive call its own callable arguments
        condition.allow_call_args(recursive=True)
        while condition():
            yield True
        yield False
    @staticmethod
    def controller_until(condition):
        """ Loop controller for the 'until' block
        condition -- Primitive that is evaluated every time through the
        loop """
        # make the condition Primitive call its own callable arguments
        condition.allow_call_args(recursive=True)
        while not condition():
            yield True
        yield False
LOOP_CONTROLLERS = [controller_repeat, controller_forever,
controller_while, controller_until]
    def _get_loop_controller(self):
        """ Return the controller for this loop Primitive. Raise a
        PyExportError if no controller was found. """
        def _is_loop_controller(candidate):
            return callable(candidate) and \
                candidate in Primitive.LOOP_CONTROLLERS
        # the controller may sit in a filled ConstantArg or still be the
        # wrapper of an unfilled ArgSlot
        for desc in self.arg_descs:
            if isinstance(desc, ConstantArg):
                value = desc.value
                if _is_loop_controller(value):
                    return value
            elif isinstance(desc, ArgSlot):
                wrapper = desc.wrapper
                if _is_loop_controller(wrapper):
                    return wrapper
        # no controller found
        raise PyExportError("found no loop controller for " + repr(self))
    @staticmethod
    def do_nothing():
        """ Do nothing. """
        pass
    @staticmethod
    def identity(arg):
        """ Return the argument unchanged """
        return arg
@staticmethod
def group(prim_list):
""" Group together multiple Primitives into one. Treat each Primitive
as a separate line of code. """
return_val = None
for prim in prim_list:
return_val = prim()
return return_val
    @staticmethod
    def plus(arg1, arg2=None):
        """ If only one argument is given, prefix it with '+'. If two
        arguments are given, add the second to the first. If the first
        argument is a tuple of length 2 and the second is None, use the
        values in the tuple as arg1 and arg2. """
        # unpack a single (arg1, arg2) pair passed as one sequence
        if isinstance(arg1, (list, tuple)) and len(arg1) == 2 and arg2 is None:
            (arg1, arg2) = arg1
        if arg2 is None:
            return + arg1
        elif isinstance(arg1, Vector) and isinstance(arg2, Vector):
            # component-wise vector addition; result keeps arg1's name
            vector = []
            for i in range(len(arg1.vector)):
                vector.append(arg1.vector[i] + arg2.vector[i])
            return Vector(arg1.name, vector)
        else:
            return arg1 + arg2
    @staticmethod
    def minus(arg1, arg2=None):
        """ If only one argument is given, change its sign. If two
        arguments are given, subtract the second from the first. """
        if arg2 is None:
            return - arg1
        elif isinstance(arg1, Vector) and isinstance(arg2, Vector):
            # component-wise vector subtraction; result keeps arg1's name
            vector = []
            for i in range(len(arg1.vector)):
                vector.append(arg1.vector[i] - arg2.vector[i])
            return Vector(arg1.name, vector)
        else:
            return arg1 - arg2
    @staticmethod
    def multiply(arg1, arg2):
        """ Multiply the two arguments. A Vector and a scalar are
        multiplied component-wise (in either order). """
        if isinstance(arg1, Vector) and isinstance(arg2, (int, float)):
            vector = []
            for i in range(len(arg1.vector)):
                vector.append(arg1.vector[i] * arg2)
            return Vector(arg1.name, vector)
        elif isinstance(arg2, Vector) and isinstance(arg1, (int, float)):
            vector = []
            for i in range(len(arg2.vector)):
                vector.append(arg2.vector[i] * arg1)
            return Vector(arg2.name, vector)
        else:
            return arg1 * arg2
    @staticmethod
    def divide(arg1, arg2):
        """ Divide the first argument by the second. Scalar division is
        always floating point; a Vector divided by a scalar divides each
        component. """
        # NOTE(review): only a scalar zero divisor is caught here; the
        # behavior for a Vector arg2 comparing equal to 0 depends on
        # Vector.__eq__ -- confirm
        if arg2 == 0:
            raise logoerror("#zerodivide")
        if isinstance(arg1, Vector) and isinstance(arg2, (int, float)):
            vector = []
            for i in range(len(arg1.vector)):
                vector.append(arg1.vector[i] / arg2)
            return Vector(arg1.name, vector)
        elif isinstance(arg2, Vector) and isinstance(arg1, (int, float)):
            # NOTE(review): this divides the vector's components by the
            # scalar (mirroring multiply), not scalar / component --
            # verify this is the intended semantics for scalar / vector
            vector = []
            for i in range(len(arg2.vector)):
                vector.append(arg2.vector[i] / arg1)
            return Vector(arg2.name, vector)
        else:
            return float(arg1) / arg2
    @staticmethod
    def modulo(arg1, arg2):
        """ Return the remainder of dividing the first argument by the second.
        If the first argument is a string, format it with the value(s) in
        the second argument. """
        # the single % operator covers both numeric mod and str formatting
        return arg1 % arg2
    @staticmethod
    def power(arg1, arg2):
        """ Raise the first argument to the power given by the second """
        return arg1 ** arg2
    @staticmethod
    def square_root(arg1):
        """ Return the square root of the argument. If it is a negative
        number, raise a NegativeRootError. """
        if arg1 < 0:
            raise NegativeRootError(neg_value=arg1)
        return sqrt(arg1)
    @staticmethod
    def and_(arg1, arg2):
        """ Logically conjoin the two arguments (using short-circuiting) """
        return arg1 and arg2
    @staticmethod
    def or_(arg1, arg2):
        """ Logically disjoin the two arguments (using short-circuiting) """
        return arg1 or arg2
    @staticmethod
    def not_(arg):
        """ Return True if the argument evaluates to False, and False
        otherwise. """
        return not arg
    @staticmethod
    def equals(arg1, arg2):
        """ Return arg1 == arg2 (ColorObjs are compared by string form) """
        # See comment in tatype.py TYPE_BOX -> TYPE_COLOR
        if isinstance(arg1, ColorObj) or isinstance(arg2, ColorObj):
            return str(arg1) == str(arg2)
        else:
            return arg1 == arg2
    @staticmethod
    def less(arg1, arg2):
        """ Return arg1 < arg2 (ColorObjs are compared as floats) """
        # See comment in tatype.py TYPE_BOX -> TYPE_COLOR
        if isinstance(arg1, ColorObj) or isinstance(arg2, ColorObj):
            return float(arg1) < float(arg2)
        else:
            return arg1 < arg2
    @staticmethod
    def greater(arg1, arg2):
        """ Return arg1 > arg2 (ColorObjs are compared as floats) """
        # See comment in tatype.py TYPE_BOX -> TYPE_COLOR
        if isinstance(arg1, ColorObj) or isinstance(arg2, ColorObj):
            return float(arg1) > float(arg2)
        else:
            return arg1 > arg2
    @staticmethod
    def comment(text):
        """In 'snail' execution mode, display the comment. Else, do
        nothing."""
        tw = global_objects["window"]
        # only show the label while stepping (step_time != 0) and the
        # window is not hidden -- tw.hide semantics assumed, confirm
        if not tw.hide and tw.step_time != 0:
            tw.showlabel('print', text)
    @staticmethod
    def random_int(lower, upper):
        """ Choose a random integer between lower and upper, which must be
        integers (both endpoints are reachable) """
        # uniform() returns a float; round to the nearest int
        return int(round(uniform(lower, upper), 0))
    @staticmethod
    def random_char(lower, upper):
        """ Choose a random Unicode code point between lower and upper,
        which must be integers, and return it as a 1-char string """
        return chr(Primitive.random_int(lower, upper))
class Disjunction(tuple):
    """ Abstract disjunction class (not to be instantiated directly).
    The alternatives are the tuple's items. """

    def __init__(self, iterable):
        # the items are already stored by tuple.__new__; the historical
        # throwaway tuple() call is kept for exact behavioral parity with
        # iterator arguments
        tuple(iterable)

    def __repr__(self):
        if not self:
            # preserves the historical repr of an empty disjunction
            return ")"
        return "(" + " or ".join(repr(alternative)
                                 for alternative in self) + ")"

    def get_alternatives(self):
        """ Return a tuple of alternatives, i.e. self """
        return self
class PrimitiveDisjunction(Disjunction, Primitive):
    """ Disjunction of two or more Primitives. PrimitiveDisjunctions may not
    be nested. """

    @property
    def return_type(self):
        """ Tuple of the return_types of all disjuncts """
        return TypeDisjunction((prim.return_type for prim in self))

    def __call__(self, *runtime_args, **runtime_kwargs):
        """ Loop over the disjunct Primitives and try to fill their slots
        with the given args and kwargs. Call the first Primitive whose
        slots could be filled successfully. If all disjunct Primitives
        fail, raise the last error that occurred. """
        # remove the first argument if it is a LogoCode instance
        if runtime_args and isinstance(runtime_args[0], LogoCode):
            runtime_args = runtime_args[1:]
        error = None
        for prim in self:
            try:
                new_prim = prim.fill_slots(runtime_args, runtime_kwargs,
                                           convert_to_ast=False)
            except TATypeError as type_error:
                # BUGFIX: remember the failure so it can be re-raised below;
                # previously the error was never captured, so a total
                # failure silently returned None instead of raising
                error = type_error
                continue
            else:
                # on success, call this Primitive
                return new_prim()
        # if we get here, all disjuncts failed
        if error is not None:
            raise error
class ArgListDisjunction(Disjunction):
    """ Disjunction of two or more argument lists (alternative argument
    signatures for a single Primitive; see Primitive.fill_slots) """
    pass
class ArgSlot(object):
    """ Description of the requirements that a Primitive demands from an
    argument or keyword argument. An ArgSlot is filled at runtime, based
    on the block program structure. """

    def __init__(self, type_, call_arg=True, wrapper=None):
        """
        type_ -- what type of the type hierarchy the argument should have
        (after the wrapper has been applied)
        call_arg -- if this argument is callable, should it be called and
        its return value passed to the parent Primitive (True, the
        default), or should it be passed as it is (False)?
        wrapper -- a Primitive that is 'wrapped around' the argument before
        it gets passed to its parent Primitive. Wrappers can be nested
        infinitely. """
        self.type = type_
        self.call_arg = call_arg
        self.wrapper = wrapper
def __repr__(self):
s = ["ArgSlot(type="]
s.append(repr(self.type))
if not self.call_arg:
s.append(", call=")
s.append(repr(self.call_arg))
if self.wrapper is not None:
s.append(", wrapper=")
s.append(repr(self.wrapper))
s.append(")")
return "".join(s)
    def get_alternatives(self):
        """ Return a tuple of slot alternatives, i.e. (self, ) --
        counterpart of Disjunction.get_alternatives() """
        return (self, )
    def fill(self, argument, convert_to_ast=False, call_my_args=True):
        """ Try to fill this argument slot with the given argument. Return
        a ConstantArg containing the result. If there is a type problem,
        raise a TATypeError.

        argument -- a raw value, a callable, or a tuple whose first item
        is a callable (optionally followed by a LogoCode instance and
        the callable's own arguments)
        convert_to_ast -- produce an AST instead of a runtime value
        call_my_args -- if False, pass callables through uncalled """
        if isinstance(argument, ast.AST):
            convert_to_ast = True
        # 1. can the argument be called?
        (func_disjunction, args) = (None, [])
        if isinstance(argument, tuple) and argument and callable(argument[0]):
            func_disjunction = argument[0]
            if len(argument) >= 2 and isinstance(argument[1], LogoCode):
                args = argument[2:]
            else:
                args = argument[1:]
        elif callable(argument):
            func_disjunction = argument
        # make sure we can loop over func_disjunction
        if not isinstance(func_disjunction, PrimitiveDisjunction):
            func_disjunction = PrimitiveDisjunction((func_disjunction, ))
        error = None
        bad_value = argument  # the value that caused the TATypeError
        # try every (func, slot, wrapper) combination until one succeeds
        for func in func_disjunction:
            error = None
            for slot in self.get_alternatives():
                if isinstance(slot.wrapper, PrimitiveDisjunction):
                    wrapper_disjunction = slot.wrapper
                else:
                    wrapper_disjunction = PrimitiveDisjunction((slot.wrapper,))
                for wrapper in wrapper_disjunction:
                    # check if the argument can fill this slot (type-wise)
                    # (lambda functions are always accepted)
                    if getattr(func, '__name__', None) == '<lambda>':
                        converter = identity
                        old_type = TYPE_OBJECT
                        new_type = slot.type
                    else:
                        if wrapper is not None:
                            arg_types = get_type(wrapper)[0]
                            bad_value = wrapper
                        elif func is not None:
                            arg_types = get_type(func)[0]
                            bad_value = func
                        else:
                            arg_types = get_type(argument)[0]
                            bad_value = argument
                        converter = None
                        if not isinstance(arg_types, TypeDisjunction):
                            arg_types = TypeDisjunction((arg_types, ))
                        if isinstance(slot.type, TypeDisjunction):
                            slot_types = slot.type
                        else:
                            slot_types = TypeDisjunction((slot.type, ))
                        # find the first (old, new) type pair with a converter
                        for old_type in arg_types:
                            for new_type in slot_types:
                                converter = get_converter(old_type, new_type)
                                if converter is not None:
                                    break
                            if converter is not None:
                                break
                        # unable to convert, try next wrapper/ slot/ func
                        if converter is None:
                            continue
                    # 1. (cont'd) call the argument or pass it on as a callable
                    called_argument = argument
                    if func is not None:
                        func_prim = func
                        if not isinstance(func_prim, Primitive):
                            func_prim = Primitive(
                                func_prim,
                                [ArgSlot(TYPE_OBJECT)] * len(args))
                        try:
                            func_prim = func_prim.fill_slots(
                                args,
                                convert_to_ast=convert_to_ast,
                                call_my_args=(slot.call_arg and call_my_args))
                        except TATypeError as e:
                            error = e
                            if Primitive._DEBUG:
                                traceback.print_exc()
                            # on failure, try next wrapper/ slot/ func
                            bad_value = error.bad_value
                            continue
                        if convert_to_ast:
                            called_argument = func_prim.get_ast()
                        else:
                            if slot.call_arg and call_my_args:
                                # call and pass on the return value
                                called_argument = func_prim()
                            else:
                                # don't call and pass on the callable
                                called_argument = func_prim
                    # 2. apply any wrappers
                    wrapped_argument = called_argument
                    if wrapper is not None:
                        if convert_to_ast:
                            if not hasattr(wrapper, "get_ast"):
                                raise PyExportError(
                                    ("cannot convert callable"
                                     " %s to an AST") % (repr(wrapper)))
                            wrapped_argument = wrapper.get_ast(
                                called_argument)
                        else:
                            if slot.call_arg and call_my_args:
                                wrapped_argument = wrapper(called_argument)
                            else:
                                wrapped_argument = wrapper.fill_slots(
                                    [called_argument], call_my_args=False)
                    # last chance to convert raw values to ASTs
                    # (but not lists of ASTs)
                    if convert_to_ast and not \
                            isinstance(wrapped_argument, ast.AST) and not \
                            (isinstance(wrapped_argument, list
                                        ) and wrapped_argument and isinstance(
                                wrapped_argument[0], ast.AST)):
                        wrapped_argument = value_to_ast(wrapped_argument)
                    # 3. check the type and convert the argument if necessary
                    converted_argument = wrapped_argument
                    if slot.call_arg and call_my_args:
                        try:
                            converted_argument = convert(
                                wrapped_argument,
                                new_type, old_type=old_type,
                                converter=converter)
                        except TATypeError as e:
                            error = e
                            if Primitive._DEBUG:
                                traceback.print_exc()
                            # on failure, try next wrapper/ slot/ func
                            bad_value = wrapped_argument
                            continue
                    elif converter != identity:
                        # defer the conversion: wrap it in an uncalled
                        # Primitive so the caller can run it later
                        converted_argument = Primitive(
                            converter,
                            return_type=new_type,
                            arg_descs=[ConstantArg(wrapped_argument,
                                                   value_type=old_type,
                                                   call_arg=False)])
                    # on success, return the result
                    return ConstantArg(
                        converted_argument,
                        value_type=new_type,
                        call_arg=(slot.call_arg and call_my_args))
        # if we haven't returned anything yet, then all alternatives failed
        if error is not None:
            raise error
        else:
            raise TATypeError(bad_value=bad_value, bad_type=old_type,
                              req_type=new_type)
class ArgSlotDisjunction(Disjunction, ArgSlot):
    """ Disjunction of two or more argument slots (alternative type
    requirements for one argument position) """
    pass
class ConstantArg(object):
    """ A constant argument or keyword argument to a Primitive. It is
    independent of the block program structure. """

    def __init__(self, value, call_arg=True, value_type=None):
        """ value -- the stored value
        call_arg -- call the value before returning it?
        value_type -- the type of the value (from the TA type system);
        useful to store e.g. the return type of call ASTs """
        self.value = value
        self.call_arg = call_arg
        self.value_type = value_type

    def get(self, convert_to_ast=False):
        """ If call_arg is True and the value is callable, call the value
        and return its return value. Else, return the value unchanged.
        convert_to_ast -- return the equivalent AST instead of a raw value
        (lists are passed through unchanged) """
        stored = self.value
        if self.call_arg and callable(stored):
            return value_to_ast(stored) if convert_to_ast else stored()
        if convert_to_ast and not isinstance(stored, list):
            return value_to_ast(stored)
        return stored

    def get_value_type(self):
        """ Return the stored type of the value, falling back to
        get_type(...) when no type was stored. """
        if self.value_type is not None:
            return self.value_type
        return get_type(self.value)[0]

    def __repr__(self):
        text = "ConstantArg(" + repr(self.value)
        if not self.call_arg:
            text += ", call=" + repr(self.call_arg)
        return text + ")"
def or_(*disjuncts):
    """ Return a disjunction object of the same type as the disjuncts
    (decided by the type of the first one). If the item type cannot be
    linked to a Disjunction class, return a plain tuple of the
    disjuncts. """
    first = disjuncts[0]
    if isinstance(first, Primitive):
        return PrimitiveDisjunction(disjuncts)
    if isinstance(first, (list, ArgListDisjunction)):
        return ArgListDisjunction(disjuncts)
    if isinstance(first, ArgSlot):
        return ArgSlotDisjunction(disjuncts)
    if isinstance(first, Type):
        return TypeDisjunction(disjuncts)
    return tuple(disjuncts)
def value_to_ast(value, *args_for_prim, **kwargs_for_prim):
    """ Turn a value into an AST. Supported types: Primitive, int, float,
    bool, basestring, list
    If the value is already an AST, return it unchanged.
    If the value is a non-exportable Primitive, return None.

    args_for_prim/kwargs_for_prim are forwarded to Primitive.get_ast. """
    # already an AST
    if isinstance(value, ast.AST):
        return value
    # Primitive
    elif isinstance(value, Primitive):
        if value.export_me:
            return value.get_ast(*args_for_prim, **kwargs_for_prim)
        else:
            return None
    # boolean -- must be checked before the number branch, since bool is
    # a subclass of int
    elif isinstance(value, bool):
        # NOTE(review): ctx is given the ast.Load class itself rather than
        # an ast.Load() instance; downstream consumers apparently accept
        # this -- confirm before "fixing".
        return ast.Name(id=str(value), ctx=ast.Load)
    # number
    elif isinstance(value, (int, float)):
        return ast.Num(n=value)
    # string
    # NOTE(review): docstring says basestring, but only str is checked;
    # unicode values would fall through to PyExportError on Python 2 --
    # verify against callers.
    elif isinstance(value, str):
        return ast.Str(value)
    # list (recursively transform to an AST); items that convert to None
    # (non-exportable Primitives) are silently dropped
    elif isinstance(value, list):
        ast_list = []
        for item in value:
            item_ast = value_to_ast(item)
            if item_ast is not None:
                ast_list.append(item_ast)
        return ast.List(elts=ast_list, ctx=ast.Load)
    # color
    elif isinstance(value, Color):
        # call to the Color constructor with this object's values,
        # e.g., Color('red', 0, 50, 100)
        return get_call_ast('Color', [value.name, value.color,
                                      value.shade, value.gray],
                            return_type=TYPE_COLOR)
    # vector
    elif isinstance(value, Vector):
        # call to the Vector constructor with this object's values,
        # e.g., Vector('banana', [105, 1, 27, 3, 0])
        return get_call_ast('Vector', [value.name, value.vector],
                            return_type=TYPE_VECTOR)
    # media
    elif isinstance(value, Media):
        args = [value_to_ast(value.type), value_to_ast(value.value)]
        return get_call_ast('Media', args, return_type=TYPE_MEDIA)
    # unknown
    else:
        raise PyExportError("unknown type of raw value: " + repr(type(value)))
def ast_yield_true():
    """Return an AST node equivalent to the expression ``yield True``."""
    true_name = ast.Name(id='True', ctx=ast.Load)
    return ast.Yield(value=true_name)
def export_me(something):
    """Return True iff this is not a Primitive or its export_me attribute
    is True, i.e. everything is exportable except for Primitives with
    export_me == False."""
    if isinstance(something, Primitive):
        return something.export_me
    return True
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
from datetime import datetime
from flask import current_app as app, flash, Markup, redirect
from flask_appbuilder import CompactCRUDMixin, expose
from flask_appbuilder.fieldwidgets import Select2Widget
from flask_appbuilder.hooks import before_request
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.security.decorators import has_access
from flask_babel import lazy_gettext as _
from werkzeug.exceptions import NotFound
from wtforms import StringField
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from superset import db, security_manager
from superset.connectors.base.views import BS3TextFieldROWidget, DatasourceModelView
from superset.connectors.connector_registry import ConnectorRegistry
from superset.connectors.druid import models
from superset.constants import RouteMethod
from superset.typing import FlaskResponse
from superset.utils import core as utils
from superset.views.base import (
BaseSupersetView,
DatasourceFilter,
DeleteMixin,
get_dataset_exist_error_msg,
ListWidgetWithCheckboxes,
SupersetModelView,
validate_json,
YamlExportMixin,
)
# Module-level logger named after this module, per the logging convention.
logger = logging.getLogger(__name__)
class EnsureEnabledMixin:
    """View mixin that rejects every request with a 404 when Druid support
    is switched off via the ``DRUID_IS_ACTIVE`` app config flag."""

    @staticmethod
    def is_enabled() -> bool:
        """Whether the Druid connector is active in the Flask app config."""
        return bool(app.config["DRUID_IS_ACTIVE"])

    @before_request
    def ensure_enabled(self) -> None:
        """Raise 404 before handling any request if Druid is disabled."""
        if self.is_enabled():
            return
        raise NotFound()
class DruidColumnInlineView(  # pylint: disable=too-many-ancestors
    CompactCRUDMixin, EnsureEnabledMixin, SupersetModelView,
):
    """Inline CRUD view for columns of a Druid datasource.

    Shown embedded (CompactCRUDMixin) inside the DruidDatasourceModelView
    related-views section; 404s when Druid is disabled (EnsureEnabledMixin).
    """

    datamodel = SQLAInterface(models.DruidColumn)
    include_route_methods = RouteMethod.RELATED_VIEW_SET
    list_title = _("Columns")
    show_title = _("Show Druid Column")
    add_title = _("Add Druid Column")
    edit_title = _("Edit Druid Column")
    list_widget = ListWidgetWithCheckboxes
    edit_columns = [
        "column_name",
        "verbose_name",
        "description",
        "dimension_spec_json",
        "datasource",
        "groupby",
        "filterable",
    ]
    add_columns = edit_columns
    list_columns = ["column_name", "verbose_name", "type", "groupby", "filterable"]
    can_delete = False
    page_size = 500
    label_columns = {
        "column_name": _("Column"),
        "type": _("Type"),
        "datasource": _("Datasource"),
        "groupby": _("Groupable"),
        "filterable": _("Filterable"),
    }
    description_columns = {
        "filterable": _(
            "Whether this column is exposed in the `Filters` section "
            "of the explore view."
        ),
        "dimension_spec_json": utils.markdown(
            "this field can be used to specify "
            "a `dimensionSpec` as documented [here]"
            "(http://druid.io/docs/latest/querying/dimensionspecs.html). "
            "Make sure to input valid JSON and that the "
            "`outputName` matches the `column_name` defined "
            "above.",
            True,
        ),
    }
    # Datasource picker is rendered read-only so the column cannot be
    # re-attached to a different datasource from this inline view.
    add_form_extra_fields = {
        "datasource": QuerySelectField(
            "Datasource",
            query_factory=lambda: db.session.query(models.DruidDatasource),
            allow_blank=True,
            widget=Select2Widget(extra_classes="readonly"),
        )
    }
    edit_form_extra_fields = add_form_extra_fields

    def pre_update(self, item: "DruidColumnInlineView") -> None:
        """Validate the optional dimension spec before saving.

        Raises ValueError when the spec is not valid JSON, is not an
        object, lacks `outputName`/`dimension`, or its `outputName` does
        not match the column name.
        """
        # If a dimension spec JSON is given, ensure that it is
        # valid JSON and that `outputName` is specified
        if item.dimension_spec_json:
            try:
                dimension_spec = json.loads(item.dimension_spec_json)
            except ValueError as ex:
                raise ValueError("Invalid Dimension Spec JSON: " + str(ex)) from ex
            if not isinstance(dimension_spec, dict):
                raise ValueError("Dimension Spec must be a JSON object")
            if "outputName" not in dimension_spec:
                raise ValueError("Dimension Spec does not contain `outputName`")
            if "dimension" not in dimension_spec:
                raise ValueError("Dimension Spec is missing `dimension`")
            # `outputName` should be the same as the `column_name`
            if dimension_spec["outputName"] != item.column_name:
                raise ValueError(
                    "`outputName` [{}] unequal to `column_name` [{}]".format(
                        dimension_spec["outputName"], item.column_name
                    )
                )

    def post_update(self, item: "DruidColumnInlineView") -> None:
        """Recompute the datasource metrics after a column change."""
        item.refresh_metrics()

    def post_add(self, item: "DruidColumnInlineView") -> None:
        # Adding a column has the same follow-up work as editing one.
        self.post_update(item)
class DruidMetricInlineView(  # pylint: disable=too-many-ancestors
    CompactCRUDMixin, EnsureEnabledMixin, SupersetModelView,
):
    """Inline CRUD view for metrics of a Druid datasource.

    Purely declarative: the `json` field is validated as JSON via
    validators_columns; no extra hooks are defined.
    """

    datamodel = SQLAInterface(models.DruidMetric)
    include_route_methods = RouteMethod.RELATED_VIEW_SET
    list_title = _("Metrics")
    show_title = _("Show Druid Metric")
    add_title = _("Add Druid Metric")
    edit_title = _("Edit Druid Metric")
    list_columns = ["metric_name", "verbose_name", "metric_type"]
    edit_columns = [
        "metric_name",
        "description",
        "verbose_name",
        "metric_type",
        "json",
        "datasource",
        "d3format",
        "warning_text",
    ]
    add_columns = edit_columns
    page_size = 500
    # Reject malformed JSON in the metric definition at form level.
    validators_columns = {"json": [validate_json]}
    description_columns = {
        "metric_type": utils.markdown(
            "use `postagg` as the metric type if you are defining a "
            "[Druid Post Aggregation]"
            "(http://druid.io/docs/latest/querying/post-aggregations.html)",
            True,
        )
    }
    label_columns = {
        "metric_name": _("Metric"),
        "description": _("Description"),
        "verbose_name": _("Verbose Name"),
        "metric_type": _("Type"),
        "json": _("JSON"),
        "datasource": _("Druid Datasource"),
        "warning_text": _("Warning Message"),
    }
    # Datasource picker is read-only: a metric stays on its datasource.
    add_form_extra_fields = {
        "datasource": QuerySelectField(
            "Datasource",
            query_factory=lambda: db.session.query(models.DruidDatasource),
            allow_blank=True,
            widget=Select2Widget(extra_classes="readonly"),
        )
    }
    edit_form_extra_fields = add_form_extra_fields
class DruidClusterModelView(  # pylint: disable=too-many-ancestors
    EnsureEnabledMixin, SupersetModelView, DeleteMixin, YamlExportMixin,
):
    """CRUD view for Druid clusters (broker connection settings)."""

    datamodel = SQLAInterface(models.DruidCluster)
    include_route_methods = RouteMethod.CRUD_SET
    list_title = _("Druid Clusters")
    show_title = _("Show Druid Cluster")
    add_title = _("Add Druid Cluster")
    edit_title = _("Edit Druid Cluster")
    add_columns = [
        "verbose_name",
        "broker_host",
        "broker_port",
        "broker_user",
        "broker_pass",
        "broker_endpoint",
        "cache_timeout",
        "cluster_name",
    ]
    edit_columns = add_columns
    list_columns = ["cluster_name", "metadata_last_refreshed"]
    search_columns = ("cluster_name",)
    label_columns = {
        "cluster_name": _("Cluster Name"),
        "broker_host": _("Broker Host"),
        "broker_port": _("Broker Port"),
        "broker_user": _("Broker Username"),
        "broker_pass": _("Broker Password"),
        "broker_endpoint": _("Broker Endpoint"),
        "verbose_name": _("Verbose Name"),
        "cache_timeout": _("Cache Timeout"),
        "metadata_last_refreshed": _("Metadata Last Refreshed"),
    }
    description_columns = {
        "cache_timeout": _(
            "Duration (in seconds) of the caching timeout for this cluster. "
            "A timeout of 0 indicates that the cache never expires. "
            "Note this defaults to the global timeout if undefined."
        ),
        "broker_user": _(
            "Druid supports basic authentication. See "
            "[auth](http://druid.io/docs/latest/design/auth.html) and "
            "druid-basic-security extension"
        ),
        "broker_pass": _(
            "Druid supports basic authentication. See "
            "[auth](http://druid.io/docs/latest/design/auth.html) and "
            "druid-basic-security extension"
        ),
    }
    # Key under which YamlExportMixin nests exported clusters.
    yaml_dict_key = "databases"

    def pre_add(self, item: "DruidClusterModelView") -> None:
        """Register the database_access permission for the new cluster."""
        security_manager.add_permission_view_menu("database_access", item.perm)

    def pre_update(self, item: "DruidClusterModelView") -> None:
        # Re-register the permission in case the cluster perm string changed.
        self.pre_add(item)

    def _delete(self, pk: int) -> None:
        DeleteMixin._delete(self, pk)
class DruidDatasourceModelView(  # pylint: disable=too-many-ancestors
    EnsureEnabledMixin, DatasourceModelView, DeleteMixin, YamlExportMixin,
):
    """CRUD view for Druid datasources, with inline column/metric editing."""

    datamodel = SQLAInterface(models.DruidDatasource)
    include_route_methods = RouteMethod.CRUD_SET
    list_title = _("Druid Datasources")
    show_title = _("Show Druid Datasource")
    add_title = _("Add Druid Datasource")
    edit_title = _("Edit Druid Datasource")
    list_columns = ["datasource_link", "cluster", "changed_by_", "modified"]
    order_columns = ["datasource_link", "modified"]
    # Columns and metrics are edited inline on this view's page.
    related_views = [DruidColumnInlineView, DruidMetricInlineView]
    edit_columns = [
        "datasource_name",
        "cluster",
        "description",
        "owners",
        "is_hidden",
        "filter_select_enabled",
        "fetch_values_from",
        "default_endpoint",
        "offset",
        "cache_timeout",
    ]
    search_columns = ("datasource_name", "cluster", "description", "owners")
    add_columns = edit_columns
    show_columns = add_columns + ["perm", "slices"]
    page_size = 500
    base_order = ("datasource_name", "asc")
    description_columns = {
        "slices": _(
            "The list of charts associated with this table. By "
            "altering this datasource, you may change how these associated "
            "charts behave. "
            "Also note that charts need to point to a datasource, so "
            "this form will fail at saving if removing charts from a "
            "datasource. If you want to change the datasource for a chart, "
            "overwrite the chart from the 'explore view'"
        ),
        "offset": _("Timezone offset (in hours) for this datasource"),
        "description": Markup(
            'Supports <a href="'
            'https://daringfireball.net/projects/markdown/">markdown</a>'
        ),
        "fetch_values_from": _(
            "Time expression to use as a predicate when retrieving "
            "distinct values to populate the filter component. "
            "Only applies when `Enable Filter Select` is on. If "
            "you enter `7 days ago`, the distinct list of values in "
            "the filter will be populated based on the distinct value over "
            "the past week"
        ),
        "filter_select_enabled": _(
            "Whether to populate the filter's dropdown in the explore "
            "view's filter section with a list of distinct values fetched "
            "from the backend on the fly"
        ),
        "default_endpoint": _(
            "Redirects to this endpoint when clicking on the datasource "
            "from the datasource list"
        ),
        "cache_timeout": _(
            "Duration (in seconds) of the caching timeout for this datasource. "
            "A timeout of 0 indicates that the cache never expires. "
            "Note this defaults to the cluster timeout if undefined."
        ),
    }
    # Restrict listing to datasources the current user may access.
    base_filters = [["id", DatasourceFilter, lambda: []]]
    label_columns = {
        "slices": _("Associated Charts"),
        "datasource_link": _("Data Source"),
        "cluster": _("Cluster"),
        "description": _("Description"),
        "owners": _("Owners"),
        "is_hidden": _("Is Hidden"),
        "filter_select_enabled": _("Enable Filter Select"),
        "default_endpoint": _("Default Endpoint"),
        "offset": _("Time Offset"),
        "cache_timeout": _("Cache Timeout"),
        "datasource_name": _("Datasource Name"),
        "fetch_values_from": _("Fetch Values From"),
        "changed_by_": _("Changed By"),
        "modified": _("Modified"),
    }
    # Cluster and name are immutable after creation: rendered read-only.
    edit_form_extra_fields = {
        "cluster": QuerySelectField(
            "Cluster",
            query_factory=lambda: db.session.query(models.DruidCluster),
            widget=Select2Widget(extra_classes="readonly"),
        ),
        "datasource_name": StringField(
            "Datasource Name", widget=BS3TextFieldROWidget()
        ),
    }

    def pre_add(self, item: "DruidDatasourceModelView") -> None:
        """Reject creation when the (name, cluster) pair already exists."""
        with db.session.no_autoflush:
            query = db.session.query(models.DruidDatasource).filter(
                models.DruidDatasource.datasource_name == item.datasource_name,
                models.DruidDatasource.cluster_id == item.cluster_id,
            )
            if db.session.query(query.exists()).scalar():
                raise Exception(get_dataset_exist_error_msg(item.full_name))

    def post_add(self, item: "DruidDatasourceModelView") -> None:
        """Refresh metrics and register access permissions for the new
        datasource (and its schema, when one is set)."""
        item.refresh_metrics()
        security_manager.add_permission_view_menu("datasource_access", item.get_perm())
        if item.schema:
            security_manager.add_permission_view_menu("schema_access", item.schema_perm)

    def post_update(self, item: "DruidDatasourceModelView") -> None:
        # Updates need the same metric refresh / permission registration.
        self.post_add(item)

    def _delete(self, pk: int) -> None:
        DeleteMixin._delete(self, pk)
class Druid(EnsureEnabledMixin, BaseSupersetView):
    """The base views for Superset!"""

    @has_access
    @expose("/refresh_datasources/")
    def refresh_datasources(  # pylint: disable=no-self-use
        self, refresh_all: bool = True
    ) -> FlaskResponse:
        """endpoint that refreshes druid datasources metadata

        Iterates over every registered Druid cluster; failures on one
        cluster are flashed to the user and swallowed so the remaining
        clusters still get refreshed. Redirects back to the datasource
        list when done.
        """
        session = db.session()
        DruidCluster = ConnectorRegistry.sources[  # pylint: disable=invalid-name
            "druid"
        ].cluster_class
        for cluster in session.query(DruidCluster).all():
            cluster_name = cluster.cluster_name
            valid_cluster = True
            try:
                cluster.refresh_datasources(refresh_all=refresh_all)
            except Exception as ex:  # pylint: disable=broad-except
                # Best-effort: report the failure and move on to the
                # next cluster rather than aborting the whole refresh.
                valid_cluster = False
                flash(
                    "Error while processing cluster '{}'\n{}".format(
                        cluster_name, utils.error_msg_from_exception(ex)
                    ),
                    "danger",
                )
                logger.exception(ex)
            if valid_cluster:
                cluster.metadata_last_refreshed = datetime.now()
                flash(
                    _("Refreshed metadata from cluster [{}]").format(
                        cluster.cluster_name
                    ),
                    "info",
                )
        # Persist metadata_last_refreshed for all successful clusters.
        session.commit()
        return redirect("/druiddatasourcemodelview/list/")

    @has_access
    @expose("/scan_new_datasources/")
    def scan_new_datasources(self) -> FlaskResponse:
        """
        Calling this endpoint will cause a scan for new
        datasources only and add them.
        """
        return self.refresh_datasources(refresh_all=False)
| |
import cPickle
import random
import ToonInteriorColors
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedObject
from direct.task.Task import Task
from pandac.PandaModules import *
from toontown.toonbase import TTLocalizer
from toontown.toonbase.ToonBaseGlobal import *
from toontown.toonbase.ToontownGlobals import *
from toontown.dna.DNAParser import DNADoor
from toontown.toon.DistributedNPCToonBase import DistributedNPCToonBase
class DistributedHQInterior(DistributedObject.DistributedObject):
    """Client-side distributed object for the interior of a Toon HQ.

    Loads the interior model, builds the trolley-game leaderboard that
    hangs on the wall, and wires up the exit doors. Leaderboard contents
    arrive from the server via setLeaderBoard().
    """

    def __init__(self, cr):
        DistributedObject.DistributedObject.__init__(self, cr)
        self.dnaStore = cr.playGame.dnaStore
        # Leaderboard state; populated later by setLeaderBoard().
        self.leaderAvIds = []
        self.leaderNames = []
        self.leaderScores = []
        # Number of rows displayed on the leaderboard.
        self.numLeaders = 10
        self.tutorial = 0

    def generate(self):
        """Load the interior model and construct the (hidden) leaderboard."""
        DistributedObject.DistributedObject.generate(self)
        self.interior = loader.loadModel('phase_3.5/models/modules/HQ_interior')
        self.interior.reparentTo(render)
        # These props are part of the model but not shown in a normal HQ.
        self.interior.find('**/cream').hide()
        self.interior.find('**/crashed_piano').hide()
        self.buildLeaderBoard()

    def announceGenerate(self):
        """Finish setup once all required fields have arrived."""
        DistributedObject.DistributedObject.announceGenerate(self)
        self.setupDoors()
        self.interior.flattenMedium()
        # Hang the leaderboard on the empty board in the model.
        emptyBoard = self.interior.find('**/empty_board')
        self.leaderBoard.reparentTo(emptyBoard.getChild(0))
        for npcToon in self.cr.doFindAllInstances(DistributedNPCToonBase):
            npcToon.initToonState()

    def setTutorial(self, flag):
        """Distributed field: hide periscope/speakers while in the tutorial."""
        if self.tutorial == flag:
            return
        else:
            self.tutorial = flag
            if self.tutorial:
                self.interior.find('**/periscope').hide()
                self.interior.find('**/speakers').hide()
            else:
                self.interior.find('**/periscope').show()
                self.interior.find('**/speakers').show()

    def setZoneIdAndBlock(self, zoneId, block):
        """Distributed field: remember which street zone/block this HQ is in."""
        self.zoneId = zoneId
        self.block = block

    def buildLeaderBoard(self):
        """Build the leaderboard node (title row + numLeaders data rows),
        parented to hidden until announceGenerate() places it."""
        self.leaderBoard = hidden.attachNewNode('leaderBoard')
        self.leaderBoard.setPosHprScale(0.1, 0, 4.5, 90, 0, 0, 0.9, 0.9, 0.9)
        z = 0
        row = self.buildTitleRow()
        row.reparentTo(self.leaderBoard)
        row.setPos(0, 0, z)
        z -= 1
        # Parallel lists indexed by row number.
        self.nameTextNodes = []
        self.scoreTextNodes = []
        self.trophyStars = []
        for i in xrange(self.numLeaders):
            (row, nameText, scoreText, trophyStar) = self.buildLeaderRow()
            self.nameTextNodes.append(nameText)
            self.scoreTextNodes.append(scoreText)
            self.trophyStars.append(trophyStar)
            row.reparentTo(self.leaderBoard)
            row.setPos(0, 0, z)
            z -= 1

    def updateLeaderBoard(self):
        """Refresh the displayed rows from leaderNames/leaderScores; blank
        out any unused trailing rows."""
        # Stop any star-spin task before rebuilding the rows.
        taskMgr.remove(self.uniqueName('starSpinHQ'))
        for i in xrange(len(self.leaderNames)):
            name = self.leaderNames[i]
            score = self.leaderScores[i]
            self.nameTextNodes[i].setText(name)
            self.scoreTextNodes[i].setText(str(score))
            self.updateTrophyStar(self.trophyStars[i], score)
        for i in xrange(len(self.leaderNames), self.numLeaders):
            self.nameTextNodes[i].setText('-')
            self.scoreTextNodes[i].setText('-')
            self.trophyStars[i].hide()

    def buildTitleRow(self):
        """Create the leaderboard title row node."""
        row = hidden.attachNewNode('leaderRow')
        nameText = TextNode('titleRow')
        nameText.setFont(ToontownGlobals.getSignFont())
        nameText.setAlign(TextNode.ACenter)
        nameText.setTextColor(0.5, 0.75, 0.7, 1)
        nameText.setText(TTLocalizer.LeaderboardTitle)
        namePath = row.attachNewNode(nameText)
        namePath.setPos(0, 0, 0)
        return row

    def buildLeaderRow(self):
        """Create one data row; returns (row, nameText, scoreText, trophyStar)."""
        row = hidden.attachNewNode('leaderRow')
        nameText = TextNode('nameText')
        nameText.setFont(ToontownGlobals.getToonFont())
        nameText.setAlign(TextNode.ALeft)
        nameText.setTextColor(1, 1, 1, 0.7)
        nameText.setText('-')
        namePath = row.attachNewNode(nameText)
        namePath.setPos(*TTLocalizer.DHQInamePathPos)
        namePath.setScale(TTLocalizer.DHQInamePath)
        scoreText = TextNode('scoreText')
        scoreText.setFont(ToontownGlobals.getToonFont())
        scoreText.setAlign(TextNode.ARight)
        scoreText.setTextColor(1, 1, 0.1, 0.7)
        scoreText.setText('-')
        scorePath = row.attachNewNode(scoreText)
        scorePath.setPos(*TTLocalizer.DHQIscorePathPos)
        trophyStar = self.buildTrophyStar()
        trophyStar.reparentTo(row)
        return (row, nameText, scoreText, trophyStar)

    def setLeaderBoard(self, leaderData):
        """Distributed field: receive pickled (avIds, names, scores) and
        refresh the display.

        NOTE(review): cPickle.loads on network-supplied data can execute
        arbitrary code if the server is not trusted -- worth auditing.
        """
        (avIds, names, scores) = cPickle.loads(leaderData)
        self.notify.debug('setLeaderBoard: avIds: %s, names: %s, scores: %s' % (avIds, names, scores))
        self.leaderAvIds = avIds
        self.leaderNames = names
        self.leaderScores = scores
        self.updateLeaderBoard()

    def chooseDoor(self):
        """Pick the door model for this interior.

        The literal ends in 'l', so the if-branch never fires and this
        always yields the '..._ur' variant.
        """
        doorModelName = 'door_double_round_ul'
        if doorModelName[-1:] == 'r':
            doorModelName = doorModelName[:-1] + 'l'
        else:
            doorModelName = doorModelName[:-1] + 'r'
        door = self.dnaStore.findNode(doorModelName)
        return door

    def setupDoors(self):
        """Instance a door at every door_origin node in the scene, colored
        deterministically by zoneId; releases dnaStore when finished."""
        self.randomGenerator = random.Random()
        # Seed from the zone so every client picks the same door color.
        self.randomGenerator.seed(self.zoneId)
        self.colors = ToonInteriorColors.colors[ToontownCentral]
        door = self.chooseDoor()
        doorOrigins = render.findAllMatches('**/door_origin*')
        numDoorOrigins = doorOrigins.getNumPaths()
        for npIndex in xrange(numDoorOrigins):
            doorOrigin = doorOrigins[npIndex]
            doorOriginNPName = doorOrigin.getName()
            doorOriginIndexStr = doorOriginNPName[len('door_origin_'):]
            newNode = ModelNode('door_' + doorOriginIndexStr)
            newNodePath = NodePath(newNode)
            newNodePath.reparentTo(self.interior)
            doorNP = door.copyTo(newNodePath)
            doorOrigin.setScale(0.8, 0.8, 0.8)
            doorOrigin.setPos(doorOrigin, 0, -0.025, 0)
            doorColor = self.randomGenerator.choice(self.colors['TI_door'])
            triggerId = str(self.block) + '_' + doorOriginIndexStr
            DNADoor.setupDoor(doorNP, newNodePath, doorOrigin, self.dnaStore, triggerId, doorColor)
            doorFrame = doorNP.find('door_*_flat')
            doorFrame.setColor(doorColor)
        # Only needed during setup; drop the references.
        del self.dnaStore
        del self.randomGenerator

    def disable(self):
        """Tear down scene nodes and tasks created by generate()."""
        self.leaderBoard.removeNode()
        del self.leaderBoard
        self.interior.removeNode()
        del self.interior
        del self.nameTextNodes
        del self.scoreTextNodes
        del self.trophyStars
        taskMgr.remove(self.uniqueName('starSpinHQ'))
        DistributedObject.DistributedObject.disable(self)

    def buildTrophyStar(self):
        """Load a (hidden) trophy star model for one leaderboard row."""
        trophyStar = loader.loadModel('phase_3.5/models/gui/name_star')
        trophyStar.hide()
        trophyStar.setPos(*TTLocalizer.DHQItrophyStarPos)
        return trophyStar

    def updateTrophyStar(self, trophyStar, score):
        """Show/scale/color the star for the given score tier; the higher
        tier within each bracket also spins the star via a task."""
        scale = 0.8
        if score >= ToontownGlobals.TrophyStarLevels[4]:
            trophyStar.show()
            trophyStar.setScale(scale)
            trophyStar.setColor(ToontownGlobals.TrophyStarColors[4])
            if score >= ToontownGlobals.TrophyStarLevels[5]:
                task = taskMgr.add(self.__starSpin, self.uniqueName('starSpinHQ'))
                task.trophyStarSpeed = 15
                task.trophyStar = trophyStar
        elif score >= ToontownGlobals.TrophyStarLevels[2]:
            trophyStar.show()
            trophyStar.setScale(0.75 * scale)
            trophyStar.setColor(ToontownGlobals.TrophyStarColors[2])
            if score >= ToontownGlobals.TrophyStarLevels[3]:
                task = taskMgr.add(self.__starSpin, self.uniqueName('starSpinHQ'))
                task.trophyStarSpeed = 10
                task.trophyStar = trophyStar
        elif score >= ToontownGlobals.TrophyStarLevels[0]:
            trophyStar.show()
            trophyStar.setScale(0.75 * scale)
            trophyStar.setColor(ToontownGlobals.TrophyStarColors[0])
            if score >= ToontownGlobals.TrophyStarLevels[1]:
                task = taskMgr.add(self.__starSpin, self.uniqueName('starSpinHQ'))
                task.trophyStarSpeed = 8
                task.trophyStar = trophyStar
        else:
            trophyStar.hide()

    def __starSpin(self, task):
        """Per-frame task: rotate the star at task.trophyStarSpeed deg/sec."""
        now = globalClock.getFrameTime()
        r = now * task.trophyStarSpeed % 360.0
        task.trophyStar.setR(r)
        return Task.cont
| |
from common import *
from jsonrpc import DataSource
import deduplication
import boar_exceptions
from copy import copy
""" A recipe has the following format:
{
"method": "concat",
"md5sum": "9b97d0a697dc503fb4c53ea01bd23dc7",
"size": 8469,
"pieces": [
{"source": "82a6c69d071b6d84b18912a2fa6725a4",
"offset": 0,
"size": 5000},
{"source": "c7eac275a3810a395fda6eeb7786c0e9",
"offset": 0,
"size": 3469}
]
}
"""
def create_blob_reader(recipe, repo):
    """Return a RecipeReader that reconstructs the blob described by
    the given (non-empty) recipe from blobs stored in repo."""
    assert recipe
    reader = RecipeReader(recipe, repo)
    return reader
class RecipeReader(DataSource):
    """Stream the contents of a blob described by a 'concat' recipe.

    The reader can be restricted to a segment [offset, offset + size)
    of the reconstructed blob. Pieces with a 'repeat' count are expanded
    up front; each expanded piece records its absolute position in the
    recipe so reads can seek by a linear forward scan.
    """

    def __init__(self, recipe, repo, offset = 0, size = None, local_path = None):
        """recipe -- a 'concat' recipe dict (see module docstring)
        repo -- object providing get_blob_path(blob_md5)
        offset/size -- optional sub-segment of the reconstructed blob
        local_path -- optional directory checked for blobs before repo

        Raises boar_exceptions.CorruptionError when a referenced blob is
        missing or the piece sizes do not add up to the recipe size.
        """
        assert offset >= 0
        assert size == None or size >= 0
        assert recipe['method'] == "concat"
        assert 'md5sum' in recipe and is_md5sum(recipe['md5sum'])
        assert 'size' in recipe and recipe['size'] >= 0
        self.repo = repo
        self.local_path = local_path
        # No-op progress callback until set_progress_callback() is called.
        self.progress_callback = lambda x: None
        self.pieces = []
        self.blob_paths = {} # Blob id -> blob path
        self.file_handles = {} # blob -> handle
        # Expand repeated pieces
        piece_size_sum = 0
        for piece in recipe['pieces']:
            repeat = piece.get('repeat', 1)
            for n in xrange(0, repeat):
                piece_to_add = copy(piece)
                # Absolute start of this copy within the whole recipe.
                piece_to_add['position_in_recipe'] = piece_size_sum
                piece_size_sum += piece['size']
                self.pieces.append(piece_to_add)
            blob = piece['source']
            blobpath = None
            # Prefer a local copy of the blob when local_path is given.
            if self.local_path:
                blobpath = os.path.join(self.local_path, blob)
            if not blobpath or not os.path.exists(blobpath):
                blobpath = self.repo.get_blob_path(blob)
            if not os.path.exists(blobpath):
                raise boar_exceptions.CorruptionError("A recipe (%s) refers to a missing blob (%s)" % (recipe['md5sum'], blob))
            self.blob_paths[piece['source']] = blobpath
        if piece_size_sum != recipe['size']:
            raise boar_exceptions.CorruptionError("Recipe is internally inconsistent: %s" % recipe['md5sum'])
        self.recipe_size = recipe['size']
        if size == None:
            self.segment_size = recipe['size'] - offset
        else:
            self.segment_size = size
        self.bytes_left_in_segment = self.segment_size
        self.segment_start_in_recipe = offset
        # Index of the piece the next read will start from; reads only
        # ever move forward (see __search_forward).
        self.current_piece_index = 0
        assert self.segment_start_in_recipe + self.bytes_left_in_segment <= recipe['size']

    @overrides(DataSource)
    def bytes_left(self):
        """Number of bytes remaining in the selected segment."""
        return self.bytes_left_in_segment

    def __del__(self):
        # Best-effort cleanup of any blob files still open.
        for f in self.file_handles.values():
            f.close()
        del self.file_handles

    def __read_from_blob(self, blob, position, size):
        """Read size bytes from the given blob at position.

        Keeps at most one blob file open at a time: opening a new blob
        closes all previously opened handles first.
        """
        blobpath = self.blob_paths[blob]
        if blobpath not in self.file_handles:
            for f in self.file_handles.values():
                f.close()
            self.file_handles.clear()
            self.file_handles[blobpath] = open(blobpath, "rb")
        f = self.file_handles[blobpath]
        f.seek(position)
        data = f.read(size)
        #print "read_from_blob(blob=%s, position=%s, size=%s) => '%s'" % (blob, position, size, data)
        return data

    def __search_forward(self, pos, start_index = 0):
        """Return the index of the piece containing recipe position pos,
        scanning forward from start_index (positions never move backward)."""
        index = start_index
        while True:
            piece = self.pieces[index]
            piece_start = piece['position_in_recipe']
            piece_end = piece_start + piece['size']
            if piece_start <= pos < piece_end:
                break
            index += 1
        #print "search_forward(%s, %s) => %s" % (pos, start_index, index)
        return index

    def __read_piece_data(self, piece, recipe_pos, max_size):
        """Read up to max_size bytes of the given piece, starting at the
        absolute recipe position recipe_pos (which must fall inside it)."""
        piece_pos = recipe_pos - piece['position_in_recipe']
        blob_pos = piece['offset'] + piece_pos
        available_blob_data_size = piece['size'] - piece_pos
        blob_read_size = min(available_blob_data_size, max_size)
        return self.__read_from_blob(piece['source'], blob_pos, blob_read_size)

    @overrides(DataSource)
    def read(self, readsize = None):
        """Read up to readsize bytes (the whole remaining segment when
        readsize is None), advancing the segment position and reporting
        progress through the progress callback."""
        assert self.bytes_left_in_segment >= 0
        if readsize == None:
            readsize = self.bytes_left_in_segment
        readsize = min(self.bytes_left_in_segment, readsize)
        assert readsize >= 0
        result = ""
        while len(result) < readsize:
            #print self.segment_start_in_recipe, self.segment_size, self.bytes_left_in_segment
            current_recipe_read_position = self.segment_start_in_recipe + (self.segment_size - self.bytes_left_in_segment)
            self.current_piece_index = self.__search_forward(current_recipe_read_position, start_index = self.current_piece_index)
            remaining = readsize - len(result)
            data = self.__read_piece_data(self.pieces[self.current_piece_index], current_recipe_read_position, remaining)
            result += data
            self.bytes_left_in_segment -= len(data)
            self.progress_callback(calculate_progress(self.segment_size, self.segment_size - self.bytes_left_in_segment))
        return result

    def set_progress_callback(self, progress_callback):
        """Install a callable invoked with a progress value during read()."""
        assert callable(progress_callback)
        self.progress_callback = progress_callback
def benchmark():
    """Time a full read of a large synthetic recipe (one zero-filled
    block repeated block_count times) and print the elapsed time."""
    import tempfile
    blob_fo = tempfile.NamedTemporaryFile()
    blob_path = blob_fo.name
    # Minimal repo stub: every blob id resolves to the same temp file.
    class FakeRepo:
        def get_blob_path(self, blob):
            return blob_path
    block_size = 65536
    block_count = 10000
    blob_fo.write("\0" * block_size)
    recipe = {"md5sum": "00000000000000000000000000000000",
              "method": "concat",
              "size": block_size * block_count,
              "pieces": [
                  {"source": "00000000000000000000000000000000",
                   "offset": 0,
                   "size": block_size,
                   "repeat": block_count
                   } ]
              }
    reader = RecipeReader(recipe, FakeRepo())
    print block_size * block_count / (2**20), "Mbytes"
    sw = StopWatch()
    reader.read()
    sw.mark("Read complete")
"""
62 Mbytes
SW: Read complete 0.33 (total 0.33)
125 Mbytes
SW: Read complete 1.14 (total 1.14)
250 Mbytes
SW: Read complete 4.29 (total 4.29)
625 Mbytes
SW: Read complete 26.12 (total 26.12)
"""
def simple_test():
    """Manual smoke test: a recipe of three copies of bytes 3..5 of the
    alphabet should print 'defdefdef'."""
    import tempfile
    blob_path = "/tmp/blobreader-test.txt"
    # Minimal repo stub resolving every blob id to the test file.
    class FakeRepo:
        def get_blob_path(self, blob):
            return blob_path
    with open(blob_path, "w") as f:
        f.write("abcdefghijklmnopqrstuvwxyz")
    recipe = {"md5sum": "00000000000000000000000000000000",
              "method": "concat",
              "size": 9,
              "pieces": [
                  {"source": "00000000000000000000000000000000",
                   "offset": 3,
                   "size": 3,
                   "repeat": 3
                   }
              ]
              }
    reader = RecipeReader(recipe, FakeRepo())
    print reader.read()
# Script entry point: run the benchmark (switch to simple_test() for a
# quick functional check).
if __name__ == "__main__":
    benchmark()
    #simple_test()
| |
"""
Generate a C++ DICOM dictionary from a text file.
This program will read a text file generated from the DICOM data
element regsistry table (DICOM Chapter 6 part 6) and will generate
a hash table that can be used for dictionary lookups.
Usage: python makedict.py nemadict.txt > vtkDICOMDictHash.cxx
Usage: python makedict.py --header nemadict.txt > vtkDICOMDictHash.h
The option "--private=name" can be added to create a private dictionary,
or "--private=vtkDICOMDictPrivate" for all default dictionaries.
"""
import sys
import math
# Boilerplate banner written at the top of every generated file.
header = \
"""/*=========================================================================
This is an automatically generated file. Include errata for any changes.
=========================================================================*/
"""
# Command-line flags: --header emits the .h file instead of the .cxx,
# --private=NAME builds a private dictionary; the one positional argument
# is the input text file.
printheader = False
privatedict = False
filename = None
for arg in sys.argv[1:]:
    if arg == "--header":
        printheader = True
    elif arg[0:10] == "--private=":
        privatedict = arg[10:]
    elif arg[0] != '-' and filename == None:
        filename = arg
    else:
        sys.stderr.write(
"""usage: python makedict.py nemadict.txt > vtkDICOMDictHash.cxx
       python makedict.py --header nemadict.txt > vtkDICOMDictHash.h\n""")
        sys.exit(1)
# the hash table, in PYTHON
DICT_HASH_TABLE_SIZE = 4096
# whether to show individual hash rows in output
VISUALIZE_HASH_ROWS = False
# collect private dictionaries
privatelines = {}
# read the file in one go
f = open(filename, 'r')
lines = f.readlines()
f.close()
# look for repeating groups and repeat them
# Entries come in 6-line records (tag, name, key, VR, VM, RET). Records
# whose group is 50xx/60xx are "repeating groups": they are buffered in
# rg and, when a non-repeating record is reached, expanded into 15 extra
# copies (groups 5002/6002 .. 501E/601E) spliced into the line list.
i = 0
rg = []
while i < len(lines):
    tag = lines[i].strip()
    try:
        g, e = tag[1:10].split(',')
    except ValueError:
        sys.stderr.write("exception: %s\n" % (tag))
    # NOTE(review): if the split above raised, g/e still hold the values
    # from the previous iteration -- assumes the input file never has a
    # malformed tag line; confirm.
    if g == "60xx" or g == "50xx":
        rg.extend(lines[i:i+6])
        i = i + 6
    elif rg:
        nb = []
        for j in range(1,16):
            k = 0
            m = len(rg)
            while k < m:
                g, e = rg[k][1:10].split(',')
                # e.g. (60xx,0010) copy j becomes (60{2j:02X},0010)
                nb.append("(%s%02X,%s)\n" % (g[0:2], 2*j, e))
                nb.append("%s %d\n" % (rg[k+1].strip(), j+1))
                nb.append("%s%d\n" % (rg[k+2].strip(), j+1))
                nb.append(rg[k+3])
                nb.append(rg[k+4])
                nb.append(rg[k+5])
                k = k + 6
        # Splice the generated copies in before the current record; the
        # current record itself is handled on the next iteration.
        lines = lines[0:i] + nb + lines[i:]
        i += len(nb)
        rg = []
    else:
        # check for and filter out the private tags
        private = False
        try:
            private = ((int(g[3]) & 0x1) != 0)
        except:
            pass
        if private:
            # Group the private record under its creator string (the
            # 6th line of the record).
            creator = lines[i + 5].strip()
            try:
                privatelines[creator] += lines[i:i+6]
            except KeyError:
                privatelines[creator] = lines[i:i+6]
        i = i + 6
def hashtag(g, e):
    """Hash a (group, element) DICOM tag pair into a 32-bit value.

    The shift constants were found by trial-and-error.
    """
    key = (g << 16) + e
    mixed = key + (key >> 15)
    mixed += (mixed >> 6) + (mixed >> 12)
    return mixed & 0xffffffff
def hashstring(s):
    """Compute a 32-bit string hash based on the "djb2" function,
    using at most the first 64 characters."""
    h = 5381
    for ch in s[:64]:
        # (h << 5) + h == h * 33, the djb2 multiplier.
        h = (h * 33 + ord(ch)) & 0xffffffff
    return h
def hashstats(ht):
    """Return statistics for hash table, as a tuple:
    (used bucket fraction, average linear search, total bytes used)

    Buckets are either None (unused) or flat lists holding two entries
    per item. Degenerate tables (empty, or with every bucket unused)
    yield 0.0 ratios instead of raising ZeroDivisionError.
    """
    m = len(ht)
    f = m          # number of used (non-None) buckets
    d = m          # 16-bit words needed to store the table
    c = 0          # total number of items
    for l in ht:
        if l is None:  # fixed: was `l == None`
            f = f - 1
        else:
            d = d + len(l) + 1
            c = c + len(l)//2
    # Guard the divisions so empty/unused tables report zeros.
    used_fraction = f/m if m else 0.0
    avg_search = c/f if f else 0.0
    return (used_fraction, avg_search, 2*(d + 1))
def makedict(lines, creator="DICOM"):
    """Build the dictionary tables from 6-line records in `lines`.

    Returns (enum_list, element_list, ht, ht2) where ht is a hash table
    keyed on the tag and ht2 one keyed on the identifier string. Reads
    the module globals privatedict, printheader and DICT_HASH_TABLE_SIZE.
    """
    # the tables that will be created
    enum_list = []
    element_list = []
    # NOTE(review): entry_list is never used within this function.
    entry_list = []
    # a set to keep track of all VM strings encountered
    vms = {}
    # Private dictionaries get a table sized to their entry count.
    htsize = DICT_HASH_TABLE_SIZE
    if privatedict:
        htsize = int(len(lines)//6)
        if htsize == 0:
            htsize = 1
    ht = [None]*htsize
    ht2 = [None]*htsize
    # iterate through all elements in the table
    j = 0
    i = 0
    n = len(lines)
    while i < n:
        # Each record is 6 consecutive lines; the encode/decode round
        # trip rejects any non-ascii characters.
        try:
            tag = lines[i].encode('ascii').strip().decode('ascii')
            i = i + 1
            name = lines[i].encode('ascii').strip().decode('ascii')
            i = i + 1
            key = lines[i].encode('ascii').strip().decode('ascii')
            i = i + 1
            vr = lines[i].encode('ascii').strip().decode('ascii')
            i = i + 1
            vm = lines[i].encode('ascii').strip().decode('ascii')
            i = i + 1
            ret = lines[i].encode('ascii').strip().decode('ascii')
            i = i + 1
        except:
            sys.stderr.write("non-ascii character encountered on line %d\n" % (i,))
            raise TypeError
        # replace "Unknown" and "?" with ""
        if name in ("Unknown", "Internal", "?"):
            name = ""
        if key in ("Unknown", "Internal", "?"):
            key = ""
        # replace "US or SS" with "XS"
        if vr in ("US or SS", "SS or US", "xs"):
            vr = "XS"
        # replace "OB or OW" with "OX"
        if vr in ("OB or OW", "OW or OB", "ox"):
            vr = "OX"
        # replace "see note" with "XX"
        if vr in ("", "see note", "See Note"):
            vr = "XX"
        # replace mixed short with "OW"
        if len(vr) > 2:
            if vr.find("OW") >= 0:
                vr = "OW"
                vm = "1"
            if vr.find("OB") >= 0:
                vr = "OB"
                vm = "1"
        # replace 'RET' with 1 or 0
        if ret and not privatedict:
            if not printheader:
                ret = {"RET":"1","DICONDE":"2","DICOS":"3"}[ret]
            elif ret == "RET":
                ret = "1"
        else:
            ret = "0"
        # prefix vm with 'M', change '-' to 'T', change 'n' to 'N'
        vm = 'M' + vm.split(' ')[0].replace('-', 'T').replace('n', 'N')
        # add to the set of VMs
        vms[vm] = True
        # this is debug info: make sure no keys are over 63 chars,
        # which is the maximum id length in the C++ standard
        if len(key) > 63:
            print("XXXXXX %s" % (key,))
            sys.exit(1)
        # get the group, element
        g, e = tag[1:10].split(',')
        # make sure g, e are hexadecimal integers
        try:
            gi = int(g, 16)
            ei = int(e, 16)
        except:
            # replace 'x' (which means any digit) with zero
            #print("XXXXXX %s %s" % (tag, key))
            g = g.replace('xx','00')
            e = e.replace('xxxx','0000')
            e = e.replace('xxx','000')
            e = e.replace('xx','00')
            e = e.replace('x','1')
            gi = int(g, 16)
            ei = int(e, 16)
        # Entries without an identifier are skipped entirely unless
        # building a private dictionary.
        if key or privatedict:
            enum_list.append(
                ("%-39s = 0x%s%s, // %s %-5s %s" % (key, g, e, vr, vm, ret)).strip())
            element_list.append(
                "{ 0x%s, 0x%s, %s, VR::%s, VM::%s, \"%s\" }," % (g, e, ret, vr, vm, key))
            # create a hash from group, element
            h = hashtag(gi, ei)
            # create a string hash
            hkey = hashstring(key)
            # build the hash tables
            h = (h % htsize)
            if ht[h] == None:
                ht[h] = []
            h2 = (hkey % htsize)
            if ht2[h2] == None:
                ht2[h2] = []
            # build the index table: each bucket stores (index, check)
            # pairs flattened into one list
            ht[h].append(j)
            ht[h].append(ei)
            ht2[h2].append(j)
            ht2[h2].append((hkey//htsize) & 0xffff)
            j = j + 1
    # debug: print all VM's that were found
    #print(vms.keys())
    # debug: print statistics about the hash table
    if not privatedict:
        sys.stderr.write("Hash Stat: buckets used, items per bucket, total bytes\n")
        sys.stderr.write("Tag Table: %f, %f, %d\n" % hashstats(ht))
        sys.stderr.write("Key Table: %f, %f, %d\n" % hashstats(ht2))
    return enum_list, element_list, ht, ht2
# write the output file
def printhead(enum_dict, classname):
    """Write the C++ header file for the dictionary to standard output.

    enum_dict maps a dictionary name to its list of enum-entry strings
    (as produced by makedict).  For the standard dictionary this emits
    the DC::EnumType enumeration; for a private dictionary it emits a
    Schwarz-counter initializer struct instead.

    NOTE: the trailing-comma fixup mutates the lists in enum_dict in
    place, matching the original behavior.
    """
    out = sys.stdout.write
    out(header)
    out("\n")
    out("#ifndef %s_h\n" % (classname,))
    out("#define %s_h\n" % (classname,))
    out("\n")
    if not privatedict:
        # Standard dictionary: one enumerator per attribute tag.
        out("//! Tag values defined in the DICOM standard\n")
        out("namespace DC\n")
        out("{\n")
        out("enum EnumType {\n")
        for items in enum_dict.values():
            # C++ enums may not end with a comma: strip it from the
            # last enumerator (the comma sits just before the comment).
            if items:
                items[-1] = items[-1].replace(", //", " //")
            for entry in items:
                out(entry + "\n")
        out("};\n")
        out("} // end namespace DC\n")
    else:
        # Private dictionary: declare the include-time initializer.
        out("// This must be included before the initializer is declared.\n")
        out("#include \"vtkDICOMDictionary.h\"\n")
        out("\n")
        out("// Initializer to add dict when header included.\n")
        out("struct VTK_DICOM_EXPORT %sInitializer\n" % (classname,))
        out("{\n")
        out(" %sInitializer();\n" % (classname,))
        out(" ~%sInitializer();\n" % (classname,))
        out("};\n")
        out("\n")
        out("static %sInitializer %sInitializerInstance;\n" % (classname, classname))
    out("\n")
    out("#endif /* %s_h */\n" % (classname,))
def printbody(entry_dict, classname):
    """Write the C++ source file for the dictionaries to standard output.

    entry_dict maps each dictionary name to a tuple
    (entry_list, tag_table, key_table) as produced by makedict().
    """
    f = sys.stdout
    f.write(header)
    f.write("\n")
    f.write("#include \"vtkDICOMDictionary.h\"\n")
    f.write("#include \"%s.h\"\n" % (classname,))
    f.write("\n")
    f.write("namespace {\n")
    f.write("\n")
    f.write("typedef vtkDICOMVR VR;\n")
    f.write("typedef vtkDICOMVM VM;\n")
    f.write("typedef vtkDICOMDictEntry::Entry DictEntry;\n")
    # The standard dictionary's Dict struct is qualified with the
    # vtkDICOMDictionary:: namespace; private dictionaries are not.
    ns = ""
    if not privatedict:
        ns = "vtkDICOMDictionary::"
    dn = 0
    for name, (entry_list, tag_table, key_table) in entry_dict.items():
        dn = dn + 1
        ds = ""
        f.write("\n")
        if len(entry_dict) > 1:
            # Number the generated arrays when several dictionaries coexist.
            ds = "%03d" % (dn,)
            f.write("// ----- %s -----\n" % (name,))
            f.write("\n")
        f.write("const DictEntry Dict%sContents[] = {\n" % (ds,))
        for l in entry_list:
            f.write(l + "\n")
        f.write("};\n")
        # Emit two flattened hash tables: one keyed on tag, one on key name.
        for table,tagorkey in [(tag_table,"Tag"),(key_table,"Key")]:
            f.write("\n")
            f.write("const unsigned short Dict%s%sHashTable[] = {\n" % (ds,tagorkey))
            # Layout: len(table)+1 bucket offsets first (empty buckets all
            # point at a shared terminating zero slot), then each bucket's
            # data.  Values are emitted ten per output line.
            i = 0
            j = len(table) + 1
            row = []
            for l in table:
                if l is None:
                    # Empty bucket: offset of the terminating zero slot.
                    row.append("%5d," % (len(table),))
                    i = i + 1
                    if i % 10 == 0:
                        f.write(" ".join(row) + "\n")
                        row = []
                else:
                    # Offset where this bucket's data begins.
                    row.append("%5d," % (j,))
                    i = i + 1
                    if i % 10 == 0:
                        f.write(" ".join(row) + "\n")
                        row = []
                    j = j + len(l) + 1
            # The shared terminating slot referenced by empty buckets.
            row.append("%5d," % (0,))
            i = i + 1
            if VISUALIZE_HASH_ROWS:
                # Debug aid: restart the line counter so each bucket starts
                # on its own output line.
                i = 0
            if i % 10 == 0:
                f.write(" ".join(row) + "\n")
                row = []
            # Second pass: each non-empty bucket is its pair count followed
            # by its values.
            for l in table:
                if not (l is None):
                    row.append("%5d," % (len(l)//2,))
                    i = i + 1
                    if i % 10 == 0:
                        f.write(" ".join(row) + "\n")
                        row = []
                    for j, e in enumerate(l):
                        row.append("%5d," % (e,))
                        i = i + 1
                        if VISUALIZE_HASH_ROWS and j == len(l) - 1:
                            i = 0
                        if i % 10 == 0:
                            f.write(" ".join(row) + "\n")
                            row = []
            # Flush any values left on a partial line.
            if i % 10 != 0:
                f.write(" ".join(row) + "\n")
            f.write("};\n")
        if not privatedict:
            # The standard dictionary's tables stay anonymous; close the
            # namespace before the externally visible Dict struct.
            f.write("\n")
            f.write("} // end anonymous namespace\n")
            f.write("\n")
        if len(entry_dict) > 1:
            ds = "%03d" % (dn,)
        f.write("vtkDICOMDictionary::Dict %sDict%sData = {\n" % (ns,ds))
        f.write("\"%s\",\n" % (name,))
        f.write("%d,\n" % (len(tag_table),))
        f.write("%d,\n" % (len(entry_list),))
        f.write("Dict%sTagHashTable,\n" % (ds,))
        f.write("Dict%sKeyHashTable,\n" % (ds,))
        f.write("Dict%sContents\n" % (ds,))
        f.write("};\n")
    if privatedict:
        # Emit a NULL-terminated list of all private dictionaries, plus a
        # Schwarz-counter initializer that (de)registers them when the
        # generated header is included.
        f.write("\n")
        f.write("vtkDICOMDictionary::Dict *PrivateDictData[] = {\n")
        dn = 0
        for item in entry_dict.items():
            dn = dn + 1
            f.write("&Dict%03dData," % (dn,))
            if dn % 5 == 0:
                f.write("\n")
        f.write("NULL\n")
        f.write("};\n")
        f.write("\n")
        f.write("} // end anonymous namespace\n")
        f.write("\n")
        f.write("static unsigned int %sInitializerCounter;\n" % (classname,))
        f.write("\n")
        f.write("%sInitializer::%sInitializer()\n" % (classname,classname))
        f.write("{\n")
        f.write(" if (%sInitializerCounter++ == 0)\n" % (classname,))
        f.write(" {\n")
        f.write(" for (vtkDICOMDictionary::Dict **dp = PrivateDictData; *dp != NULL; dp++)\n")
        f.write(" {\n")
        f.write(" vtkDICOMDictionary::AddPrivateDictionary(*dp);\n")
        f.write(" }\n")
        f.write(" }\n")
        f.write("}\n")
        f.write("\n")
        f.write("%sInitializer::~%sInitializer()\n" % (classname,classname))
        f.write("{\n")
        f.write(" if (--%sInitializerCounter == 0)\n" % (classname,))
        f.write(" {\n")
        f.write(" for (vtkDICOMDictionary::Dict **dp = PrivateDictData; *dp != NULL; dp++)\n")
        f.write(" {\n")
        f.write(" vtkDICOMDictionary::RemovePrivateDictionary((*dp)->Name);\n")
        f.write(" }\n")
        f.write(" }\n")
        f.write("}\n")
# Driver: build the tables and print either the header or the body file.
# For private dictionaries, one table set is built per named dictionary;
# otherwise the single standard DICOM table is built.
if privatedict:
    enum_dict = {}
    entry_dict = {}
    for name, lines in privatelines.items():
        enum_list, entry_list, tag_table, key_table = makedict(lines, name)
        enum_dict[name] = enum_list
        entry_dict[name] = (entry_list, tag_table, key_table)
    if printheader:
        printhead(enum_dict, privatedict)
    else:
        printbody(entry_dict, privatedict)
else:
    enum_list, entry_list, tag_table, key_table = makedict(lines)
    classname = "vtkDICOMDictHash"
    if printheader:
        printhead({"DICOM" : enum_list}, classname)
        # exclude this generated header from VTK's header tests
        sys.stdout.write("// VTK-HeaderTest-Exclude: %s.h\n" % (classname,))
    else:
        printbody({"DICOM" : (entry_list, tag_table, key_table)}, classname)
# informative: these names represent a range of tag values
""" keys with ranges
(0020,3100 to 31FF) SourceImageIDs
(0028,04x0) RowsForNthOrderCoefficients
(0028,04x1) ColumnsForNthOrderCoefficients
(0028,04x2) CoefficientCoding
(0028,04x3) CoefficientCodingPointers
(0028,08x0) CodeLabel
(0028,08x2) NumberOfTables
(0028,08x3) CodeTableLocation
(0028,08x4) BitsForCodeWord
(0028,08x8) ImageDataLocation
(1000,xxx0) EscapeTriplet
(1000,xxx1) RunLengthTriplet
(1000,xxx2) HuffmanTableSize
(1000,xxx3) HuffmanTableTriplet
(1000,xxx4) ShiftTableSize
(1000,xxx5) ShiftTableTriplet
(1010,xxxx) ZonalMap
"""
| |
import sys
import threading
import weakref
# from django.utils.six.moves import xrange
from six.moves import xrange
if sys.version_info < (3, 4):
from .weakref_backports import WeakMethod
else:
from weakref import WeakMethod
def _make_id(target):
if hasattr(target, '__func__'):
return (id(target.__self__), id(target.__func__))
return id(target)
NONE_ID = _make_id(None)
# A marker for caching
NO_RECEIVERS = object()
class Signal(object):
    """
    Base class for all signals.

    Internal attributes:

        receivers
            A list of (receiverkey, receiver-or-weakref) pairs; see connect()
            for how receiverkey is built and when a weakref is stored.
    """

    def __init__(self, providing_args=None, use_caching=False):
        """
        Create a new signal.

        providing_args
            A list of the arguments this signal can pass along in a send() call.
        use_caching
            If True, the live receivers for each sender are cached in a
            WeakKeyDictionary (so the cache does not keep senders alive).
        """
        self.receivers = []
        if providing_args is None:
            providing_args = []
        self.providing_args = set(providing_args)
        # Guards every mutation of self.receivers.
        self.lock = threading.Lock()
        self.use_caching = use_caching
        # For convenience we create empty caches even if they are not used.
        # A note about caching: if use_caching is defined, then for each
        # distinct sender we cache the receivers that sender has in
        # 'sender_receivers_cache'. The cache is cleaned when .connect() or
        # .disconnect() is called and populated on send().
        self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
        # Set by _remove_receiver() when a weakly referenced receiver has been
        # garbage collected; dead entries are purged lazily under self.lock.
        self._dead_receivers = False

    def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
        """
        Connect receiver to sender for signal.

        Arguments:

            receiver
                A function or an instance method which is to receive signals.
                Receivers must be hashable objects.

                If weak is True, then receiver must be weak-referencable.

                Receivers must be able to accept keyword arguments.

                If receivers have a dispatch_uid attribute, the receiver will
                not be added if another receiver already exists with that
                dispatch_uid.

            sender
                The sender to which the receiver should respond. Can be any
                Python object, or None to receive events from any sender.

            weak
                Whether to use weak references to the receiver. By default, the
                module will attempt to use weak references to the receiver
                objects. If this parameter is false, then strong references will
                be used.

            dispatch_uid
                An identifier used to uniquely identify a particular instance of
                a receiver. This will usually be a string, though it may be
                anything hashable.
        """
        # from django.conf import settings
        # If DEBUG is on, check that we got a good receiver
        # if settings.configured and settings.DEBUG:
        # NOTE(review): the settings gate above is commented out in this
        # vendored copy, so the sanity checks below always run.
        if True:
            import inspect
            assert callable(receiver), "Signal receivers must be callable."
            # Check for **kwargs
            # Not all callables are inspectable with getargspec, so we'll
            # try a couple different ways but in the end fall back on assuming
            # it is -- we don't want to prevent registration of valid but weird
            # callables.
            try:
                argspec = inspect.getargspec(receiver)
            except TypeError:
                try:
                    argspec = inspect.getargspec(receiver.__call__)
                except (TypeError, AttributeError):
                    argspec = None
            if argspec:
                # argspec[2] is the **kwargs (varkw) slot.
                assert argspec[2] is not None, \
                    "Signal receivers must accept keyword arguments (**kwargs)."
        if dispatch_uid:
            lookup_key = (dispatch_uid, _make_id(sender))
        else:
            lookup_key = (_make_id(receiver), _make_id(sender))
        if weak:
            ref = weakref.ref
            receiver_object = receiver
            # Check for bound methods: a plain weakref to a bound method
            # would die immediately, so use WeakMethod keyed on the instance.
            if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
                ref = WeakMethod
                receiver_object = receiver.__self__
            if sys.version_info >= (3, 4):
                receiver = ref(receiver)
                # Mark our list dirty once the underlying object is collected.
                weakref.finalize(receiver_object, self._remove_receiver)
            else:
                receiver = ref(receiver, self._remove_receiver)
        with self.lock:
            self._clear_dead_receivers()
            # De-duplicate: only append if no receiver with this key exists.
            for r_key, _ in self.receivers:
                if r_key == lookup_key:
                    break
            else:
                self.receivers.append((lookup_key, receiver))
            self.sender_receivers_cache.clear()

    def disconnect(self, receiver=None, sender=None, weak=True, dispatch_uid=None):
        """
        Disconnect receiver from sender for signal.

        If weak references are used, disconnect need not be called. The receiver
        will be removed from dispatch automatically.

        Arguments:

            receiver
                The registered receiver to disconnect. May be none if
                dispatch_uid is specified.

            sender
                The registered sender to disconnect

            weak
                The weakref state to disconnect

            dispatch_uid
                the unique identifier of the receiver to disconnect
        """
        if dispatch_uid:
            lookup_key = (dispatch_uid, _make_id(sender))
        else:
            lookup_key = (_make_id(receiver), _make_id(sender))
        with self.lock:
            self._clear_dead_receivers()
            for index in xrange(len(self.receivers)):
                (r_key, _) = self.receivers[index]
                if r_key == lookup_key:
                    del self.receivers[index]
                    break
            self.sender_receivers_cache.clear()

    def has_listeners(self, sender=None):
        """Return True if any live receiver would be called for *sender*."""
        return bool(self._live_receivers(sender))

    def send(self, sender, **named):
        """
        Send signal from sender to all connected receivers.

        If any receiver raises an error, the error propagates back through send,
        terminating the dispatch loop, so it is quite possible to not have all
        receivers called if a receiver raises an error.

        Arguments:

            sender
                The sender of the signal. Either a specific object or None.

            named
                Named arguments which will be passed to receivers.

        Returns a list of tuple pairs [(receiver, response), ... ].
        """
        responses = []
        if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
            return responses
        for receiver in self._live_receivers(sender):
            response = receiver(signal=self, sender=sender, **named)
            responses.append((receiver, response))
        return responses

    def send_robust(self, sender, **named):
        """
        Send signal from sender to all connected receivers catching errors.

        Arguments:

            sender
                The sender of the signal. Can be any python object (normally one
                registered with a connect if you actually want something to
                occur).

            named
                Named arguments which will be passed to receivers. These
                arguments must be a subset of the argument names defined in
                providing_args.

        Return a list of tuple pairs [(receiver, response), ... ]. May raise
        DispatcherKeyError.

        If any receiver raises an error (specifically any subclass of
        Exception), the error instance is returned as the result for that
        receiver.
        """
        responses = []
        if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
            return responses
        # Call each receiver with whatever arguments it can accept.
        # Return a list of tuple pairs [(receiver, response), ... ].
        for receiver in self._live_receivers(sender):
            try:
                response = receiver(signal=self, sender=sender, **named)
            except Exception as err:
                responses.append((receiver, err))
            else:
                responses.append((receiver, response))
        return responses

    def _clear_dead_receivers(self):
        # Note: caller is assumed to hold self.lock.
        if self._dead_receivers:
            self._dead_receivers = False
            new_receivers = []
            for r in self.receivers:
                # Drop entries whose weakref no longer resolves.
                if isinstance(r[1], weakref.ReferenceType) and r[1]() is None:
                    continue
                new_receivers.append(r)
            self.receivers = new_receivers

    def _live_receivers(self, sender):
        """
        Filter sequence of receivers to get resolved, live receivers.

        This checks for weak references and resolves them, then returning only
        live receivers.
        """
        receivers = None
        if self.use_caching and not self._dead_receivers:
            receivers = self.sender_receivers_cache.get(sender)
            # We could end up here with NO_RECEIVERS even if we do check this case in
            # .send() prior to calling _live_receivers() due to concurrent .send() call.
            if receivers is NO_RECEIVERS:
                return []
        if receivers is None:
            with self.lock:
                self._clear_dead_receivers()
                senderkey = _make_id(sender)
                receivers = []
                # Match receivers registered for this sender or for any sender.
                for (receiverkey, r_senderkey), receiver in self.receivers:
                    if r_senderkey == NONE_ID or r_senderkey == senderkey:
                        receivers.append(receiver)
                if self.use_caching:
                    if not receivers:
                        self.sender_receivers_cache[sender] = NO_RECEIVERS
                    else:
                        # Note, we must cache the weakref versions.
                        self.sender_receivers_cache[sender] = receivers
        non_weak_receivers = []
        for receiver in receivers:
            if isinstance(receiver, weakref.ReferenceType):
                # Dereference the weak reference.
                receiver = receiver()
                if receiver is not None:
                    non_weak_receivers.append(receiver)
            else:
                non_weak_receivers.append(receiver)
        return non_weak_receivers

    def _remove_receiver(self, receiver=None):
        # Mark that the self.receivers list has dead weakrefs. If so, we will
        # clean those up in connect, disconnect and _live_receivers while
        # holding self.lock. Note that doing the cleanup here isn't a good
        # idea, _remove_receiver() will be called as side effect of garbage
        # collection, and so the call can happen while we are already holding
        # self.lock.
        self._dead_receivers = True
def receiver(signal, **kwargs):
    """
    A decorator for connecting receivers to signals. Used by passing in the
    signal (or list of signals) and keyword arguments to connect::

        @receiver(post_save, sender=MyModel)
        def signal_receiver(sender, **kwargs):
            ...

        @receiver([post_save, post_delete], sender=MyModel)
        def signals_receiver(sender, **kwargs):
            ...
    """
    def _decorator(func):
        # Accept a single signal or a list/tuple of them; connect each.
        signals = signal if isinstance(signal, (list, tuple)) else [signal]
        for sig in signals:
            sig.connect(func, **kwargs)
        return func
    return _decorator
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._gallery_images_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_by_gallery_request
T = TypeVar('T')
# Signature of the optional ``cls`` response hook: it is handed the raw
# PipelineResponse, the deserialized body, and the response headers dict.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class GalleryImagesOperations:
    """GalleryImagesOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.compute.v2018_06_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_image_name: str,
        gallery_image: "_models.GalleryImage",
        **kwargs: Any
    ) -> "_models.GalleryImage":
        # Issues the initial PUT of the long-running create/update operation;
        # begin_create_or_update polls until the operation completes.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GalleryImage"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(gallery_image, 'GalleryImage')
        request = build_create_or_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            gallery_name=gallery_name,
            gallery_image_name=gallery_image_name,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200, 201, and 202 all carry a GalleryImage body.
        if response.status_code == 200:
            deserialized = self._deserialize('GalleryImage', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('GalleryImage', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('GalleryImage', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'}  # type: ignore

    @distributed_trace_async
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_image_name: str,
        gallery_image: "_models.GalleryImage",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.GalleryImage"]:
        """Create or update a gallery Image Definition.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param gallery_name: The name of the Shared Image Gallery in which the Image Definition is to
         be created.
        :type gallery_name: str
        :param gallery_image_name: The name of the gallery Image Definition to be created or updated.
         The allowed characters are alphabets and numbers with dots, dashes, and periods allowed in the
         middle. The maximum length is 80 characters.
        :type gallery_image_name: str
        :param gallery_image: Parameters supplied to the create or update gallery image operation.
        :type gallery_image: ~azure.mgmt.compute.v2018_06_01.models.GalleryImage
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either GalleryImage or the result of
         cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2018_06_01.models.GalleryImage]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GalleryImage"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # The lambda passes the raw pipeline response straight through so
            # the poller (not this call) deserializes the final result.
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                gallery_name=gallery_name,
                gallery_image_name=gallery_image_name,
                gallery_image=gallery_image,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Called by the poller with the final response.
            response = pipeline_response.http_response
            deserialized = self._deserialize('GalleryImage', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new one.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'}  # type: ignore

    @distributed_trace_async
    async def get(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_image_name: str,
        **kwargs: Any
    ) -> "_models.GalleryImage":
        """Retrieves information about a gallery Image Definition.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param gallery_name: The name of the Shared Image Gallery from which the Image Definitions are
         to be retrieved.
        :type gallery_name: str
        :param gallery_image_name: The name of the gallery Image Definition to be retrieved.
        :type gallery_image_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: GalleryImage, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2018_06_01.models.GalleryImage
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GalleryImage"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            gallery_name=gallery_name,
            gallery_image_name=gallery_image_name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('GalleryImage', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'}  # type: ignore

    async def _delete_initial(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_image_name: str,
        **kwargs: Any
    ) -> None:
        # Issues the initial DELETE of the long-running delete operation.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_delete_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            gallery_name=gallery_name,
            gallery_image_name=gallery_image_name,
            template_url=self._delete_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'}  # type: ignore

    @distributed_trace_async
    async def begin_delete(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_image_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Delete a gallery image.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param gallery_name: The name of the Shared Image Gallery in which the Image Definition is to
         be deleted.
        :type gallery_name: str
        :param gallery_image_name: The name of the gallery Image Definition to be deleted.
        :type gallery_image_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                gallery_name=gallery_name,
                gallery_image_name=gallery_image_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Delete has no body; only invoke the custom hook if provided.
            if cls:
                return cls(pipeline_response, None, {})

        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'}  # type: ignore

    @distributed_trace
    def list_by_gallery(
        self,
        resource_group_name: str,
        gallery_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.GalleryImageList"]:
        """List gallery Image Definitions in a gallery.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param gallery_name: The name of the Shared Image Gallery from which Image Definitions are to
         be listed.
        :type gallery_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either GalleryImageList or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2018_06_01.models.GalleryImageList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GalleryImageList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            if not next_link:
                # First page: use the operation's own URL template.
                request = build_list_by_gallery_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    gallery_name=gallery_name,
                    template_url=self.list_by_gallery.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # Subsequent pages: the service-provided nextLink is the URL.
                request = build_list_by_gallery_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    gallery_name=gallery_name,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Returns (continuation token, iterable of items) for the pager.
            deserialized = self._deserialize("GalleryImageList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_gallery.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images'}  # type: ignore
| |
import sys
import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.cuda import dnn
from theano.sandbox.cuda.basic_ops import gpu_contiguous
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from pylearn2.sandbox.cuda_convnet.pool import MaxPool
from pylearn2.expr.normalize import CrossChannelNormalization
import warnings
warnings.filterwarnings("ignore")
rng = np.random.RandomState(23455)
# set a fixed number for 2 purpose:
# 1. repeatable experiments; 2. for multiple-GPU, the same initial weights
class Weight(object):
def __init__(self, w_shape, mean=0, std=0.01):
super(Weight, self).__init__()
if std != 0:
self.np_values = np.asarray(
rng.normal(mean, std, w_shape), dtype=theano.config.floatX)
else:
self.np_values = np.cast[theano.config.floatX](
mean * np.ones(w_shape, dtype=theano.config.floatX))
self.val = theano.shared(value=self.np_values)
def save_weight(self, dir, name):
print 'weight saved: ' + name
np.save(dir + name + '.npy', self.val.get_value())
def load_weight(self, dir, name):
print 'weight loaded: ' + name
self.np_values = np.load(dir + name + '.npy')
self.val.set_value(self.np_values)
class DataLayer(object):
    # Input layer that applies batch-wide random mirroring and cropping.
    def __init__(self, input, image_shape, cropsize, rand, mirror, flag_rand):
        '''
        The random mirroring and cropping in this function is done for the
        whole batch.

        input: 4d tensor, c01b layout (channel, row, col, batch) --
            assumes 3 channels (see the `mirror_rand * 3` slicing below);
            TODO confirm against caller.
        image_shape: (channel, row, col, batch) of the incoming batch
        cropsize: side length of the square crop taken from each image
        rand: 3-vector of uniform randoms driving crop x, crop y, and mirror
        mirror: NOTE(review) this parameter is never read -- it is
            immediately overwritten on the first line of the body
        flag_rand: True for random crop/mirror; False for a center crop
        '''
        # trick for random mirroring
        mirror = input[:, :, ::-1, :]
        input = T.concatenate([input, mirror], axis=0)
        # crop images
        center_margin = (image_shape[2] - cropsize) / 2
        if flag_rand:
            # Random mirror choice and crop offsets in [0, 2*center_margin)
            mirror_rand = T.cast(rand[2], 'int32')
            crop_xs = T.cast(rand[0] * center_margin * 2, 'int32')
            crop_ys = T.cast(rand[1] * center_margin * 2, 'int32')
        else:
            # Deterministic center crop with no mirroring
            mirror_rand = 0
            crop_xs = center_margin
            crop_ys = center_margin
        # Channels 0:3 hold the original batch, 3:6 the mirrored copy;
        # mirror_rand selects which of the two to keep.
        self.output = input[mirror_rand * 3:(mirror_rand + 1) * 3, :, :, :]
        self.output = self.output[
            :, crop_xs:crop_xs + cropsize, crop_ys:crop_ys + cropsize, :]
        print "data layer with shape_in: " + str(image_shape)
class ConvPoolLayer(object):
def __init__(self, input, image_shape, filter_shape, convstride, padsize,
group, poolsize, poolstride, bias_init, lrn=False,
lib_conv='cudnn',
):
'''
lib_conv can be cudnn (recommended)or cudaconvnet
'''
self.filter_size = filter_shape
self.convstride = convstride
self.padsize = padsize
self.poolsize = poolsize
self.poolstride = poolstride
self.channel = image_shape[0]
self.lrn = lrn
self.lib_conv = lib_conv
assert group in [1, 2]
self.filter_shape = np.asarray(filter_shape)
self.image_shape = np.asarray(image_shape)
if self.lrn:
self.lrn_func = CrossChannelNormalization()
if group == 1:
self.W = Weight(self.filter_shape)
self.b = Weight(self.filter_shape[3], bias_init, std=0)
else:
self.filter_shape[0] = self.filter_shape[0] / 2
self.filter_shape[3] = self.filter_shape[3] / 2
self.image_shape[0] = self.image_shape[0] / 2
self.image_shape[3] = self.image_shape[3] / 2
self.W0 = Weight(self.filter_shape)
self.W1 = Weight(self.filter_shape)
self.b0 = Weight(self.filter_shape[3], bias_init, std=0)
self.b1 = Weight(self.filter_shape[3], bias_init, std=0)
if lib_conv == 'cudaconvnet':
self.conv_op = FilterActs(pad=self.padsize, stride=self.convstride,
partial_sum=1)
# Conv
if group == 1:
contiguous_input = gpu_contiguous(input)
contiguous_filters = gpu_contiguous(self.W.val)
conv_out = self.conv_op(contiguous_input, contiguous_filters)
conv_out = conv_out + self.b.val.dimshuffle(0, 'x', 'x', 'x')
else:
contiguous_input0 = gpu_contiguous(
input[:self.channel / 2, :, :, :])
contiguous_filters0 = gpu_contiguous(self.W0.val)
conv_out0 = self.conv_op(
contiguous_input0, contiguous_filters0)
conv_out0 = conv_out0 + \
self.b0.val.dimshuffle(0, 'x', 'x', 'x')
contiguous_input1 = gpu_contiguous(
input[self.channel / 2:, :, :, :])
contiguous_filters1 = gpu_contiguous(self.W1.val)
conv_out1 = self.conv_op(
contiguous_input1, contiguous_filters1)
conv_out1 = conv_out1 + \
self.b1.val.dimshuffle(0, 'x', 'x', 'x')
conv_out = T.concatenate([conv_out0, conv_out1], axis=0)
# ReLu
self.output = T.maximum(conv_out, 0)
conv_out = gpu_contiguous(conv_out)
# Pooling
if self.poolsize != 1:
self.pool_op = MaxPool(ds=poolsize, stride=poolstride)
self.output = self.pool_op(self.output)
elif lib_conv == 'cudnn':
input_shuffled = input.dimshuffle(3, 0, 1, 2) # c01b to bc01
# in01out to outin01
# print image_shape_shuffled
# print filter_shape_shuffled
if group == 1:
W_shuffled = self.W.val.dimshuffle(3, 0, 1, 2) # c01b to bc01
conv_out = dnn.dnn_conv(img=input_shuffled,
kerns=W_shuffled,
subsample=(convstride, convstride),
border_mode=padsize,
)
conv_out = conv_out + self.b.val.dimshuffle('x', 0, 'x', 'x')
else:
W0_shuffled = \
self.W0.val.dimshuffle(3, 0, 1, 2) # c01b to bc01
conv_out0 = \
dnn.dnn_conv(img=input_shuffled[:, :self.channel / 2,
:, :],
kerns=W0_shuffled,
subsample=(convstride, convstride),
border_mode=padsize,
)
conv_out0 = conv_out0 + \
self.b0.val.dimshuffle('x', 0, 'x', 'x')
W1_shuffled = \
self.W1.val.dimshuffle(3, 0, 1, 2) # c01b to bc01
conv_out1 = \
dnn.dnn_conv(img=input_shuffled[:, self.channel / 2:,
:, :],
kerns=W1_shuffled,
subsample=(convstride, convstride),
border_mode=padsize,
)
conv_out1 = conv_out1 + \
self.b1.val.dimshuffle('x', 0, 'x', 'x')
conv_out = T.concatenate([conv_out0, conv_out1], axis=1)
# ReLu
self.output = T.maximum(conv_out, 0)
# Pooling
if self.poolsize != 1:
self.output = dnn.dnn_pool(self.output,
ws=(poolsize, poolsize),
stride=(poolstride, poolstride))
self.output = self.output.dimshuffle(1, 2, 3, 0) # bc01 to c01b
else:
NotImplementedError("lib_conv can only be cudaconvnet or cudnn")
# LRN
if self.lrn:
# lrn_input = gpu_contiguous(self.output)
self.output = self.lrn_func(self.output)
if group == 1:
self.params = [self.W.val, self.b.val]
self.weight_type = ['W', 'b']
else:
self.params = [self.W0.val, self.b0.val, self.W1.val, self.b1.val]
self.weight_type = ['W', 'b', 'W', 'b']
print "conv ({}) layer with shape_in: {}".format(lib_conv,
str(image_shape))
class FCLayer(object):
def __init__(self, input, n_in, n_out):
self.W = Weight((n_in, n_out), std=0.005)
self.b = Weight(n_out, mean=0.1, std=0)
self.input = input
lin_output = T.dot(self.input, self.W.val) + self.b.val
self.output = T.maximum(lin_output, 0)
self.params = [self.W.val, self.b.val]
self.weight_type = ['W', 'b']
print 'fc layer with num_in: ' + str(n_in) + ' num_out: ' + str(n_out)
class DropoutLayer(object):
seed_common = np.random.RandomState(0) # for deterministic results
# seed_common = np.random.RandomState()
layers = []
def __init__(self, input, n_in, n_out, prob_drop=0.5):
self.prob_drop = prob_drop
self.prob_keep = 1.0 - prob_drop
self.flag_on = theano.shared(np.cast[theano.config.floatX](1.0))
self.flag_off = 1.0 - self.flag_on
seed_this = DropoutLayer.seed_common.randint(0, 2**31-1)
mask_rng = theano.tensor.shared_randomstreams.RandomStreams(seed_this)
self.mask = mask_rng.binomial(n=1, p=self.prob_keep, size=input.shape)
self.output = \
self.flag_on * T.cast(self.mask, theano.config.floatX) * input + \
self.flag_off * self.prob_keep * input
DropoutLayer.layers.append(self)
print 'dropout layer with P_drop: ' + str(self.prob_drop)
@staticmethod
def SetDropoutOn():
for i in range(0, len(DropoutLayer.layers)):
DropoutLayer.layers[i].flag_on.set_value(1.0)
@staticmethod
def SetDropoutOff():
for i in range(0, len(DropoutLayer.layers)):
DropoutLayer.layers[i].flag_on.set_value(0.0)
class SoftmaxLayer(object):
def __init__(self, input, n_in, n_out):
self.W = Weight((n_in, n_out))
self.b = Weight((n_out,), std=0)
self.p_y_given_x = T.nnet.softmax(
T.dot(input, self.W.val) + self.b.val)
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
self.params = [self.W.val, self.b.val]
self.weight_type = ['W', 'b']
print 'softmax layer with num_in: ' + str(n_in) + \
' num_out: ' + str(n_out)
def negative_log_likelihood(self, y):
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def errors(self, y):
if y.ndim != self.y_pred.ndim:
raise TypeError('y should have the same shape as self.y_pred',
('y', y.type, 'y_pred', self.y_pred.type))
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
def errors_top_x(self, y, num_top=5):
if y.ndim != self.y_pred.ndim:
raise TypeError('y should have the same shape as self.y_pred',
('y', y.type, 'y_pred', self.y_pred.type))
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
y_pred_top_x = T.argsort(self.p_y_given_x, axis=1)[:, -num_top:]
y_top_x = y.reshape((y.shape[0], 1)).repeat(num_top, axis=1)
return T.mean(T.min(T.neq(y_pred_top_x, y_top_x), axis=1))
else:
raise NotImplementedError()
| |
"""
Unit tests for the `ambient` module of ``TAMOC``
Provides testing of all of the functions, classes and methods in the `ambient`
module. These tests rely on data stored in the ``./data`` folder and will
write data to and read data from the ``./test/output`` folder.
"""
# S. Socolofsky, July 2013, Texas A&M University <socolofs@tamu.edu>.
from __future__ import (absolute_import, division, print_function)
import tamoc
from tamoc import ambient
import os
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_approx_equal
from scipy.interpolate import interp1d
from datetime import datetime
from netCDF4 import Dataset, date2num, num2date
# ----------------------------------------------------------------------------
# Functions used by unit tests
# ----------------------------------------------------------------------------
# Get a platform-independent path to the datafile
DATA_DIR = os.path.realpath(os.path.join(os.path.dirname(tamoc.__file__),'data'))
# Directory receiving files written by these tests; created on first run
OUTPUT_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__),'output'))
if not os.path.exists(OUTPUT_DIR):
    os.mkdir(OUTPUT_DIR)
def get_units(data, units, nr, nc, mks_units, ans=None):
    """
    Convert `data` to mks units with ambient.convert_units, then verify
    the converted array's shape (nr x nc), the translated unit labels
    against `mks_units`, and, when `ans` is given, the numeric values.
    Returns the converted (data, units) tuple.
    """
    # Run the conversion under test
    data, units = ambient.convert_units(data, units)
    # Verify the expected number of rows and columns
    shape_2d = np.atleast_2d(data).shape
    assert shape_2d[0] == nr
    assert shape_2d[1] == nc
    # Each unit label must now be its mks equivalent
    for i in range(len(units)):
        assert units[i] == mks_units[i]
    # Compare against the known answer when one is available
    if ans is not None:
        assert_array_almost_equal(data, ans, decimal=6)
    # Hand the converted data back to the caller
    return (data, units)
def get_profile(data, z_col, z_start, p_col, P, z_min, z_max, nr, nc):
    """
    Extract a cleaned profile with ambient.extract_profile and verify its
    surface point, clipped depth range, shape (nr x nc), type, and that
    depths increase monotonically.  Returns the extracted profile.
    """
    prof_data = ambient.extract_profile(data, z_col=z_col, z_start=z_start,
                                        p_col=p_col, P_atm=P)
    # The profile must be extended up to the free surface (z = 0)
    assert prof_data[0, z_col] == 0.0
    # ...and clipped to the expected minimum and maximum depths
    assert_approx_equal(prof_data[1, z_col], z_min, significant=6)
    assert_approx_equal(prof_data[-1, z_col], z_max, significant=6)
    # Shape and type checks
    assert prof_data.shape[0] == nr
    if nc is not None:
        assert prof_data.shape[1] == nc
    assert isinstance(prof_data, np.ndarray)
    # Depths must be strictly increasing
    for k in range(1, prof_data.shape[0]):
        assert prof_data[k, z_col] > prof_data[k - 1, z_col]
    # Hand the extracted profile back to the caller
    return prof_data
def check_nc_db(nc_file, summary, source, sea_name, p_lat,
                p_lon, p_time):
    """
    Use the ambient.create_nc_db() function to create a netCDF4-classic
    dataset from the given inputs and then check whether the dataset is
    created properly.

    Returns the open netCDF dataset so the caller can fill it with profile
    data; the caller is responsible for closing it.
    """
    # Create the dataset
    nc = ambient.create_nc_db(nc_file, summary, source, sea_name, p_lat,
                              p_lon, p_time)
    # Access the variables in the dataset
    time = nc.variables['time']
    lat = nc.variables['lat']
    lon = nc.variables['lon']
    z = nc.variables['z']
    T = nc.variables['temperature']
    S = nc.variables['salinity']
    P = nc.variables['pressure']
    # Check that the global attributes are set correctly
    assert nc.summary == summary
    assert nc.source == source
    assert nc.sea_name == sea_name
    # Check that the immutable data are written properly
    assert lat[0] == p_lat
    assert lon[0] == p_lon
    assert time[0] == p_time
    # No profile data inserted yet, so the z dimension is still empty
    assert z.shape == (0,)
    # Check the units are correct on the following variables
    assert z.units == 'm'
    assert T.units == 'K'
    assert S.units == 'psu'
    assert P.units == 'Pa'
    # Send back the template database
    return nc
def get_filled_nc_db(nc, data, symbols, units, comments, z_col,
                     long_names, std_names):
    """
    Check that data written to a netCDF dataset has been stored correctly.

    Fills `nc` with `data` via ambient.fill_nc_db, then verifies that each
    variable in `symbols` holds the expected values and carries the
    expected long_name, standard_name, units, and comment attributes.
    Returns the filled dataset.
    """
    # Store the data in the netCDF dataset
    # (removed an unused `z_len` local that read the z variable's shape)
    nc = ambient.fill_nc_db(nc, data, symbols, units, comments, z_col)
    # A single-variable profile arrives as 1d data; make it a column so the
    # comparison loop below can index it uniformly
    if len(symbols) == 1:
        data = np.atleast_2d(data).transpose()
    # Check that data and metadata were stored properly
    for i in range(len(symbols)):
        assert_array_almost_equal(nc.variables[symbols[i]][:],
                                  data[:,i], decimal = 6)
        assert nc.variables[symbols[i]].long_name == long_names[i]
        assert nc.variables[symbols[i]].standard_name == std_names[i]
        assert nc.variables[symbols[i]].units == units[i]
        assert nc.variables[symbols[i]].comment == comments[i]
    # Send back the correctly-filled dataset
    return nc
def get_profile_obj(nc, chem_names, chem_units):
    """
    Check that an ambient.Profile object is created correctly and that the
    methods operate as expected.

    Builds a Profile from the open dataset `nc` with the given chemical
    names/units, exercises get_units, get_values (scalar and vector
    inputs), append, and buoyancy_frequency, and returns the Profile.
    """
    # Accept a bare string for a single chemical
    if isinstance(chem_names, str):
        chem_names = [chem_names]
    if isinstance(chem_units, str):
        chem_units = [chem_units]
    # Create the profile object
    prf = ambient.Profile(nc, chem_names=chem_names)
    # Check the chemical names and units are correct
    for i in range(len(chem_names)):
        assert prf.chem_names[i] == chem_names[i]
    assert prf.nchems == len(chem_names)
    # Check the error criteria on the interpolator
    assert prf.err == 0.01
    # Check the get_units method
    name_list = ['temperature', 'salinity', 'pressure'] + chem_names
    unit_list = ['K', 'psu', 'Pa'] + chem_units
    for i in range(len(name_list)):
        assert prf.get_units(name_list[i])[0] == unit_list[i]
    units = prf.get_units(name_list)
    for i in range(len(name_list)):
        assert units[i] == unit_list[i]
    # Check the interpolator function ...
    # Pick a point in the middle of the raw dataset and read off the depth
    # and the values of all the variables
    nz = prf.ds.dims['z'] // 2
    z = prf.ds.coords['z'][nz]
    y = np.zeros(len(name_list))
    for name in name_list:
        y[name_list.index(name)] = prf.ds[name].values[nz]
    # Get an interpolated set of values at this same elevation
    yp = prf.get_values(z, name_list)
    # Check if the results are within the level of error expected by err
    for i in range(len(name_list)):
        assert np.abs((yp[i] - y[i]) / yp[i]) <= prf.err
    # Next, check that the variables returned by the get_values function are
    # the variables we expect
    nz = prf.nc.variables['z'].shape[0] // 2
    z = float(prf.nc.variables['z'][nz])
    Tp, Sp, Pp = prf.get_values(z, ['temperature', 'salinity', 'pressure'])
    T = prf.nc.variables['temperature'][nz]
    S = prf.nc.variables['salinity'][nz]
    P = prf.nc.variables['pressure'][nz]
    assert np.abs((Tp - T) / T) <= prf.err
    assert np.abs((Sp - S) / S) <= prf.err
    assert np.abs((Pp - P) / P) <= prf.err
    # Repeat the interpolation check for each chemical, if any
    if prf.nchems > 0:
        c = np.zeros(prf.nchems)
        cp = np.zeros(prf.nchems)
        for i in range(prf.nchems):
            c[i] = prf.nc.variables[chem_names[i]][nz]
            cp[i] = prf.get_values(z, chem_names[i])
            assert np.abs((cp[i] - c[i]) / c[i]) <= prf.err
    # Test the append() method by inserting the temperature data as a new
    # profile, this time in degrees celsius using the variable name temp
    n0 = prf.nchems
    z = prf.ds.coords['z'].values
    T = prf.ds['temperature'].values
    T_degC = T - 273.15
    assert_array_almost_equal(T_degC + 273.15, T, decimal = 6)
    data = np.vstack((z, T_degC)).transpose()
    symbols = ['z', 'temp']
    units = ['m', 'deg C']
    comments = ['measured', 'identical to temperature, but in deg C']
    prf.append(data, symbols, units, comments, 0)
    # Check that the data were inserted correctly (append converts the
    # stored values back to mks internally, hence the +273.15 comparison)
    Tnc = prf.ds['temp'].values
    assert_array_almost_equal(Tnc, T_degC+273.15, decimal = 6)
    assert prf.nc.variables['temp'].units == 'deg C'
    # Check that get_values works correctly with vector inputs for depth
    depths = np.linspace(prf.nc.variables['z'].valid_min,
                         prf.nc.variables['z'].valid_max, 100)
    Temps = prf.get_values(depths, ['temperature', 'temp'])
    for i in range(len(depths)):
        assert_approx_equal(Temps[i,0], Temps[i,1], significant = 6)
    # Make sure the units are returned correctly
    assert prf.get_units('temp')[0] == 'K'
    assert prf.ds['temp'].attrs['units'] == 'K'
    # Check that temp is now listed as a chemical
    assert prf.nchems == n0 + 1
    assert prf.chem_names[-1] == 'temp'
    # Test the API for calculating the buoyancy frequency (note that we do
    # not check the result, just that the function call does not raise an
    # error)
    N = prf.buoyancy_frequency(depths)
    N = prf.buoyancy_frequency(depths[50], h=0.1)
    # Send back the Profile object
    return prf
def check_net_numpy(net_ds, num_ds):
    """
    Check that an ambient.Profile object is created correctly and that the
    methods operate as expected.

    Compares a netCDF-backed Profile (`net_ds`) with an equivalent
    numpy-backed Profile (`num_ds`), verifying that both give identical
    results for units lookup, interpolation, appended data, and the
    buoyancy frequency.
    """
    chem_names = net_ds.chem_names
    chem_units = net_ds.chem_units
    # Check the chemical names and units are correct
    for i in range(3):
        assert num_ds.chem_names[i] in chem_names
        assert num_ds.chem_units[i] in chem_units
    assert num_ds.nchems == 4
    # Check the error criteria on the interpolator
    assert num_ds.err == 0.01
    # Check the get_units method
    name_list = ['temperature', 'salinity', 'pressure'] + chem_names[0:3]
    unit_list = ['K', 'psu', 'Pa'] + chem_units[0:3]
    for i in range(3):
        assert num_ds.get_units(name_list[i])[0] == unit_list[i]
    units = num_ds.get_units(name_list)
    for i in range(3):
        assert units[i] == unit_list[i]
    # Check the interpolator function ...
    z = np.linspace(num_ds.z_min, num_ds.z_max, 100)
    # Next, check that the variables returned by the get_values function are
    # the variables we expect
    for depth in z:
        assert num_ds.get_values(depth, 'temperature') == \
            net_ds.get_values(depth, 'temperature')
        assert num_ds.get_values(depth, 'salinity') == \
            net_ds.get_values(depth, 'salinity')
        assert num_ds.get_values(depth, 'pressure') == \
            net_ds.get_values(depth, 'pressure')
    # Test the append() method by inserting the temperature data as a new
    # profile, this time in degrees celsius using the variable name temp
    net_temp = net_ds.ds['temp'].values
    num_temp = num_ds.ds['temp'].values
    assert_array_almost_equal(net_temp, num_temp, decimal = 6)
    assert num_ds.get_units('temp')[0] == 'K'
    # Check that get_values works correctly with vector inputs for depth
    Temps = num_ds.get_values(z, ['temperature', 'temp'])
    for i in range(len(z)):
        assert_approx_equal(Temps[i,0], Temps[i,1], significant = 6)
    # Make sure the units are returned correctly
    assert num_ds.get_units('temp')[0] == 'K'
    # Check that temp is now listed as a chemical
    assert num_ds.chem_names[-1] == 'temp'
    # Test the API for calculating the buoyancy frequency (note that we do
    # not check the result, just that the function call does not raise an
    # error)
    N_num = num_ds.buoyancy_frequency(z)
    N_net = net_ds.buoyancy_frequency(z)
    assert_array_almost_equal(N_num, N_net, decimal=6)
# ----------------------------------------------------------------------------
# Unit tests
# ----------------------------------------------------------------------------
def test_conv_units():
    """
    Test the units conversion methods to make sure they produce the expected
    results.
    """
    # Case 1: a 2d array of data
    raw = np.array([[10, 25.4, 9.5, 34], [100, 10.7, 8.4, 34.5]])
    in_units = ['m', 'deg C', 'mg/l', 'psu']
    out_units = ['m', 'K', 'kg/m^3', 'psu']
    expected = np.array([[1.00000000e+01, 2.98550000e+02, 9.50000000e-03,
                          3.40000000e+01],
                         [1.00000000e+02, 2.83850000e+02, 8.40000000e-03,
                          3.45000000e+01]])
    raw, in_units = get_units(raw, in_units, 2, 4, out_units, expected)
    # Case 2: a scalar value
    raw = 10.
    raw, in_units = get_units(raw, 'deg C', 1, 1, ['K'],
                              np.array([273.15 + 10.]))
    # Case 3: a single row of data
    raw = [10, 25.4, 9.5, 34]
    in_units = ['m', 'deg C', 'mg/l', 'psu']
    out_units = ['m', 'K', 'kg/m^3', 'psu']
    expected = np.array([1.00000000e+01, 2.98550000e+02, 9.50000000e-03,
                         3.40000000e+01])
    raw, in_units = get_units(raw, in_units, 1, 4, out_units, expected)
    # Case 4: a single column of data
    raw = np.array([[10., 20., 30., 40]]).transpose()
    expected = np.array([[283.15], [293.15], [303.15], [313.15]])
    raw, out_units = get_units(raw, 'deg C', 4, 1, ['K'], expected)
def test_from_ctd():
    """
    Test the ambient data methods on a Sea-Bird SBE 19plus Data File.
    This unit test reads in the CTD data from ./data/ctd.BM54.cnv using
    `numpy.loadtxt` and then uses this data to test the data manipulation and
    storage methods in ambient.py.

    Writes ./output/test_BM54.nc as a side effect; test_profile_deeper()
    relies on that file existing.
    """
    dfile = os.path.join(DATA_DIR,'ctd_BM54.cnv')
    # Load in the raw data using np.loadtxt
    raw = np.loadtxt(dfile, comments = '#', skiprows = 175,
                     usecols = (0, 1, 3, 8, 9, 10, 12))
    # State the units of the input data (read by hand from the file)
    units = ['deg C', 'db', 'mg/m^3', 'm', 'psu', 'kg/m^3', 'mg/l']
    # State the equivalent mks units (translated here by hand)
    mks_units = ['K', 'Pa', 'kg/m^3', 'm', 'psu', 'kg/m^3', 'kg/m^3']
    # Clean the profile to remove depth reversals
    z_col = 3
    p_col = 1
    profile = get_profile(raw, z_col, 50, p_col, 0., 2.124, 1529.789, 11074,
                          7)
    # Convert the profile to standard units
    profile, units = get_units(profile, units, 11074, 7, mks_units)
    # Create an empty netCDF4-classic dataset to store the CTD information
    nc_file = os.path.join(OUTPUT_DIR,'test_BM54.nc')
    summary = 'Py.Test test file'
    source = 'R/V Brooks McCall, station BM54'
    sea_name = 'Gulf of Mexico'
    p_lat = 28.0 + 43.945 / 60.0
    p_lon = 360 - (88.0 + 22.607 / 60.0)
    p_time = date2num(datetime(2010, 5, 30, 18, 22, 12),
                      units = 'seconds since 1970-01-01 00:00:00 0:00',
                      calendar = 'julian')
    nc = check_nc_db(nc_file, summary, source, sea_name, p_lat,
                     p_lon, p_time)
    # Fill the netCDF4-classic dataset with the data in profile
    symbols = ['temperature', 'pressure', 'wetlab_fluorescence', 'z',
               'salinity', 'density', 'oxygen']
    comments = ['measured', 'measured', 'measured', 'measured', 'measured',
                'measured', 'measured']
    long_names = ['Absolute temperature', 'pressure', 'Wetlab fluorescence',
                  'depth below the water surface', 'Practical salinity',
                  'Density', 'Oxygen']
    std_names = ['temperature', 'pressure', 'wetlab fluorescence', 'depth',
                 'salinity', 'density', 'oxygen']
    nc = get_filled_nc_db(nc, profile, symbols, units, comments, z_col,
                          long_names, std_names)
    # Create a Profile object from this netCDF dataset and test the Profile
    # methods
    bm54 = get_profile_obj(nc, ['oxygen'], ['kg/m^3'])
    # Close down the pipes to the netCDF dataset files
    bm54.nc.close()
def test_from_txt():
    """
    Test the ambient data methods on simple text files.
    This unit test reads in the text files ./data/C.dat and
    ./data/T.dat using `numpy.loadtxt` and then uses this data to test
    the data manipulation and storage methods in ambient.py.

    Returns the resulting Profile object (with its netCDF pipe closed) so
    that test_using_numpy() can compare against it.
    """
    cdat_file = os.path.join(DATA_DIR,'C.dat')
    tdat_file = os.path.join(DATA_DIR,'T.dat')
    # Load in the raw data using np.loadtxt
    C_raw = np.loadtxt(cdat_file, comments = '%')
    T_raw = np.loadtxt(tdat_file, comments = '%')
    # Clean the profile to remove depth reversals
    C_data = get_profile(C_raw, 1, 25, None, 0., 1.0256410e+01, 8.0000000e+02,
                         34, 2)
    T_data = get_profile(T_raw, 1, 25, None, 0., 1.0831721e+01, 7.9922631e+02,
                         34, 2)
    # Convert the data to standard units
    C_data, C_units = get_units(C_data, ['psu', 'm'], 34, 2, ['psu', 'm'])
    T_data, T_units = get_units(T_data, ['deg C', 'm'], 34, 2, ['K', 'm'])
    # Create an empty netCDF4-classic dataset to store the CTD information
    nc_file = os.path.join(OUTPUT_DIR,'test_DS.nc')
    summary = 'Py.Test test file'
    source = 'Profiles from the SINTEF DeepSpill Report'
    sea_name = 'Norwegian Sea'
    p_lat = 64.99066
    p_lon = 4.84725
    p_time = date2num(datetime(2000, 6, 27, 12, 0, 0),
                      units = 'seconds since 1970-01-01 00:00:00 0:00',
                      calendar = 'julian')
    nc = check_nc_db(nc_file, summary, source, sea_name, p_lat,
                     p_lon, p_time)
    # Fill the netCDF4-classic dataset with the data in the salinity profile
    symbols = ['salinity', 'z']
    comments = ['measured', 'measured']
    long_names = ['Practical salinity', 'depth below the water surface']
    std_names = ['salinity', 'depth']
    nc = get_filled_nc_db(nc, C_data, symbols, C_units, comments, 1,
                          long_names, std_names)
    # Because the temperature data will be interpolated to the vertical
    # coordinates in the salinity profile, insert the data and test that
    # insertion worked correctly by hand
    symbols = ['temperature', 'z']
    comments = ['measured', 'measured']
    long_names = ['Absolute temperature', 'depth below the water surface']
    std_names = ['temperature', 'depth']
    nc = ambient.fill_nc_db(nc, T_data, symbols, T_units, comments, 1)
    # The depth coordinate must still match the salinity profile
    assert_array_almost_equal(nc.variables['z'][:],
                              C_data[:,1], decimal = 6)
    # The stored temperatures must interpolate back to the raw samples
    z = nc.variables['z'][:]
    T = nc.variables['temperature'][:]
    f = interp1d(z, T)
    for i in range(T_data.shape[0]):
        assert_approx_equal(T_data[i,0], f(T_data[i,1]), significant = 5)
    assert nc.variables['temperature'].comment == comments[0]
    # Calculate and insert the pressure data
    z = nc.variables['z'][:]
    T = nc.variables['temperature'][:]
    S = nc.variables['salinity'][:]
    P = ambient.compute_pressure(z, T, S, 0)
    P_data = np.vstack((z, P)).transpose()
    nc = ambient.fill_nc_db(nc, P_data, ['z', 'pressure'], ['m', 'Pa'],
                            ['measured', 'computed'], 0)
    # Test the Profile object
    ds = get_profile_obj(nc, [], [])
    # Close down the pipes to the netCDF dataset files
    ds.close_nc()
    return ds
def test_using_numpy():
    """
    Test the ambient data methods using only numpy
    This unit test repeats the tests in `test_from_txt()`, but using only
    the `numpy` array part of the `Profile` object instead of a netCDF
    dataset.

    Returns the numpy-based Profile object.
    """
    # Get the profile object using netCDF datasets
    net_profile = test_from_txt()
    # Get a platform-independent path to the datafile
    cdat_file = os.path.join(DATA_DIR,'C.dat')
    tdat_file = os.path.join(DATA_DIR,'T.dat')
    # Load in the raw data using np.loadtxt
    C_raw = np.loadtxt(cdat_file, comments = '%')
    T_raw = np.loadtxt(tdat_file, comments = '%')
    # Clean the profile to remove depth reversals
    C_data = get_profile(C_raw, 1, 25, None, 0., 1.0256410e+01,
                         8.0000000e+02, 34, 2)
    T_data = get_profile(T_raw, 1, 25, None, 0., 1.0831721e+01,
                         7.9922631e+02, 34, 2)
    # Convert the data to standard units
    C_data, C_units = get_units(C_data, ['psu', 'm'], 34, 2, ['psu', 'm'])
    T_data, T_units = get_units(T_data, ['deg C', 'm'], 34, 2, ['K', 'm'])
    # Create a numpy array to hold depth and salinity
    # (removed unused `var_names`/`var_units` locals that were never read)
    data = np.zeros((C_data.shape[0], 3))
    data[:,0] = C_data[:,1]
    data[:,2] = C_data[:,0]
    # Add the temperature data using the existing depth data
    data = ambient.add_data(data, 1, 'temperature', T_data, ['temperature',
                            'z'], T_units, ['measured', 'measured'], 1)
    # Compute the pressure from z, T, S and append it as a fourth column
    z = data[:,0]
    T = data[:,1]
    S = data[:,2]
    P = ambient.compute_pressure(z, T, S, 0)
    P_data = np.vstack((z, P)).transpose()
    data = ambient.add_data(data, 3, 'pressure', P_data, ['z', 'pressure'],
                            ['m', 'Pa'], ['measured', 'measured'], 0)
    # Select some current data
    current = np.array([0.15, 0.])
    current_units = ['m/s', 'm/s']
    # Create the profile object
    ztsp = ['z', 'temperature', 'salinity', 'pressure']
    ztsp_units = ['m', 'K', 'psu', 'Pa']
    ds = ambient.Profile(data, ztsp, None, 0.01, ztsp_units, None,
                         current=current, current_units=current_units)
    # Add these currents to the netCDF profile
    current = np.array([[0., 0.15, 0., 0.],
                        [800., 0.15, 0., 0.]])
    current_names = ['z', 'ua', 'va', 'wa']
    current_units = ['m', 'm/s', 'm/s', 'm/s']
    net_profile.append(current, current_names, current_units, z_col=0)
    # Add the 'temp' data to the numpy dataset
    z = ds.ds.coords['z'].values
    T = ds.ds['temperature'].values
    T_degC = T - 273.15
    data = np.vstack((z, T_degC)).transpose()
    symbols = ['z', 'temp']
    units = ['m', 'deg C']
    comments = ['measured', 'identical to temperature, but in deg C']
    ds.append(data, symbols, units, comments, 0)
    # Check if the two objects are equal
    check_net_numpy(net_profile, ds)
    return ds
def test_from_calcs():
    """
    Test the ambient data methods on synthetic profiles.
    This unit test creates synthetic data (e.g., profiles matching laboratory
    idealized conditions) and then uses this data to test the data
    manipulation and storage methods in ambient.py.
    """
    # Create the synthetic temperature and salinity profiles
    z = np.array([0.0, 2.4])
    T = np.array([21.0, 20.0])
    S = np.array([0.0, 30.0])
    # Create an empty netCDF4-classic dataset to store the CTD information
    nc_file = os.path.join(OUTPUT_DIR,'test_Lab.nc')
    summary = 'Py.Test test file'
    source = 'Synthetic profiles for idealized laboratory conditions'
    sea_name = 'None'
    p_lat = -999
    p_lon = -999
    p_time = date2num(datetime(2013, 7, 12, 11, 54, 0),
                      units = 'seconds since 1970-01-01 00:00:00 0:00',
                      calendar = 'julian')
    nc = check_nc_db(nc_file, summary, source, sea_name, p_lat,
                     p_lon, p_time)
    # Convert the temperature units
    T, T_units = get_units(T, ['deg C'], 1, 2, ['K'])
    # Fill the netCDF4-classic dataset with the data in these variables
    nc = get_filled_nc_db(nc, z, ['z'], ['m'], ['synthetic'], 0,
                          ['depth below the water surface'], ['depth'])
    # Check that we cannot overwrite this existing z-data
    # (replaces the former `assert True is True / assert True is False`
    # anti-pattern with an explicit pass/raise)
    try:
        nc = ambient.fill_nc_db(nc, z, 'z', 'm', 'synthetic', 0)
    except ValueError:
        pass
    else:
        raise AssertionError('fill_nc_db should refuse to overwrite z')
    # Fill in the remaining data
    data = np.zeros((2, 3))
    data[:,0] = z
    data[:,1] = T
    data[:,2] = S
    nc = get_filled_nc_db(nc, data, ['z', 'temperature', 'salinity'],
                          ['m', 'K', 'psu'],
                          ['synthetic', 'synthetic', 'synthetic'], 0,
                          ['depth below the water surface',
                           'Absolute temperature', 'Practical salinity'],
                          ['depth', 'temperature', 'salinity'])
    # Calculate and insert the pressure data
    P = ambient.compute_pressure(data[:,0], data[:,1], data[:,2], 0)
    P_data = np.vstack((data[:,0], P)).transpose()
    nc = ambient.fill_nc_db(nc, P_data, ['z', 'pressure'], ['m', 'Pa'],
                            ['measured', 'computed'], 0)
    # Create and test a Profile object for this dataset.
    lab = get_profile_obj(nc, [], [])
    # Close down the pipes to the netCDF dataset files
    lab.nc.close()
def check_from_roms():
    """
    Test the ambient data methods on data read from ROMS.
    this unit test reads in a ROMS netCDF output file, extracts the profile
    information, and creates a new netCDF dataset and Profile class object
    for use by the TAMOC modeling suite.
    TODO (S. Socolofsky 7/15/2013): After fixing the octant.roms module to
    have monotonically increasing depth, try to reinstate this test by
    changing the function name from check_from_roms() to test_from_roms().
    I was also having problems with being allowed to use the THREDDS netCDF
    file with py.test.  I could run the test under ipython, but not under
    py.test.

    NOTE: requires network access to the THREDDS server below; not
    collected by py.test because of the `check_` prefix.
    """
    # Get a path to a ROMS dataset on a THREDDS server
    nc_roms = 'http://barataria.tamu.edu:8080/thredds/dodsC/' + \
              'ROMS_Daily/08122012/ocean_his_08122012_24.nc'
    # Prepare the remaining inputs to the get_nc_db_from_roms() function
    # call
    nc_file = os.path.join(OUTPUT_DIR,'test_roms.nc')
    t_idx = 0
    j_idx = 400
    i_idx = 420
    chem_names = ['dye_01', 'dye_02']
    (nc, nc_roms) = ambient.get_nc_db_from_roms(nc_roms, nc_file, t_idx,
                                                j_idx, i_idx, chem_names)
    # Check the data are inserted correctly from ROMS into the new netCDF
    # dataset
    assert nc.summary == 'ROMS Simulation Data'
    assert nc.sea_name == 'ROMS'
    assert nc.variables['z'][:].shape[0] == 51
    assert nc.variables['z'][0] == nc.variables['z'].valid_min
    assert nc.variables['z'][-1] == nc.variables['z'].valid_max
    # Spot-check the first (surface) entry of each extracted variable
    assert_approx_equal(nc.variables['temperature'][0], 303.24728393554688,
                        significant = 6)
    assert_approx_equal(nc.variables['salinity'][0], 36.157352447509766,
                        significant = 6)
    assert_approx_equal(nc.variables['pressure'][0], 101325.0,
                        significant = 6)
    assert_approx_equal(nc.variables['dye_01'][0], 3.4363944759034656e-22,
                        significant = 6)
    assert_approx_equal(nc.variables['dye_02'][0], 8.8296093939330156e-21,
                        significant = 6)
    # Spot-check the last (deepest) entry of each extracted variable
    assert_approx_equal(nc.variables['temperature'][-1], 290.7149658203125,
                        significant = 6)
    assert_approx_equal(nc.variables['salinity'][-1], 35.829414367675781,
                        significant = 6)
    assert_approx_equal(nc.variables['pressure'][-1], 3217586.2927573984,
                        significant = 6)
    assert_approx_equal(nc.variables['dye_01'][-1], 8.7777050221856635e-22,
                        significant = 6)
    assert_approx_equal(nc.variables['dye_02'][-1], 4.0334050451121613e-20,
                        significant = 6)
    # Create a Profile object from this netCDF dataset and test the Profile
    # methods
    roms = get_profile_obj(nc, chem_names, ['kg/m^3', 'kg/m^3'])
    # Close the pipe to the netCDF dataset
    roms.nc.close()
    nc_roms.close()
def test_profile_deeper():
    """
    Test the methods to compute buoyancy_frequency and to extend a CTD profile
    to greater depths.  We just test the data from ctd_bm54.cnv since these
    methods are independent of the source of data.
    """
    # Make sure the netCDF file for the ctd_BM54.cnv is already created by
    # running the test file that creates it.
    test_from_ctd()

    # Build a Profile object from that dataset
    profile = ambient.Profile(os.path.join(OUTPUT_DIR, 'test_BM54.nc'),
                              chem_names=['oxygen'])

    # The buoyancy frequency at 1500 m should match the recorded value
    assert_approx_equal(profile.buoyancy_frequency(1529.789, h=0.01),
                        0.00061463758327116565, significant=6)

    # Snapshot a few values so we can confirm they survive the extension
    var_names = ['temperature', 'salinity', 'pressure', 'oxygen']
    before = profile.get_values(1000., var_names)
    z_before = profile.interp_ds.coords['z'].values

    # Extend the profile down to 2500 m
    deeper_file = os.path.join(OUTPUT_DIR, 'test_BM54_deeper.nc')
    profile.extend_profile_deeper(2500., deeper_file)

    # The original data at 1000 m must be unchanged
    after = profile.get_values(1000., var_names)
    z_after = profile.interp_ds.coords['z'].values
    for new_val, old_val in zip(after, before):
        assert_approx_equal(new_val, old_val, significant=6)

    # The depth coordinate must have grown and now reach 2500 m
    print(z_after.shape, z_before.shape)
    assert z_after.shape[0] > z_before.shape[0]
    assert z_after[-1] == 2500.

    # Note that the buoyancy frequency shifts very slightly because density
    # is not linearly proportional to salinity.  Nonetheless, the results are
    # close to what we want, so this method of extending the profile works
    # adequately.
    for depth, expected in ((1500., 0.0006320416080592639),
                            (2500., 0.0006146292892002274)):
        assert_approx_equal(profile.buoyancy_frequency(depth), expected,
                            significant=6)

    profile.close_nc()
| |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import uuid
from swift import gettext_ as _
from time import ctime, time
from random import choice, random
from struct import unpack_from
from eventlet import sleep, Timeout
import swift.common.db
from swift.common.db import DatabaseConnectionError
from swift.container.backend import ContainerBroker
from swift.container.sync_store import ContainerSyncStore
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.internal_client import (
delete_object, put_object, InternalClient, UnexpectedResponse)
from swift.common.exceptions import ClientException
from swift.common.ring import Ring
from swift.common.ring.utils import is_local_device
from swift.common.utils import (
clean_content_type, config_true_value,
FileLikeIter, get_logger, hash_path, quote, urlparse, validate_sync_to,
whataremyips, Timestamp, decode_timestamps)
from swift.common.daemon import Daemon
from swift.common.http import HTTP_UNAUTHORIZED, HTTP_NOT_FOUND
from swift.common.wsgi import ConfigString
# The default internal client config body is to support upgrades without
# requiring deployment of the new /etc/swift/internal-client.conf
# (used verbatim by ContainerSync.__init__ when the operator has not set
# internal_client_conf_path).
ic_conf_body = """
[DEFAULT]
# swift_dir = /etc/swift
# user = swift
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_address = /dev/log
#
# comma separated list of functions to call to setup custom log handlers.
# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
# adapted_logger
# log_custom_handlers =
#
# If set, log_udp_host will override log_address
# log_udp_host =
# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host =
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
# log_statsd_metric_prefix =
[pipeline:main]
pipeline = catch_errors proxy-logging cache proxy-server
[app:proxy-server]
use = egg:swift#proxy
# See proxy-server.conf-sample for options
[filter:cache]
use = egg:swift#memcache
# See proxy-server.conf-sample for options
[filter:proxy-logging]
use = egg:swift#proxy_logging
[filter:catch_errors]
use = egg:swift#catch_errors
# See proxy-server.conf-sample for options
""".lstrip()
class ContainerSync(Daemon):
    """
    Daemon to sync syncable containers.

    This is done by scanning the local devices for container databases and
    checking for x-container-sync-to and x-container-sync-key metadata values.
    If they exist, newer rows since the last sync will trigger PUTs or DELETEs
    to the other container.

    .. note::

        Container sync will sync object POSTs only if the proxy server is set
        to use "object_post_as_copy = true" which is the default. So-called
        fast object posts, "object_post_as_copy = false" do not update the
        container listings and therefore can't be detected for synchronization.

    The actual syncing is slightly more complicated to make use of the three
    (or number-of-replicas) main nodes for a container without each trying to
    do the exact same work but also without missing work if one node happens to
    be down.

    Two sync points are kept per container database. All rows between the two
    sync points trigger updates. Any rows newer than both sync points cause
    updates depending on the node's position for the container (primary nodes
    do one third, etc. depending on the replica count of course). After a sync
    run, the first sync point is set to the newest ROWID known and the second
    sync point is set to newest ROWID for which all updates have been sent.

    An example may help. Assume replica count is 3 and perfectly matching
    ROWIDs starting at 1.

        First sync run, database has 6 rows:

            * SyncPoint1 starts as -1.
            * SyncPoint2 starts as -1.
            * No rows between points, so no "all updates" rows.
            * Six rows newer than SyncPoint1, so a third of the rows are sent
              by node 1, another third by node 2, remaining third by node 3.
            * SyncPoint1 is set as 6 (the newest ROWID known).
            * SyncPoint2 is left as -1 since no "all updates" rows were synced.

        Next sync run, database has 12 rows:

            * SyncPoint1 starts as 6.
            * SyncPoint2 starts as -1.
            * The rows between -1 and 6 all trigger updates (most of which
              should short-circuit on the remote end as having already been
              done).
            * Six more rows newer than SyncPoint1, so a third of the rows are
              sent by node 1, another third by node 2, remaining third by node
              3.
            * SyncPoint1 is set as 12 (the newest ROWID known).
            * SyncPoint2 is set as 6 (the newest "all updates" ROWID).

    In this way, under normal circumstances each node sends its share of
    updates each run and just sends a batch of older updates to ensure nothing
    was missed.

    :param conf: The dict of configuration values from the [container-sync]
                 section of the container-server.conf
    :param container_ring: If None, the <swift_dir>/container.ring.gz will be
                           loaded. This is overridden by unit tests.
    :param logger: Logger to use for container-sync log lines; if None, a new
                   one is created from conf with log_route 'container-sync'.
    """

    def __init__(self, conf, container_ring=None, logger=None):
        #: The dict of configuration values from the [container-sync] section
        #: of the container-server.conf.
        self.conf = conf
        #: Logger to use for container-sync log lines.
        self.logger = logger or get_logger(conf, log_route='container-sync')
        #: Path to the local device mount points.
        self.devices = conf.get('devices', '/srv/node')
        #: Indicates whether mount points should be verified as actual mount
        #: points (normally true, false for tests and SAIO).
        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
        #: Minimum time between full scans. This is to keep the daemon from
        #: running wild on near empty systems.
        self.interval = int(conf.get('interval', 300))
        #: Maximum amount of time to spend syncing a container before moving on
        #: to the next one. If a container sync hasn't finished in this time,
        #: it'll just be resumed next scan.
        self.container_time = int(conf.get('container_time', 60))
        #: ContainerSyncCluster instance for validating sync-to values.
        self.realms_conf = ContainerSyncRealms(
            os.path.join(
                conf.get('swift_dir', '/etc/swift'),
                'container-sync-realms.conf'),
            self.logger)
        #: The list of hosts we're allowed to send syncs to. This can be
        #: overridden by data in self.realms_conf
        self.allowed_sync_hosts = [
            h.strip()
            for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
            if h.strip()]
        #: Optional HTTP proxies through which to send the sync requests; one
        #: is picked at random per request by select_http_proxy().
        self.http_proxies = [
            a.strip()
            for a in conf.get('sync_proxy', '').split(',')
            if a.strip()]
        #: ContainerSyncStore instance for iterating over synced containers
        self.sync_store = ContainerSyncStore(self.devices,
                                             self.logger,
                                             self.mount_check)
        #: Number of containers with sync turned on that were successfully
        #: synced.
        self.container_syncs = 0
        #: Number of successful DELETEs triggered.
        self.container_deletes = 0
        #: Number of successful PUTs triggered.
        self.container_puts = 0
        #: Number of containers whose sync has been turned off, but
        #: are not yet cleared from the sync store.
        self.container_skips = 0
        #: Number of containers that had a failure of some type.
        self.container_failures = 0
        #: Time of last stats report.
        self.reported = time()
        self.swift_dir = conf.get('swift_dir', '/etc/swift')
        #: swift.common.ring.Ring for locating containers.
        self.container_ring = container_ring or Ring(self.swift_dir,
                                                     ring_name='container')
        bind_ip = conf.get('bind_ip', '0.0.0.0')
        self._myips = whataremyips(bind_ip)
        self._myport = int(conf.get('bind_port', 6001))
        # Honor the server's db_preallocation setting (off by default).
        swift.common.db.DB_PREALLOCATION = \
            config_true_value(conf.get('db_preallocation', 'f'))
        self.conn_timeout = float(conf.get('conn_timeout', 5))
        request_tries = int(conf.get('request_tries') or 3)

        # Build the InternalClient used for X-Newest GETs of source objects;
        # fall back to the baked-in ic_conf_body when no config path is given.
        internal_client_conf_path = conf.get('internal_client_conf_path')
        if not internal_client_conf_path:
            self.logger.warning(
                _('Configuration option internal_client_conf_path not '
                  'defined. Using default configuration, See '
                  'internal-client.conf-sample for options'))
            internal_client_conf = ConfigString(ic_conf_body)
        else:
            internal_client_conf = internal_client_conf_path
        try:
            self.swift = InternalClient(
                internal_client_conf, 'Swift Container Sync', request_tries)
        except IOError as err:
            # ENOENT means the configured conf file is missing; anything else
            # is unexpected and re-raised as-is.
            if err.errno != errno.ENOENT:
                raise
            raise SystemExit(
                _('Unable to load internal client from config: %r (%s)') %
                (internal_client_conf_path, err))

    def run_forever(self, *args, **kwargs):
        """
        Runs container sync scans until stopped.
        """
        # Random initial delay (0..interval) before the first scan,
        # presumably to stagger scans across daemons — TODO confirm.
        sleep(random() * self.interval)
        while True:
            begin = time()
            for path in self.sync_store.synced_containers_generator():
                self.container_sync(path)
                if time() - self.reported >= 3600:  # once an hour
                    self.report()
            elapsed = time() - begin
            # Pad out short scans so full passes are at least `interval` apart.
            if elapsed < self.interval:
                sleep(self.interval - elapsed)

    def run_once(self, *args, **kwargs):
        """
        Runs a single container sync scan.
        """
        self.logger.info(_('Begin container sync "once" mode'))
        begin = time()
        for path in self.sync_store.synced_containers_generator():
            self.container_sync(path)
            if time() - self.reported >= 3600:  # once an hour
                self.report()
        # Always emit a final report for the completed pass.
        self.report()
        elapsed = time() - begin
        self.logger.info(
            _('Container sync "once" mode completed: %.02fs'), elapsed)

    def report(self):
        """
        Writes a report of the stats to the logger and resets the stats for the
        next report.
        """
        self.logger.info(
            _('Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s '
              'puts], %(skip)s skipped, %(fail)s failed'),
            {'time': ctime(self.reported),
             'sync': self.container_syncs,
             'delete': self.container_deletes,
             'put': self.container_puts,
             'skip': self.container_skips,
             'fail': self.container_failures})
        self.reported = time()
        self.container_syncs = 0
        self.container_deletes = 0
        self.container_puts = 0
        self.container_skips = 0
        self.container_failures = 0

    def container_sync(self, path):
        """
        Checks the given path for a container database, determines if syncing
        is turned on for that database and, if so, sends any updates to the
        other container.

        :param path: the path to a container db
        """
        broker = None
        try:
            broker = ContainerBroker(path)
            # The path we pass to the ContainerBroker is a real path of
            # a container DB. If we get here, however, it means that this
            # path is linked from the sync_containers dir. In rare cases
            # of race or processes failures the link can be stale and
            # the get_info below will raise a DB doesn't exist exception
            # In this case we remove the stale link and raise an error
            # since in most cases the db should be there.
            try:
                info = broker.get_info()
            except DatabaseConnectionError as db_err:
                if str(db_err).endswith("DB doesn't exist"):
                    self.sync_store.remove_synced_container(broker)
                raise
            # Determine this node's position (ordinal) among the container's
            # primary nodes; skip containers we are not a primary for.
            # (The first element of get_nodes' result is unused here.)
            x, nodes = self.container_ring.get_nodes(info['account'],
                                                     info['container'])
            for ordinal, node in enumerate(nodes):
                if is_local_device(self._myips, self._myport,
                                   node['ip'], node['port']):
                    break
            else:
                return
            if not broker.is_deleted():
                sync_to = None
                user_key = None
                sync_point1 = info['x_container_sync_point1']
                sync_point2 = info['x_container_sync_point2']
                for key, (value, timestamp) in broker.metadata.items():
                    if key.lower() == 'x-container-sync-to':
                        sync_to = value
                    elif key.lower() == 'x-container-sync-key':
                        user_key = value
                # Both metadata values are required for syncing to be on.
                if not sync_to or not user_key:
                    self.container_skips += 1
                    self.logger.increment('skips')
                    return
                err, sync_to, realm, realm_key = validate_sync_to(
                    sync_to, self.allowed_sync_hosts, self.realms_conf)
                if err:
                    self.logger.info(
                        _('ERROR %(db_file)s: %(validate_sync_to_err)s'),
                        {'db_file': str(broker),
                         'validate_sync_to_err': err})
                    self.container_failures += 1
                    self.logger.increment('failures')
                    return
                # Spend at most container_time seconds on this container;
                # unfinished work is resumed on the next scan.
                stop_at = time() + self.container_time
                next_sync_point = None
                # Phase 1: retry rows between the two sync points, one row at
                # a time, advancing sync_point2 as rows succeed.
                while time() < stop_at and sync_point2 < sync_point1:
                    rows = broker.get_items_since(sync_point2, 1)
                    if not rows:
                        break
                    row = rows[0]
                    if row['ROWID'] > sync_point1:
                        break
                    # This node will only initially sync out one third of the
                    # objects (if 3 replicas, 1/4 if 4, etc.) and will skip
                    # problematic rows as needed in case of faults.
                    # This section will attempt to sync previously skipped
                    # rows in case the previous attempts by any of the nodes
                    # didn't succeed.
                    if not self.container_sync_row(
                            row, sync_to, user_key, broker, info, realm,
                            realm_key):
                        # Remember the first failed row so sync_point2 can be
                        # rolled back to it once this phase ends.
                        if not next_sync_point:
                            next_sync_point = sync_point2
                    sync_point2 = row['ROWID']
                    broker.set_x_container_sync_points(None, sync_point2)
                if next_sync_point:
                    broker.set_x_container_sync_points(None, next_sync_point)
                # Phase 2: rows newer than sync_point1; each node only handles
                # its hash-assigned share of these.
                while time() < stop_at:
                    rows = broker.get_items_since(sync_point1, 1)
                    if not rows:
                        break
                    row = rows[0]
                    key = hash_path(info['account'], info['container'],
                                    row['name'], raw_digest=True)
                    # This node will only initially sync out one third of the
                    # objects (if 3 replicas, 1/4 if 4, etc.). It'll come back
                    # around to the section above and attempt to sync
                    # previously skipped rows in case the other nodes didn't
                    # succeed or in case it failed to do so the first time.
                    if unpack_from('>I', key)[0] % \
                            len(nodes) == ordinal:
                        self.container_sync_row(
                            row, sync_to, user_key, broker, info, realm,
                            realm_key)
                    sync_point1 = row['ROWID']
                    broker.set_x_container_sync_points(sync_point1, None)
                self.container_syncs += 1
                self.logger.increment('syncs')
        except (Exception, Timeout):
            self.container_failures += 1
            self.logger.increment('failures')
            self.logger.exception(_('ERROR Syncing %s'),
                                  broker if broker else path)

    def container_sync_row(self, row, sync_to, user_key, broker, info,
                           realm, realm_key):
        """
        Sends the update the row indicates to the sync_to container.

        :param row: The updated row in the local database triggering the sync
                    update.
        :param sync_to: The URL to the remote container.
        :param user_key: The X-Container-Sync-Key to use when sending requests
                         to the other container.
        :param broker: The local container database broker.
        :param info: The get_info result from the local container database
                     broker.
        :param realm: The realm from self.realms_conf, if there is one.
                      If None, fallback to using the older allowed_sync_hosts
                      way of syncing.
        :param realm_key: The realm key from self.realms_conf, if there
                          is one. If None, fallback to using the older
                          allowed_sync_hosts way of syncing.
        :returns: True on success
        """
        try:
            start_time = time()
            # extract last modified time from the created_at value
            # (ts_ctype, the content-type timestamp, is not needed here)
            ts_data, ts_ctype, ts_meta = decode_timestamps(
                row['created_at'])
            if row['deleted']:
                # when sync'ing a deleted object, use ts_data - this is the
                # timestamp of the source tombstone
                try:
                    headers = {'x-timestamp': ts_data.internal}
                    if realm and realm_key:
                        # Realm-based auth: sign the request with a nonce.
                        nonce = uuid.uuid4().hex
                        path = urlparse(sync_to).path + '/' + quote(
                            row['name'])
                        sig = self.realms_conf.get_sig(
                            'DELETE', path, headers['x-timestamp'], nonce,
                            realm_key, user_key)
                        headers['x-container-sync-auth'] = '%s %s %s' % (
                            realm, nonce, sig)
                    else:
                        # Legacy allowed_sync_hosts auth: send the key itself.
                        headers['x-container-sync-key'] = user_key
                    delete_object(sync_to, name=row['name'], headers=headers,
                                  proxy=self.select_http_proxy(),
                                  logger=self.logger,
                                  timeout=self.conn_timeout)
                except ClientException as err:
                    # A 404 means the remote object is already gone; that
                    # still counts as a successful delete sync.
                    if err.http_status != HTTP_NOT_FOUND:
                        raise
                self.container_deletes += 1
                self.logger.increment('deletes')
                self.logger.timing_since('deletes.timing', start_time)
            else:
                # when sync'ing a live object, use ts_meta - this is the time
                # at which the source object was last modified by a PUT or POST
                exc = None
                # look up for the newest one
                headers_out = {'X-Newest': True,
                               'X-Backend-Storage-Policy-Index':
                               str(info['storage_policy_index'])}
                try:
                    source_obj_status, headers, body = \
                        self.swift.get_object(info['account'],
                                              info['container'], row['name'],
                                              headers=headers_out,
                                              acceptable_statuses=(2, 4))
                except (Exception, UnexpectedResponse, Timeout) as err:
                    headers = {}
                    body = None
                    exc = err
                # If the copy we fetched is older than the row's timestamp we
                # could not obtain a current enough source object; fail.
                timestamp = Timestamp(headers.get('x-timestamp', 0))
                if timestamp < ts_meta:
                    if exc:
                        raise exc
                    raise Exception(
                        _('Unknown exception trying to GET: '
                          '%(account)r %(container)r %(object)r'),
                        {'account': info['account'],
                         'container': info['container'],
                         'object': row['name']})
                # Strip GET-response headers that should not be replayed on
                # the PUT, and normalize etag/content-type.
                for key in ('date', 'last-modified'):
                    if key in headers:
                        del headers[key]
                if 'etag' in headers:
                    headers['etag'] = headers['etag'].strip('"')
                if 'content-type' in headers:
                    headers['content-type'] = clean_content_type(
                        headers['content-type'])
                if realm and realm_key:
                    # Realm-based auth, as in the DELETE branch above.
                    nonce = uuid.uuid4().hex
                    path = urlparse(sync_to).path + '/' + quote(row['name'])
                    sig = self.realms_conf.get_sig(
                        'PUT', path, headers['x-timestamp'], nonce, realm_key,
                        user_key)
                    headers['x-container-sync-auth'] = '%s %s %s' % (
                        realm, nonce, sig)
                else:
                    headers['x-container-sync-key'] = user_key
                put_object(sync_to, name=row['name'], headers=headers,
                           contents=FileLikeIter(body),
                           proxy=self.select_http_proxy(), logger=self.logger,
                           timeout=self.conn_timeout)
                self.container_puts += 1
                self.logger.increment('puts')
                self.logger.timing_since('puts.timing', start_time)
        except ClientException as err:
            if err.http_status == HTTP_UNAUTHORIZED:
                self.logger.info(
                    _('Unauth %(sync_from)r => %(sync_to)r'),
                    {'sync_from': '%s/%s' %
                        (quote(info['account']), quote(info['container'])),
                     'sync_to': sync_to})
            elif err.http_status == HTTP_NOT_FOUND:
                self.logger.info(
                    _('Not found %(sync_from)r => %(sync_to)r \
                      - object %(obj_name)r'),
                    {'sync_from': '%s/%s' %
                        (quote(info['account']), quote(info['container'])),
                     'sync_to': sync_to, 'obj_name': row['name']})
            else:
                self.logger.exception(
                    _('ERROR Syncing %(db_file)s %(row)s'),
                    {'db_file': str(broker), 'row': row})
            self.container_failures += 1
            self.logger.increment('failures')
            return False
        except (Exception, Timeout) as err:
            self.logger.exception(
                _('ERROR Syncing %(db_file)s %(row)s'),
                {'db_file': str(broker), 'row': row})
            self.container_failures += 1
            self.logger.increment('failures')
            return False
        return True

    def select_http_proxy(self):
        # Pick one configured sync proxy at random, or None if none are set.
        return choice(self.http_proxies) if self.http_proxies else None
| |
# Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import struct
from ryu import utils
from ryu.lib import type_desc
from ryu.ofproto import nicira_ext
from ryu.ofproto import ofproto_common
from ryu.lib.pack_utils import msg_pack_into
from ryu.ofproto.ofproto_parser import StringifyMixin
def generate(ofp_name, ofpp_name):
import sys
ofp = sys.modules[ofp_name]
ofpp = sys.modules[ofpp_name]
class _NXFlowSpec(StringifyMixin):
_hdr_fmt_str = '!H' # 2 bit 0s, 1 bit src, 2 bit dst, 11 bit n_bits
_dst_type = None
_subclasses = {}
_TYPE = {
'nx-flow-spec-field': [
'src',
'dst',
]
}
def __init__(self, src, dst, n_bits):
self.src = src
self.dst = dst
self.n_bits = n_bits
@classmethod
def register(cls, subcls):
assert issubclass(subcls, cls)
assert subcls._dst_type not in cls._subclasses
cls._subclasses[subcls._dst_type] = subcls
@classmethod
def parse(cls, buf):
(hdr,) = struct.unpack_from(cls._hdr_fmt_str, buf, 0)
rest = buf[struct.calcsize(cls._hdr_fmt_str):]
if hdr == 0:
return None, rest # all-0 header is no-op for padding
src_type = (hdr >> 13) & 0x1
dst_type = (hdr >> 11) & 0x3
n_bits = hdr & 0x3ff
subcls = cls._subclasses[dst_type]
if src_type == 0: # subfield
src = cls._parse_subfield(rest)
rest = rest[6:]
elif src_type == 1: # immediate
src_len = (n_bits + 15) // 16 * 2
src_bin = rest[:src_len]
src = type_desc.IntDescr(size=src_len).to_user(src_bin)
rest = rest[src_len:]
if dst_type == 0: # match
dst = cls._parse_subfield(rest)
rest = rest[6:]
elif dst_type == 1: # load
dst = cls._parse_subfield(rest)
rest = rest[6:]
elif dst_type == 2: # output
dst = '' # empty
return subcls(src=src, dst=dst, n_bits=n_bits), rest
def serialize(self):
buf = bytearray()
if isinstance(self.src, tuple):
src_type = 0 # subfield
else:
src_type = 1 # immediate
# header
val = (src_type << 13) | (self._dst_type << 11) | self.n_bits
msg_pack_into(self._hdr_fmt_str, buf, 0, val)
# src
if src_type == 0: # subfield
buf += self._serialize_subfield(self.src)
elif src_type == 1: # immediate
src_len = (self.n_bits + 15) // 16 * 2
buf += type_desc.IntDescr(size=src_len).from_user(self.src)
# dst
if self._dst_type == 0: # match
buf += self._serialize_subfield(self.dst)
elif self._dst_type == 1: # load
buf += self._serialize_subfield(self.dst)
elif self._dst_type == 2: # output
pass # empty
return buf
@staticmethod
def _parse_subfield(buf):
(n, len) = ofp.oxm_parse_header(buf, 0)
assert len == 4 # only 4-bytes NXM/OXM are defined
field = ofp.oxm_to_user_header(n)
rest = buf[len:]
(ofs,) = struct.unpack_from('!H', rest, 0)
return (field, ofs)
@staticmethod
def _serialize_subfield(subfield):
(field, ofs) = subfield
buf = bytearray()
n = ofp.oxm_from_user_header(field)
ofp.oxm_serialize_header(n, buf, 0)
assert len(buf) == 4 # only 4-bytes NXM/OXM are defined
msg_pack_into('!H', buf, 4, ofs)
return buf
class NXFlowSpecMatch(_NXFlowSpec):
# Add a match criteria
# an example of the corresponding ovs-ofctl syntax:
# NXM_OF_VLAN_TCI[0..11]
_dst_type = 0
class NXFlowSpecLoad(_NXFlowSpec):
# Add NXAST_REG_LOAD actions
# an example of the corresponding ovs-ofctl syntax:
# NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[]
_dst_type = 1
class NXFlowSpecOutput(_NXFlowSpec):
# Add an OFPAT_OUTPUT action
# an example of the corresponding ovs-ofctl syntax:
# output:NXM_OF_IN_PORT[]
_dst_type = 2
def __init__(self, src, n_bits, dst=''):
assert dst == ''
super(NXFlowSpecOutput, self).__init__(src=src, dst=dst,
n_bits=n_bits)
class NXAction(ofpp.OFPActionExperimenter):
_fmt_str = '!H' # subtype
_subtypes = {}
_experimenter = ofproto_common.NX_EXPERIMENTER_ID
def __init__(self):
super(NXAction, self).__init__(experimenter=self._experimenter)
self.subtype = self._subtype
@classmethod
def parse(cls, buf):
fmt_str = NXAction._fmt_str
(subtype,) = struct.unpack_from(fmt_str, buf, 0)
subtype_cls = cls._subtypes.get(subtype)
rest = buf[struct.calcsize(fmt_str):]
if subtype_cls is None:
return NXActionUnknown(subtype, rest)
return subtype_cls.parse(rest)
def serialize(self, buf, offset):
super(NXAction, self).serialize(buf, offset)
msg_pack_into(NXAction._fmt_str,
buf,
offset + ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE,
self.subtype)
@classmethod
def register(cls, subtype_cls):
assert subtype_cls._subtype is not cls._subtypes
cls._subtypes[subtype_cls._subtype] = subtype_cls
class NXActionUnknown(NXAction):
def __init__(self, subtype, data=None,
type_=None, len_=None, experimenter=None):
self._subtype = subtype
super(NXActionUnknown, self).__init__()
self.data = data
@classmethod
def parse(cls, subtype, buf):
return cls(data=buf)
def serialize(self, buf, offset):
# fixup
data = self.data
if data is None:
data = bytearray()
payload_offset = (
ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE +
struct.calcsize(NXAction._fmt_str)
)
self.len = utils.round_up(payload_offset + len(data), 8)
super(NXActionUnknown, self).serialize(buf, offset)
buf += data
class NXActionRegLoad(NXAction):
_subtype = nicira_ext.NXAST_REG_LOAD
_fmt_str = '!HIQ' # ofs_nbits, dst, value
_TYPE = {
'ascii': [
'dst',
]
}
def __init__(self, ofs, nbits, dst, value,
type_=None, len_=None, experimenter=None, subtype=None):
super(NXActionRegLoad, self).__init__()
self.ofs = ofs
self.nbits = nbits
self.dst = dst
self.value = value
@classmethod
def parse(cls, buf):
(ofs_nbits, dst, value,) = struct.unpack_from(
NXActionRegLoad._fmt_str, buf, 0)
ofs = ofs_nbits >> 6
nbits = (ofs_nbits & ((1 << 6) - 1)) + 1
# Right-shift instead of using oxm_parse_header for simplicity...
dst_name = ofp.oxm_to_user_header(dst >> 9)
return cls(ofs, nbits, dst_name, value)
def serialize(self, buf, offset):
hdr_data = bytearray()
n = ofp.oxm_from_user_header(self.dst)
ofp.oxm_serialize_header(n, hdr_data, 0)
(dst_num,) = struct.unpack_from('!I', six.binary_type(hdr_data), 0)
ofs_nbits = (self.ofs << 6) + self.nbits - 1
data = bytearray()
msg_pack_into(NXActionRegLoad._fmt_str, data, 0,
ofs_nbits, dst_num, self.value)
payload_offset = (
ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE +
struct.calcsize(NXAction._fmt_str)
)
self.len = utils.round_up(payload_offset + len(data), 8)
super(NXActionRegLoad, self).serialize(buf, offset)
msg_pack_into('!%ds' % len(data), buf, offset + payload_offset,
bytes(data))
class NXActionRegMove(NXAction):
_subtype = nicira_ext.NXAST_REG_MOVE
_fmt_str = '!HHH' # n_bits, src_ofs, dst_ofs
# Followed by OXM fields (src, dst) and padding to 8 bytes boundary
_TYPE = {
'ascii': [
'src_field',
'dst_field',
]
}
def __init__(self, src_field, dst_field, n_bits, src_ofs=0, dst_ofs=0,
type_=None, len_=None, experimenter=None, subtype=None):
super(NXActionRegMove, self).__init__()
self.n_bits = n_bits
self.src_ofs = src_ofs
self.dst_ofs = dst_ofs
self.src_field = src_field
self.dst_field = dst_field
@classmethod
def parse(cls, buf):
(n_bits, src_ofs, dst_ofs,) = struct.unpack_from(
NXActionRegMove._fmt_str, buf, 0)
rest = buf[struct.calcsize(NXActionRegMove._fmt_str):]
# src field
(n, len) = ofp.oxm_parse_header(rest, 0)
src_field = ofp.oxm_to_user_header(n)
rest = rest[len:]
# dst field
(n, len) = ofp.oxm_parse_header(rest, 0)
dst_field = ofp.oxm_to_user_header(n)
rest = rest[len:]
# ignore padding
return cls(src_field, dst_field=dst_field, n_bits=n_bits,
src_ofs=src_ofs, dst_ofs=dst_ofs)
def serialize(self, buf, offset):
# fixup
data = bytearray()
msg_pack_into(NXActionRegMove._fmt_str, data, 0,
self.n_bits, self.src_ofs, self.dst_ofs)
# src field
n = ofp.oxm_from_user_header(self.src_field)
ofp.oxm_serialize_header(n, data, len(data))
# dst field
n = ofp.oxm_from_user_header(self.dst_field)
ofp.oxm_serialize_header(n, data, len(data))
payload_offset = (
ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE +
struct.calcsize(NXAction._fmt_str)
)
self.len = utils.round_up(payload_offset + len(data), 8)
super(NXActionRegMove, self).serialize(buf, offset)
msg_pack_into('!%ds' % len(data), buf, offset + payload_offset,
bytes(data))
class NXActionLearn(NXAction):
_subtype = nicira_ext.NXAST_LEARN
# idle_timeout, hard_timeout, priority, cookie, flags,
# table_id, pad, fin_idle_timeout, fin_hard_timeout
_fmt_str = '!HHHQHBxHH'
# Followed by flow_mod_specs
def __init__(self,
table_id,
specs,
idle_timeout=0,
hard_timeout=0,
priority=ofp.OFP_DEFAULT_PRIORITY,
cookie=0,
flags=0,
fin_idle_timeout=0,
fin_hard_timeout=0,
type_=None, len_=None, experimenter=None, subtype=None):
super(NXActionLearn, self).__init__()
self.idle_timeout = idle_timeout
self.hard_timeout = hard_timeout
self.priority = priority
self.cookie = cookie
self.flags = flags
self.table_id = table_id
self.fin_idle_timeout = fin_idle_timeout
self.fin_hard_timeout = fin_hard_timeout
self.specs = specs
@classmethod
def parse(cls, buf):
(idle_timeout,
hard_timeout,
priority,
cookie,
flags,
table_id,
fin_idle_timeout,
fin_hard_timeout,) = struct.unpack_from(
NXActionLearn._fmt_str, buf, 0)
rest = buf[struct.calcsize(NXActionLearn._fmt_str):]
# specs
specs = []
while len(rest) > 0:
spec, rest = _NXFlowSpec.parse(rest)
if spec is None:
continue
specs.append(spec)
return cls(idle_timeout=idle_timeout,
hard_timeout=hard_timeout,
priority=priority,
cookie=cookie,
flags=flags,
table_id=table_id,
fin_idle_timeout=fin_idle_timeout,
fin_hard_timeout=fin_hard_timeout,
specs=specs)
def serialize(self, buf, offset):
# fixup
data = bytearray()
msg_pack_into(NXActionLearn._fmt_str, data, 0,
self.idle_timeout,
self.hard_timeout,
self.priority,
self.cookie,
self.flags,
self.table_id,
self.fin_idle_timeout,
self.fin_hard_timeout)
for spec in self.specs:
data += spec.serialize()
payload_offset = (
ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE +
struct.calcsize(NXAction._fmt_str)
)
self.len = utils.round_up(payload_offset + len(data), 8)
super(NXActionLearn, self).serialize(buf, offset)
msg_pack_into('!%ds' % len(data), buf, offset + payload_offset,
bytes(data))
class NXActionConjunction(NXAction):
_subtype = nicira_ext.NXAST_CONJUNCTION
# clause, n_clauses, id
_fmt_str = '!BBI'
def __init__(self,
clause,
n_clauses,
id_,
type_=None, len_=None, experimenter=None, subtype=None):
super(NXActionConjunction, self).__init__()
self.clause = clause
self.n_clauses = n_clauses
self.id = id_
@classmethod
def parse(cls, buf):
(clause,
n_clauses,
id_,) = struct.unpack_from(
NXActionConjunction._fmt_str, buf, 0)
return cls(clause, n_clauses, id_)
def serialize(self, buf, offset):
data = bytearray()
msg_pack_into(NXActionConjunction._fmt_str, data, 0,
self.clause,
self.n_clauses,
self.id)
payload_offset = (
ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE +
struct.calcsize(NXAction._fmt_str)
)
self.len = utils.round_up(payload_offset + len(data), 8)
super(NXActionConjunction, self).serialize(buf, offset)
msg_pack_into('!%ds' % len(data), buf, offset + payload_offset,
bytes(data))
class NXActionResubmitTable(NXAction):
_subtype = nicira_ext.NXAST_RESUBMIT_TABLE
# in_port, table_id
_fmt_str = '!HB3x'
def __init__(self,
in_port,
table_id,
type_=None, len_=None, experimenter=None, subtype=None):
super(NXActionResubmitTable, self).__init__()
self.in_port = in_port
self.table_id = table_id
@classmethod
def parse(cls, buf):
(in_port,
table_id) = struct.unpack_from(
NXActionResubmitTable._fmt_str, buf, 0)
return cls(in_port, table_id)
def serialize(self, buf, offset):
data = bytearray()
msg_pack_into(NXActionResubmitTable._fmt_str, data, 0,
self.in_port,
self.table_id)
payload_offset = (
ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE +
struct.calcsize(NXAction._fmt_str)
)
self.len = utils.round_up(payload_offset + len(data), 8)
super(NXActionResubmitTable, self).serialize(buf, offset)
msg_pack_into('!%ds' % len(data), buf, offset + payload_offset,
bytes(data))
class NXActionCT(NXAction):
    """Nicira NXAST_CT extension action (fixed fields + nested actions)."""
    _subtype = nicira_ext.NXAST_CT

    # flags, zone_src, zone_ofs_nbits (zone_imm), recirc_table,
    # pad, alg
    _fmt_str = '!HIHB3xH'
    # Followed by actions

    def __init__(self,
                 flags,
                 zone_src,
                 zone_ofs_nbits,  # is zone_imm if zone_src == 0
                 recirc_table,
                 alg,
                 actions,
                 type_=None, len_=None, experimenter=None, subtype=None):
        super(NXActionCT, self).__init__()
        self.flags = flags
        self.zone_src = zone_src
        self.zone_ofs_nbits = zone_ofs_nbits
        self.recirc_table = recirc_table
        self.alg = alg
        self.actions = actions

    @classmethod
    def parse(cls, buf):
        (flags, zone_src, zone_ofs_nbits, recirc_table,
         alg) = struct.unpack_from(NXActionCT._fmt_str, buf, 0)
        # Everything after the fixed header is a list of nested actions.
        rest = buf[struct.calcsize(NXActionCT._fmt_str):]
        actions = []
        while rest:
            nested = ofpp.OFPAction.parser(rest, 0)
            actions.append(nested)
            rest = rest[nested.len:]
        return cls(flags, zone_src, zone_ofs_nbits, recirc_table,
                   alg, actions)

    def serialize(self, buf, offset):
        body = bytearray()
        msg_pack_into(NXActionCT._fmt_str, body, 0,
                      self.flags,
                      self.zone_src,
                      self.zone_ofs_nbits,
                      self.recirc_table,
                      self.alg)
        # Nested actions are appended directly after the fixed fields.
        for nested in self.actions:
            nested.serialize(body, len(body))
        header_size = (
            ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE +
            struct.calcsize(NXAction._fmt_str)
        )
        # Whole action length is rounded up to a multiple of 8 octets.
        self.len = utils.round_up(header_size + len(body), 8)
        super(NXActionCT, self).serialize(buf, offset)
        msg_pack_into('!%ds' % len(body), buf, offset + header_size,
                      bytes(body))
class NXActionNAT(NXAction):
    """Nicira NXAST_NAT extension action."""
    _subtype = nicira_ext.NXAST_NAT

    # pad, flags, range_present
    _fmt_str = '!2xHH'
    # Followed by optional parameters

    _TYPE = {
        'ascii': [
            'range_ipv4_max',
            'range_ipv4_min',
            'range_ipv6_max',
            'range_ipv6_min',
        ]
    }

    def __init__(self,
                 flags,
                 range_ipv4_min='',
                 range_ipv4_max='',
                 range_ipv6_min='',
                 range_ipv6_max='',
                 range_proto_min=None,
                 range_proto_max=None,
                 type_=None, len_=None, experimenter=None, subtype=None):
        super(NXActionNAT, self).__init__()
        self.flags = flags
        self.range_ipv4_min = range_ipv4_min
        self.range_ipv4_max = range_ipv4_max
        self.range_ipv6_min = range_ipv6_min
        self.range_ipv6_max = range_ipv6_max
        self.range_proto_min = range_proto_min
        self.range_proto_max = range_proto_max

    @classmethod
    def parse(cls, buf):
        flags, range_present = struct.unpack_from(
            NXActionNAT._fmt_str, buf, 0)
        rest = buf[struct.calcsize(NXActionNAT._fmt_str):]
        # Optional fields appear in this fixed order on the wire, each one
        # present only when its bit is set in range_present.
        kwargs = dict()
        for bit, name, size, conv in (
                (nicira_ext.NX_NAT_RANGE_IPV4_MIN,
                 'range_ipv4_min', 4, type_desc.IPv4Addr),
                (nicira_ext.NX_NAT_RANGE_IPV4_MAX,
                 'range_ipv4_max', 4, type_desc.IPv4Addr),
                (nicira_ext.NX_NAT_RANGE_IPV6_MIN,
                 'range_ipv6_min', 16, type_desc.IPv6Addr),
                (nicira_ext.NX_NAT_RANGE_IPV6_MAX,
                 'range_ipv6_max', 16, type_desc.IPv6Addr),
                (nicira_ext.NX_NAT_RANGE_PROTO_MIN,
                 'range_proto_min', 2, type_desc.Int2),
                (nicira_ext.NX_NAT_RANGE_PROTO_MAX,
                 'range_proto_max', 2, type_desc.Int2)):
            if range_present & bit:
                kwargs[name] = conv.to_user(rest[:size])
                rest = rest[size:]
        return cls(flags, **kwargs)

    def serialize(self, buf, offset):
        # Pack optional parameters first, as range_present needs
        # to be calculated.  Address ranges use '' as the "unset" sentinel,
        # proto ranges use None — the checks below must stay distinct.
        pieces = []
        range_present = 0
        if self.range_ipv4_min != '':
            range_present |= nicira_ext.NX_NAT_RANGE_IPV4_MIN
            pieces.append(type_desc.IPv4Addr.from_user(self.range_ipv4_min))
        if self.range_ipv4_max != '':
            range_present |= nicira_ext.NX_NAT_RANGE_IPV4_MAX
            pieces.append(type_desc.IPv4Addr.from_user(self.range_ipv4_max))
        if self.range_ipv6_min != '':
            range_present |= nicira_ext.NX_NAT_RANGE_IPV6_MIN
            pieces.append(type_desc.IPv6Addr.from_user(self.range_ipv6_min))
        if self.range_ipv6_max != '':
            range_present |= nicira_ext.NX_NAT_RANGE_IPV6_MAX
            pieces.append(type_desc.IPv6Addr.from_user(self.range_ipv6_max))
        if self.range_proto_min is not None:
            range_present |= nicira_ext.NX_NAT_RANGE_PROTO_MIN
            pieces.append(type_desc.Int2.from_user(self.range_proto_min))
        if self.range_proto_max is not None:
            range_present |= nicira_ext.NX_NAT_RANGE_PROTO_MAX
            pieces.append(type_desc.Int2.from_user(self.range_proto_max))
        optional_data = b''.join(pieces)

        body = bytearray()
        msg_pack_into(NXActionNAT._fmt_str, body, 0,
                      self.flags, range_present)
        msg_pack_into('!%ds' % len(optional_data), body, len(body),
                      optional_data)
        header_size = (
            ofp.OFP_ACTION_EXPERIMENTER_HEADER_SIZE +
            struct.calcsize(NXAction._fmt_str)
        )
        # Whole action length is rounded up to a multiple of 8 octets.
        self.len = utils.round_up(header_size + len(body), 8)
        super(NXActionNAT, self).serialize(buf, offset)
        msg_pack_into('!%ds' % len(body), buf, offset + header_size,
                      bytes(body))
def add_attr(k, v):
    """Expose *v* on the ofpp module under the name *k*."""
    v.__module__ = ofpp.__name__  # Necessary for stringify stuff
    setattr(ofpp, k, v)
add_attr('NXAction', NXAction)
add_attr('NXActionUnknown', NXActionUnknown)

# Names defined above that are exported on the ofpp module.  NXAction and
# _NXFlowSpec subclasses are additionally registered with their respective
# parser dispatch tables.
classes = [
    'NXActionRegLoad',
    'NXActionRegMove',
    'NXActionLearn',
    'NXActionConjunction',
    'NXActionResubmitTable',
    'NXActionCT',
    'NXActionNAT',
    '_NXFlowSpec',  # exported for testing
    'NXFlowSpecMatch',
    'NXFlowSpecLoad',
    'NXFlowSpecOutput',
]
# Renamed from 'vars' so the builtin vars() is not shadowed.
local_names = locals()
for name in classes:
    cls = local_names[name]
    add_attr(name, cls)
    if issubclass(cls, NXAction):
        NXAction.register(cls)
    if issubclass(cls, _NXFlowSpec):
        _NXFlowSpec.register(cls)
| |
import pytest
from netaddr import cidr_abbrev_to_verbose
from netaddr.strategy.ipv4 import expand_partial_address
def test_cidr_abbrev_to_verbose():
    """Every single-octet abbreviation expands to its classful CIDR."""
    octets = range(0, 256)
    cidrs = [cidr_abbrev_to_verbose(octet) for octet in octets]
    assert len(cidrs) == 256
    # Classful inference: A (0-127) -> /8, B (128-191) -> /16,
    # C (192-223) -> /24, D/multicast (224-239) -> /4, E (240-255) -> /32.
    expected = (
        ['%d.0.0.0/8' % i for i in range(0, 128)]
        + ['%d.0.0.0/16' % i for i in range(128, 192)]
        + ['%d.0.0.0/24' % i for i in range(192, 224)]
        + ['%d.0.0.0/4' % i for i in range(224, 240)]
        + ['%d.0.0.0/32' % i for i in range(240, 256)]
    )
    assert cidrs == expected
def test_cidr_abbrev_to_verbose_invalid_prefixlen():
    """An out-of-range prefix length is passed through unchanged."""
    abbrev = '192.0.2.0/33'
    assert cidr_abbrev_to_verbose(abbrev) == abbrev
def test_expand_partial_address():
    """Partial dotted quads are zero-padded to four octets."""
    cases = {
        '10': '10.0.0.0',
        '10.1': '10.1.0.0',
        '192.168.1': '192.168.1.0',
    }
    for partial, full in cases.items():
        assert expand_partial_address(partial) == full
| |
"""
Convention:
"attr" means the original attribute object.
"pattr" means class PrettyAttribute instance.
"""
from __future__ import print_function
import inspect
import platform
from sys import _getframe
from typing import Any
from typing import List
from typing import Optional
from typing import Tuple
from ._internal_utils import get_attr_from_dict, is_ptpython
from .attr_category import AttrCategory, get_attr_category, category_match
from .constants import dummy_obj, GETTER, SETTER, DELETER
from . import format
if platform.system() == 'Windows':
from colorama import init
init() # To support Windows.
class PrettyDir:
    """Class that provides pretty dir and search API."""

    def __init__(
        self, obj: Any = dummy_obj, pattrs: Optional[List['PrettyAttribute']] = None
    ) -> None:
        """
        Args:
            obj: The object to inspect.  Defaults to the ``dummy_obj``
                sentinel, which means "list the caller's local names".
            pattrs: Used when returning search result.
        """
        self.obj = obj
        if pattrs is None:
            if obj is dummy_obj:
                # User is calling dir() without arguments.
                # _getframe(1) is the *caller's* frame, so the caller's
                # locals are listed; this relies on being invoked directly
                # (no extra wrapper frame in between).
                attrs = _getframe(1).f_locals
                self.dir_result = sorted(list(attrs.keys()))
            else:
                self.dir_result = dir(self.obj)
                attrs = {
                    name: get_attr_from_dict(self.obj, name) for name in self.dir_result
                }
            self.pattrs = [
                PrettyAttribute(name, get_attr_category(name, attr, obj), attr)
                for name, attr in attrs.items()
            ]
        else:
            # Re-wrap pre-built attribute entries (used by search/filters).
            self.pattrs = pattrs
            self.dir_result = sorted([p.name for p in pattrs])

    def __repr__(self) -> str:
        # NOTE(review): under ptpython the formatted output is printed
        # directly and an empty string returned — presumably so the
        # terminal formatting survives ptpython's repr handling; confirm.
        if not is_ptpython():
            return format.format_pattrs(self.pattrs)
        print(format.format_pattrs(self.pattrs), end='')
        return ''

    def __len__(self) -> int:
        return len(self.dir_result)

    def __getitem__(self, index: int) -> str:
        return self.dir_result[index]

    def index(self, value: str) -> int:
        # list.index semantics: raises ValueError when value is absent.
        return self.dir_result.index(value)

    def search(self, term: str, case_sensitive: bool = False) -> 'PrettyDir':
        """Searches for names that match some pattern.

        Args:
            term: String used to match names.  A name is returned if it
                contains the search term as a substring.
            case_sensitive: Boolean to match case or not, default is False
                (case insensitive).

        Return:
            A PrettyDir object with matched names.
        """
        if case_sensitive:
            return PrettyDir(
                self.obj, [pattr for pattr in self.pattrs if term in pattr.name]
            )
        term = term.lower()
        return PrettyDir(
            self.obj, [pattr for pattr in self.pattrs if term in pattr.name.lower()]
        )

    # Shorthand alias for search().
    s = search

    # Below methods "methods", "public", "own" can be chained when necessary.
    # That is, for listing all public methods that are not inherited,
    # use pdir(obj).public.own.methods
    # The order should not affect results.

    @property
    def properties(self) -> 'PrettyDir':
        """Returns all properties of the inspected object.

        Note that "properties" can mean "variables".
        """
        return PrettyDir(
            self.obj,
            [
                pattr
                for pattr in self.pattrs
                if category_match(pattr.category, AttrCategory.PROPERTY)
            ],
        )

    @property
    def methods(self) -> 'PrettyDir':
        """Returns all methods of the inspected object.

        Note that "methods" can mean "functions" when inspecting a module.
        """
        return PrettyDir(
            self.obj,
            [
                pattr
                for pattr in self.pattrs
                if category_match(pattr.category, AttrCategory.FUNCTION)
            ],
        )

    @property
    def public(self) -> 'PrettyDir':
        """Returns public attributes of the inspected object."""
        return PrettyDir(
            self.obj, [pattr for pattr in self.pattrs if not pattr.name.startswith('_')]
        )

    @property
    def own(self) -> 'PrettyDir':
        """Returns attributes that are not inherited from parent classes.

        Now we only use a simple judgement, it is expected that many attributes
        not get returned, especially invoked on a module.

        For instance, there's no way to distinguish between properties that
        are initialized in instance class's __init__ and parent class's
        __init__(assuming super() is called). So we'll just leave it.
        """
        # NOTE(review): assumes self.obj has a __dict__; slotted instances
        # would raise AttributeError here — confirm callers guard this.
        return PrettyDir(
            self.obj,
            [
                pattr
                for pattr in self.pattrs
                if pattr.name in type(self.obj).__dict__
                or pattr.name in self.obj.__dict__
            ],
        )
class PrettyAttribute:
    """One attribute entry plus the metadata used to display it."""

    def __init__(
        self, name: str, category: Tuple[AttrCategory, ...], attr_obj: Any
    ) -> None:
        self.name = name
        self.category = category
        # Names are grouped by their category. When multiple categories exist,
        # pick the largest one which usually represents a more detailed
        # category.
        self.display_group = max(category)
        self.attr_obj = attr_obj
        self.doc = self.get_oneline_doc()
        # single category can not be a bare slot
        self.slotted = AttrCategory.SLOT in self.category

    def __repr__(self) -> str:
        return '%s: %s' % (self.name, self.category)

    def get_oneline_doc(self) -> str:
        """
        Doc doesn't necessarily mean doctring. It could be anything that
        should be put after the attr's name as an explanation.
        """
        attr = self.attr_obj
        if self.display_group == AttrCategory.DESCRIPTOR:
            if isinstance(attr, property):
                doc_list = ['@property with getter']
                if attr.fset:
                    doc_list.append(SETTER)
                if attr.fdel:
                    doc_list.append(DELETER)
            else:
                doc_list = ['class %s' % attr.__class__.__name__]
                if hasattr(attr, '__get__'):
                    doc_list.append(GETTER)
                if hasattr(attr, '__set__'):
                    doc_list.append(SETTER)
                if hasattr(attr, '__delete__'):
                    doc_list.append(DELETER)
                # Fold the first capability into the summary, e.g.
                # "class Foo with getter, setter".
                # NOTE(review): assumes at least one of __get__/__set__/
                # __delete__ exists (anything categorized DESCRIPTOR should)
                # — pop(1) would raise IndexError otherwise; confirm.
                doc_list[0] = ' '.join([doc_list[0], 'with', doc_list.pop(1)])
            doc = inspect.getdoc(attr)
            if doc is not None:
                doc_list.append(doc.split('\n', 1)[0])
            return ', '.join(doc_list)
        try:
            hasattr_doc = hasattr(attr, '__doc__')
        except Exception:
            # hasattr() itself may raise on exotic proxy objects; treat that
            # as "no doc".  Was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit — narrowed to Exception.
            hasattr_doc = False
        if hasattr_doc:
            doc = inspect.getdoc(attr)
            return doc.split('\n', 1)[0] if doc else ''  # default doc is None
        return ''
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Drivers for volumes.
"""
import os
import tempfile
import time
import urllib
from nova import exception
from nova import flags
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
from nova.volume import iscsi
LOG = logging.getLogger(__name__)
# Tunable options shared by the volume drivers in this module; they are
# registered on the global FLAGS object at import time (see below).
volume_opts = [
    cfg.StrOpt('volume_group',
               default='nova-volumes',
               help='Name for the VG that will contain exported volumes'),
    cfg.IntOpt('num_shell_tries',
               default=3,
               help='number of times to attempt to run flakey shell commands'),
    cfg.IntOpt('num_iscsi_scan_tries',
               default=3,
               help='number of times to rescan iSCSI target to find volume'),
    cfg.IntOpt('iscsi_num_targets',
               default=100,
               help='Number of iscsi target ids per host'),
    cfg.StrOpt('iscsi_target_prefix',
               default='iqn.2010-10.org.openstack:',
               help='prefix for iscsi volumes'),
    cfg.StrOpt('iscsi_ip_address',
               default='$my_ip',
               help='use this ip for iscsi'),
    cfg.IntOpt('iscsi_port',
               default=3260,
               help='The port that the iSCSI daemon is listening on'),
    cfg.StrOpt('rbd_pool',
               default='rbd',
               help='the RADOS pool in which rbd volumes are stored'),
    cfg.StrOpt('rbd_user',
               default=None,
               help='the RADOS client name for accessing rbd volumes'),
    cfg.StrOpt('rbd_secret_uuid',
               default=None,
               # FIX: the implicitly-concatenated literals were missing a
               # separating space ("...rbd_uservolumes").
               help='the libvirt uuid of the secret for the rbd_user '
                    'volumes'),
    cfg.StrOpt('volume_tmp_dir',
               default=None,
               help='where to store temporary image files if the volume '
                    'driver does not write them directly to the volume'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(volume_opts)
class VolumeDriver(object):
    """Executes commands relating to Volumes."""

    def __init__(self, execute=utils.execute, *args, **kwargs):
        # NOTE(vish): db is set by Manager
        self.db = None
        self.set_execute(execute)

    def set_execute(self, execute):
        """Sets the callable used to run shell commands."""
        self._execute = execute

    def _try_execute(self, *command, **kwargs):
        """Runs *command*, retrying with backoff on partial failures.

        Raises the last ProcessExecutionError after FLAGS.num_shell_tries
        attempts.
        """
        # NOTE(vish): Volume commands can partially fail due to timing, but
        #             running them a second time on failure will usually
        #             recover nicely.
        tries = 0
        while True:
            try:
                self._execute(*command, **kwargs)
                return True
            except exception.ProcessExecutionError:
                tries = tries + 1
                if tries >= FLAGS.num_shell_tries:
                    raise
                LOG.exception(_("Recovering from a failed execute. "
                                "Try number %s"), tries)
                # Quadratic backoff between attempts.
                time.sleep(tries ** 2)

    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met"""
        out, err = self._execute('vgs', '--noheadings', '-o', 'name',
                                 run_as_root=True)
        volume_groups = out.split()
        # Idiom fix: 'x not in y' instead of 'not x in y'.
        if FLAGS.volume_group not in volume_groups:
            exception_message = (_("volume group %s doesn't exist")
                                 % FLAGS.volume_group)
            raise exception.VolumeBackendAPIException(data=exception_message)

    def _create_volume(self, volume_name, sizestr):
        """Creates an LV of *sizestr* and puts an ext3 filesystem on it."""
        self._try_execute('lvcreate', '-L', sizestr, '-n',
                          volume_name, FLAGS.volume_group, run_as_root=True)
        # ISI
        dev_name = '/dev/' + FLAGS.volume_group + '/' + volume_name
        self._try_execute('mkfs.ext3',
                          dev_name, run_as_root=True)
        # !ISI

    def _copy_volume(self, srcstr, deststr, size_in_g):
        """dd-copies *size_in_g* gigabytes from srcstr to deststr."""
        # Use O_DIRECT to avoid thrashing the system buffer cache
        direct_flags = ('iflag=direct', 'oflag=direct')

        # Check whether O_DIRECT is supported (zero-length probe copy).
        try:
            self._execute('dd', 'count=0', 'if=%s' % srcstr, 'of=%s' % deststr,
                          *direct_flags, run_as_root=True)
        except exception.ProcessExecutionError:
            direct_flags = ()

        # Perform the copy
        self._execute('dd', 'if=%s' % srcstr, 'of=%s' % deststr,
                      'count=%d' % (size_in_g * 1024), 'bs=1M',
                      *direct_flags, run_as_root=True)

    def _volume_not_present(self, volume_name):
        """Best-effort check: True when lvdisplay cannot find the volume."""
        path_name = '%s/%s' % (FLAGS.volume_group, volume_name)
        try:
            self._try_execute('lvdisplay', path_name, run_as_root=True)
        except Exception:
            # If the volume isn't present (lvdisplay failed for any reason,
            # deliberately broad — this is a presence probe, not an action).
            return True
        return False

    def _delete_volume(self, volume, size_in_g):
        """Deletes a logical volume."""
        # zero out old volumes to prevent data leaking between users
        # TODO(ja): reclaiming space should be done lazy and low priority
        self._copy_volume('/dev/zero', self.local_path(volume), size_in_g)
        dev_path = self.local_path(volume)
        if os.path.exists(dev_path):
            self._try_execute('dmsetup', 'remove', '-f', dev_path,
                              run_as_root=True)
        self._try_execute('lvremove', '-f', "%s/%s" %
                          (FLAGS.volume_group,
                           self._escape_snapshot(volume['name'])),
                          run_as_root=True)

    def _sizestr(self, size_in_g):
        """Converts a gigabyte count into an lvcreate size string."""
        # Size 0 gets a 100M volume rather than a zero-length one.
        if int(size_in_g) == 0:
            return '100M'
        return '%sG' % size_in_g

    # Linux LVM reserves name that starts with snapshot, so that
    # such volume name can't be created. Mangle it.
    def _escape_snapshot(self, snapshot_name):
        if not snapshot_name.startswith('snapshot'):
            return snapshot_name
        return '_' + snapshot_name

    def create_volume(self, volume):
        """Creates a logical volume. Can optionally return a Dictionary of
        changes to the volume object to be persisted."""
        self._create_volume(volume['name'], self._sizestr(volume['size']))

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        self._create_volume(volume['name'], self._sizestr(volume['size']))
        self._copy_volume(self.local_path(snapshot), self.local_path(volume),
                          snapshot['volume_size'])

    def delete_volume(self, volume):
        """Deletes a logical volume."""
        if self._volume_not_present(volume['name']):
            # If the volume isn't present, then don't attempt to delete
            return True

        # TODO(yamahata): lvm can't delete origin volume only without
        # deleting derived snapshots. Can we do something fancy?
        out, err = self._execute('lvdisplay', '--noheading',
                                 '-C', '-o', 'Attr',
                                 '%s/%s' % (FLAGS.volume_group,
                                            volume['name']),
                                 run_as_root=True)
        # fake_execute returns None resulting unit test error
        if out:
            out = out.strip()
            # LVM attr starting with 'o'/'O' means the LV is an open origin.
            if (out[0] == 'o') or (out[0] == 'O'):
                raise exception.VolumeIsBusy(volume_name=volume['name'])

        self._delete_volume(volume, volume['size'])

    def create_snapshot(self, snapshot):
        """Creates a snapshot."""
        orig_lv_name = "%s/%s" % (FLAGS.volume_group, snapshot['volume_name'])
        self._try_execute('lvcreate', '-L',
                          self._sizestr(snapshot['volume_size']),
                          '--name', self._escape_snapshot(snapshot['name']),
                          '--snapshot', orig_lv_name, run_as_root=True)

    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
            # If the snapshot isn't present, then don't attempt to delete
            return True

        # TODO(yamahata): zeroing out the whole snapshot triggers COW.
        # it's quite slow.
        self._delete_volume(snapshot, snapshot['volume_size'])

    def local_path(self, volume):
        """Returns the /dev/mapper path for the volume's LV."""
        # NOTE(vish): stops deprecation warning
        # Device-mapper escapes '-' in VG/LV names as '--'.
        escaped_group = FLAGS.volume_group.replace('-', '--')
        escaped_name = self._escape_snapshot(volume['name']).replace('-', '--')
        return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)

    def ensure_export(self, context, volume):
        """Synchronously recreates an export for a logical volume."""
        raise NotImplementedError()

    def create_export(self, context, volume):
        """Exports the volume. Can optionally return a Dictionary of changes
        to the volume object to be persisted."""
        raise NotImplementedError()

    def remove_export(self, context, volume):
        """Removes an export for a logical volume."""
        raise NotImplementedError()

    def check_for_export(self, context, volume_id):
        """Make sure volume is exported."""
        raise NotImplementedError()

    def initialize_connection(self, volume, connector):
        """Allow connection to connector and return connection info."""
        raise NotImplementedError()

    def terminate_connection(self, volume, connector):
        """Disallow connection from connector"""
        raise NotImplementedError()

    def attach_volume(self, context, volume_id, instance_uuid, mountpoint):
        """Callback for volume attached to instance."""
        pass

    def detach_volume(self, context, volume_id):
        """Callback for volume detached."""
        pass

    def get_volume_stats(self, refresh=False):
        """Return the current state of the volume service. If 'refresh' is
        True, run the update first."""
        return None

    def do_setup(self, context):
        """Any initialization the volume driver does while starting"""
        pass

    def copy_image_to_volume(self, context, volume, image_service, image_id):
        """Fetch the image from image_service and write it to the volume."""
        raise NotImplementedError()

    def copy_volume_to_image(self, context, volume, image_service, image_id):
        """Copy the volume to the specified image."""
        raise NotImplementedError()

    def clone_image(self, volume, image_location):
        """Create a volume efficiently from an existing image.

        image_location is a string whose format depends on the
        image service backend in use. The driver should use it
        to determine whether cloning is possible.

        Returns a boolean indicating whether cloning occurred
        """
        return False
class ISCSIDriver(VolumeDriver):
    """Executes commands relating to ISCSI volumes.

    We make use of model provider properties as follows:

    ``provider_location``
      if present, contains the iSCSI target information in the same
      format as an ietadm discovery
      i.e. '<ip>:<port>,<portal> <target IQN>'

    ``provider_auth``
      if present, contains a space-separated triple:
      '<auth method> <auth username> <auth password>'.
      `CHAP` is the only auth_method in use at the moment.
    """

    def __init__(self, *args, **kwargs):
        self.tgtadm = iscsi.get_target_admin()
        super(ISCSIDriver, self).__init__(*args, **kwargs)

    def set_execute(self, execute):
        """Propagates the execute callable to the target admin helper."""
        super(ISCSIDriver, self).set_execute(execute)
        self.tgtadm.set_execute(execute)

    def ensure_export(self, context, volume):
        """Synchronously recreates an export for a logical volume."""
        # NOTE(jdg): tgtadm doesn't use the iscsi_targets table
        # TODO(jdg): In the future move all of the dependent stuff into the
        # corresponding target admin class
        if not isinstance(self.tgtadm, iscsi.TgtAdm):
            try:
                iscsi_target = self.db.volume_get_iscsi_target_num(context,
                                                                   volume['id'])
            except exception.NotFound:
                LOG.info(_("Skipping ensure_export. No iscsi_target "
                           "provisioned for volume: %s"), volume['id'])
                return
        else:
            iscsi_target = 1  # dummy value when using TgtAdm

        iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
        volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])

        # NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need
        # should clean this all up at some point in the future
        self.tgtadm.create_iscsi_target(iscsi_name, iscsi_target,
                                        0, volume_path,
                                        check_exit_code=False)

    def _ensure_iscsi_targets(self, context, host):
        """Ensure that target ids have been created in datastore."""
        # NOTE(jdg): tgtadm doesn't use the iscsi_targets table
        # TODO(jdg): In the future move all of the dependent stuff into the
        # corresponding target admin class
        if not isinstance(self.tgtadm, iscsi.TgtAdm):
            host_iscsi_targets = self.db.iscsi_target_count_by_host(context,
                                                                    host)
            if host_iscsi_targets >= FLAGS.iscsi_num_targets:
                return

            # NOTE(vish): Target ids start at 1, not 0.
            for target_num in xrange(1, FLAGS.iscsi_num_targets + 1):
                target = {'host': host, 'target_num': target_num}
                self.db.iscsi_target_create_safe(context, target)

    def create_export(self, context, volume):
        """Creates an export for a logical volume."""
        #BOOKMARK(jdg)
        iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
        volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])

        model_update = {}

        # TODO(jdg): In the future move all of the dependent stuff into the
        # corresponding target admin class
        if not isinstance(self.tgtadm, iscsi.TgtAdm):
            lun = 0
            self._ensure_iscsi_targets(context, volume['host'])
            iscsi_target = self.db.volume_allocate_iscsi_target(context,
                                                                volume['id'],
                                                                volume['host'])
        else:
            lun = 1  # For tgtadm the controller is lun 0, dev starts at lun 1
            iscsi_target = 0  # NOTE(jdg): Not used by tgtadm

        # NOTE(jdg): For TgtAdm case iscsi_name is the ONLY param we need
        # should clean this all up at some point in the future
        tid = self.tgtadm.create_iscsi_target(iscsi_name,
                                              iscsi_target,
                                              0,
                                              volume_path)
        model_update['provider_location'] = _iscsi_location(
            FLAGS.iscsi_ip_address, tid, iscsi_name, lun)
        return model_update

    def remove_export(self, context, volume):
        """Removes an export for a logical volume."""
        # NOTE(jdg): tgtadm doesn't use the iscsi_targets table
        # TODO(jdg): In the future move all of the dependent stuff into the
        # corresponding target admin class
        if not isinstance(self.tgtadm, iscsi.TgtAdm):
            try:
                iscsi_target = self.db.volume_get_iscsi_target_num(context,
                                                                   volume['id'])
            except exception.NotFound:
                LOG.info(_("Skipping remove_export. No iscsi_target "
                           "provisioned for volume: %s"), volume['id'])
                return
        else:
            iscsi_target = 0

        try:
            # NOTE: provider_location may be unset if the volume hasn't
            #       been exported
            location = volume['provider_location'].split(' ')
            iqn = location[1]

            # ietadm show will exit with an error
            # this export has already been removed
            self.tgtadm.show_target(iscsi_target, iqn=iqn)
        except Exception:
            # Deliberately broad: missing location, malformed location and
            # show_target failures all mean there is nothing to remove.
            LOG.info(_("Skipping remove_export. No iscsi_target "
                       "is presently exported for volume: %s"), volume['id'])
            return

        self.tgtadm.remove_iscsi_target(iscsi_target, 0, volume['id'])

    def _do_iscsi_discovery(self, volume):
        #TODO(justinsb): Deprecate discovery and use stored info
        #NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
        LOG.warn(_("ISCSI provider_location not stored, using discovery"))

        volume_name = volume['name']

        (out, _err) = self._execute('iscsiadm', '-m', 'discovery',
                                    '-t', 'sendtargets', '-p', volume['host'],
                                    run_as_root=True)
        for target in out.splitlines():
            if FLAGS.iscsi_ip_address in target and volume_name in target:
                return target
        return None

    def _get_iscsi_properties(self, volume):
        """Gets iscsi configuration

        We ideally get saved information in the volume entity, but fall back
        to discovery if need be. Discovery may be completely removed in future
        The properties are:

        :target_discovered:    boolean indicating whether discovery was used

        :target_iqn:    the IQN of the iSCSI target

        :target_portal:    the portal of the iSCSI target

        :target_lun:    the lun of the iSCSI target

        :volume_id:    the id of the volume (currently used by xen)

        :auth_method:, :auth_username:, :auth_password:

            the authentication details. Right now, either auth_method is not
            present meaning no authentication, or auth_method == `CHAP`
            meaning use CHAP with the specified credentials.
        """
        properties = {}

        location = volume['provider_location']

        if location:
            # provider_location is the same format as iSCSI discovery output
            properties['target_discovered'] = False
        else:
            location = self._do_iscsi_discovery(volume)

            if not location:
                raise exception.InvalidVolume(_("Could not find iSCSI export "
                                                " for volume %s") %
                                              (volume['name']))

            LOG.debug(_("ISCSI Discovery: Found %s") % (location))
            properties['target_discovered'] = True

        # Location format: '<portal>,<portal group tag> <IQN> [<lun>]'
        results = location.split(" ")
        properties['target_portal'] = results[0].split(",")[0]
        properties['target_iqn'] = results[1]
        try:
            properties['target_lun'] = int(results[2])
        except (IndexError, ValueError):
            # No explicit lun: tgtadm devices start at lun 1, ietadm at 0.
            if FLAGS.iscsi_helper == 'tgtadm':
                properties['target_lun'] = 1
            else:
                properties['target_lun'] = 0

        properties['volume_id'] = volume['id']

        auth = volume['provider_auth']
        if auth:
            (auth_method, auth_username, auth_secret) = auth.split()

            properties['auth_method'] = auth_method
            properties['auth_username'] = auth_username
            properties['auth_password'] = auth_secret

        return properties

    def _run_iscsiadm(self, iscsi_properties, iscsi_command):
        """Runs iscsiadm against the target described by iscsi_properties."""
        (out, err) = self._execute('iscsiadm', '-m', 'node', '-T',
                                   iscsi_properties['target_iqn'],
                                   '-p', iscsi_properties['target_portal'],
                                   *iscsi_command, run_as_root=True)
        # Lazy %-args instead of eager string interpolation.
        LOG.debug("iscsiadm %s: stdout=%s stderr=%s",
                  iscsi_command, out, err)
        return (out, err)

    def _iscsiadm_update(self, iscsi_properties, property_key, property_value):
        """Updates one iscsiadm node property."""
        iscsi_command = ('--op', 'update', '-n', property_key,
                         '-v', property_value)
        return self._run_iscsiadm(iscsi_properties, iscsi_command)

    def initialize_connection(self, volume, connector):
        """Initializes the connection and returns connection info.

        The iscsi driver returns a driver_volume_type of 'iscsi'.
        The format of the driver data is defined in _get_iscsi_properties.
        Example return value::

            {
                'driver_volume_type': 'iscsi'
                'data': {
                    'target_discovered': True,
                    'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
                    'target_portal': '127.0.0.0.1:3260',
                    'volume_id': 1,
                }
            }

        """
        iscsi_properties = self._get_iscsi_properties(volume)
        return {
            'driver_volume_type': 'iscsi',
            'data': iscsi_properties
        }

    def terminate_connection(self, volume, connector):
        pass

    def check_for_export(self, context, volume_id):
        """Make sure volume is exported."""
        vol_uuid_file = 'volume-%s' % volume_id
        volume_path = os.path.join(FLAGS.volumes_dir, vol_uuid_file)
        if os.path.isfile(volume_path):
            iqn = '%s%s' % (FLAGS.iscsi_target_prefix,
                            vol_uuid_file)
        else:
            raise exception.PersistentVolumeFileNotFound(volume_id=volume_id)

        # TODO(jdg): In the future move all of the dependent stuff into the
        # corresponding target admin class
        if not isinstance(self.tgtadm, iscsi.TgtAdm):
            tid = self.db.volume_get_iscsi_target_num(context, volume_id)
        else:
            tid = 0

        try:
            self.tgtadm.show_target(tid, iqn=iqn)
        # FIX: was the deprecated py2-only 'except X, e' form; the bound
        # exception was never used.
        except exception.ProcessExecutionError:
            # Instances remount read-only in this case.
            # /etc/init.d/iscsitarget restart and rebooting nova-volume
            # is better since ensure_export() works at boot time.
            LOG.error(_("Cannot confirm exported volume "
                        "id:%(volume_id)s.") % locals())
            raise

    def copy_image_to_volume(self, context, volume, image_service, image_id):
        """Fetch the image from image_service and write it to the volume."""
        volume_path = self.local_path(volume)
        with utils.temporary_chown(volume_path):
            with utils.file_open(volume_path, "wb") as image_file:
                image_service.download(context, image_id, image_file)

    def copy_volume_to_image(self, context, volume, image_service, image_id):
        """Copy the volume to the specified image."""
        volume_path = self.local_path(volume)
        with utils.temporary_chown(volume_path):
            with utils.file_open(volume_path) as volume_file:
                image_service.update(context, image_id, {}, volume_file)
class FakeISCSIDriver(ISCSIDriver):
    """Logs calls instead of executing."""

    def __init__(self, *args, **kwargs):
        # Route every command through fake_execute so nothing is run.
        super(FakeISCSIDriver, self).__init__(execute=self.fake_execute,
                                              *args, **kwargs)

    def check_for_setup_error(self):
        """No setup necessary in fake mode."""
        pass

    def initialize_connection(self, volume, connector):
        """Return a minimal iscsi connection description."""
        return {'driver_volume_type': 'iscsi', 'data': {}}

    def terminate_connection(self, volume, connector):
        """Nothing to tear down in fake mode."""
        pass

    @staticmethod
    def fake_execute(cmd, *_args, **_kwargs):
        """Execute that simply logs the command."""
        LOG.debug(_("FAKE ISCSI: %s"), cmd)
        return (None, None)
class RBDDriver(VolumeDriver):
    """Implements RADOS block device (RBD) volume commands."""

    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met."""
        (stdout, stderr) = self._execute('rados', 'lspools')
        pools = stdout.split("\n")
        if FLAGS.rbd_pool not in pools:
            exception_message = (_("rbd has no pool %s") %
                                 FLAGS.rbd_pool)
            raise exception.VolumeBackendAPIException(data=exception_message)

    def _supports_layering(self):
        # Layering (clone) support is detected by probing 'rbd --help'.
        # Rename '_' -> '_err' so the gettext alias is not shadowed.
        stdout, _err = self._execute('rbd', '--help')
        return 'clone' in stdout

    def create_volume(self, volume):
        """Creates a logical volume."""
        # rbd sizes are in MB; a 0-size request gets a small default image.
        if int(volume['size']) == 0:
            size = 100
        else:
            size = int(volume['size']) * 1024
        args = ['rbd', 'create',
                '--pool', FLAGS.rbd_pool,
                '--size', size,
                volume['name']]
        if self._supports_layering():
            # format-2 images are required for layering/cloning
            args += ['--new-format']
        self._try_execute(*args)

    def _clone(self, volume, src_pool, src_image, src_snap):
        # Clone a snapshot into a new image inside FLAGS.rbd_pool.
        self._try_execute('rbd', 'clone',
                          '--pool', src_pool,
                          '--image', src_image,
                          '--snap', src_snap,
                          '--dest-pool', FLAGS.rbd_pool,
                          '--dest', volume['name'])

    def _resize(self, volume):
        # Resize the image to the requested size (MB).
        size = int(volume['size']) * 1024
        self._try_execute('rbd', 'resize',
                          '--pool', FLAGS.rbd_pool,
                          '--image', volume['name'],
                          '--size', size)

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        self._clone(volume, FLAGS.rbd_pool,
                    snapshot['volume_name'], snapshot['name'])
        if int(volume['size']):
            self._resize(volume)

    def delete_volume(self, volume):
        """Deletes a logical volume.

        :raises exception.VolumeIsBusy: if the volume still has snapshots
        """
        stdout, _err = self._execute('rbd', 'snap', 'ls',
                                     '--pool', FLAGS.rbd_pool,
                                     volume['name'])
        # More than one line of 'snap ls' output means at least one
        # snapshot is listed below the header line.
        if stdout.count('\n') > 1:
            raise exception.VolumeIsBusy(volume_name=volume['name'])
        self._try_execute('rbd', 'rm',
                          '--pool', FLAGS.rbd_pool,
                          volume['name'])

    def create_snapshot(self, snapshot):
        """Creates an rbd snapshot."""
        self._try_execute('rbd', 'snap', 'create',
                          '--pool', FLAGS.rbd_pool,
                          '--snap', snapshot['name'],
                          snapshot['volume_name'])
        if self._supports_layering():
            # Snapshots must be protected before they can be cloned from.
            self._try_execute('rbd', 'snap', 'protect',
                              '--pool', FLAGS.rbd_pool,
                              '--snap', snapshot['name'],
                              snapshot['volume_name'])

    def delete_snapshot(self, snapshot):
        """Deletes an rbd snapshot.

        :raises exception.SnapshotIsBusy: if the snapshot cannot be
            unprotected (clones still depend on it)
        """
        if self._supports_layering():
            try:
                self._try_execute('rbd', 'snap', 'unprotect',
                                  '--pool', FLAGS.rbd_pool,
                                  '--snap', snapshot['name'],
                                  snapshot['volume_name'])
            except exception.ProcessExecutionError:
                raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
        self._try_execute('rbd', 'snap', 'rm',
                          '--pool', FLAGS.rbd_pool,
                          '--snap', snapshot['name'],
                          snapshot['volume_name'])

    def local_path(self, volume):
        """Returns the path of the rbd volume."""
        # This is the same as the remote path
        # since qemu accesses it directly.
        return "rbd:%s/%s" % (FLAGS.rbd_pool, volume['name'])

    def ensure_export(self, context, volume):
        """Synchronously recreates an export for a logical volume."""
        pass

    def create_export(self, context, volume):
        """Exports the volume."""
        pass

    def remove_export(self, context, volume):
        """Removes an export for a logical volume."""
        pass

    def check_for_export(self, context, volume_id):
        """Make sure volume is exported."""
        pass

    def initialize_connection(self, volume, connector):
        """Return rbd connection info, including cephx auth settings."""
        return {
            'driver_volume_type': 'rbd',
            'data': {
                'name': '%s/%s' % (FLAGS.rbd_pool, volume['name']),
                'auth_enabled': FLAGS.rbd_secret_uuid is not None,
                'auth_username': FLAGS.rbd_user,
                'secret_type': 'ceph',
                'secret_uuid': FLAGS.rbd_secret_uuid,
            }
        }

    def terminate_connection(self, volume, connector):
        pass

    def _parse_location(self, location):
        """Split an 'rbd://fsid/pool/image/snap' URL into its components.

        :raises exception.ImageUnacceptable: if the URL is not a
            well-formed rbd snapshot location
        """
        prefix = 'rbd://'
        if not location.startswith(prefix):
            reason = _('Image %s is not stored in rbd') % location
            raise exception.ImageUnacceptable(reason)
        # list() keeps len() and unpacking working when map() returns an
        # iterator (Python 3); identical behavior on Python 2.
        pieces = list(map(urllib.unquote, location[len(prefix):].split('/')))
        if any(p == '' for p in pieces):
            reason = _('Image %s has blank components') % location
            raise exception.ImageUnacceptable(reason)
        if len(pieces) != 4:
            reason = _('Image %s is not an rbd snapshot') % location
            raise exception.ImageUnacceptable(reason)
        return pieces

    def _get_fsid(self):
        # 'ceph fsid' prints the cluster id followed by a newline.
        stdout, _err = self._execute('ceph', 'fsid')
        return stdout.rstrip('\n')

    def _is_cloneable(self, image_location):
        """Return True if *image_location* is a readable snapshot in
        the same ceph cluster this driver talks to."""
        try:
            fsid, pool, image, snapshot = self._parse_location(image_location)
        except exception.ImageUnacceptable:
            return False

        if self._get_fsid() != fsid:
            reason = _('%s is in a different ceph cluster') % image_location
            LOG.debug(reason)
            return False

        # check that we can read the image
        try:
            self._execute('rbd', 'info',
                          '--pool', pool,
                          '--image', image,
                          '--snap', snapshot)
        except exception.ProcessExecutionError:
            LOG.debug(_('Unable to read image %s') % image_location)
            return False

        return True

    def clone_image(self, volume, image_location):
        """Create the volume by cloning an rbd-backed image if possible.

        :returns: True when a clone was made, False when the image is not
            cloneable from this cluster.
        """
        if image_location is None or not self._is_cloneable(image_location):
            return False
        _fsid, pool, image, snapshot = self._parse_location(image_location)
        self._clone(volume, pool, image, snapshot)
        self._resize(volume)
        return True

    def copy_image_to_volume(self, context, volume, image_service, image_id):
        # TODO(jdurgin): replace with librbd
        # this is a temporary hack, since rewriting this driver
        # to use librbd would take too long
        # BUG FIX: os.exists does not exist; os.path.exists is correct.
        if FLAGS.volume_tmp_dir and not os.path.exists(FLAGS.volume_tmp_dir):
            os.makedirs(FLAGS.volume_tmp_dir)

        with tempfile.NamedTemporaryFile(dir=FLAGS.volume_tmp_dir) as tmp:
            image_service.download(context, image_id, tmp)
            # import creates the image, so we must remove it first
            self._try_execute('rbd', 'rm',
                              '--pool', FLAGS.rbd_pool,
                              volume['name'])
            self._try_execute('rbd', 'import',
                              '--pool', FLAGS.rbd_pool,
                              tmp.name, volume['name'])
class SheepdogDriver(VolumeDriver):
    """Executes commands relating to Sheepdog Volumes"""

    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met"""
        try:
            # NOTE(francois-charlier) Since 0.24 'collie cluster info -r'
            # gives short output, but for compatibility reason we won't
            # use it and just check if 'running' is in the output.
            (out, err) = self._execute('collie', 'cluster', 'info')
            if 'running' not in out.split():
                msg = _("Sheepdog is not working: %s") % out
                raise exception.VolumeBackendAPIException(data=msg)
        except exception.ProcessExecutionError:
            raise exception.NovaException(data=_("Sheepdog is not working"))

    def create_volume(self, volume):
        """Creates a sheepdog volume"""
        vdi = "sheepdog:%s" % volume['name']
        self._try_execute('qemu-img', 'create', vdi,
                          self._sizestr(volume['size']))

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a sheepdog volume from a snapshot."""
        backing = "sheepdog:%s:%s" % (snapshot['volume_name'],
                                      snapshot['name'])
        self._try_execute('qemu-img', 'create', '-b', backing,
                          "sheepdog:%s" % volume['name'])

    def delete_volume(self, volume):
        """Deletes a logical volume"""
        self._try_execute('collie', 'vdi', 'delete', volume['name'])

    def create_snapshot(self, snapshot):
        """Creates a sheepdog snapshot"""
        self._try_execute('qemu-img', 'snapshot', '-c', snapshot['name'],
                          "sheepdog:%s" % snapshot['volume_name'])

    def delete_snapshot(self, snapshot):
        """Deletes a sheepdog snapshot"""
        self._try_execute('collie', 'vdi', 'delete', snapshot['volume_name'],
                          '-s', snapshot['name'])

    def local_path(self, volume):
        # The sheepdog URI used by the qemu-img commands above.
        return "sheepdog:%s" % volume['name']

    def ensure_export(self, context, volume):
        """Safely and synchronously recreates an export for a logical volume"""
        pass

    def create_export(self, context, volume):
        """Exports the volume"""
        pass

    def remove_export(self, context, volume):
        """Removes an export for a logical volume"""
        pass

    def check_for_export(self, context, volume_id):
        """Make sure volume is exported."""
        pass

    def initialize_connection(self, volume, connector):
        """Connection info: clients reach the volume by vdi name."""
        return {
            'driver_volume_type': 'sheepdog',
            'data': {'name': volume['name']},
        }

    def terminate_connection(self, volume, connector):
        pass
class LoggingVolumeDriver(VolumeDriver):
    """Logs and records calls, for unit tests."""

    # Shared across all instances: every logged action is appended here.
    _LOGS = []

    def check_for_setup_error(self):
        pass

    def create_volume(self, volume):
        self.log_action('create_volume', volume)

    def delete_volume(self, volume):
        self.log_action('delete_volume', volume)

    def local_path(self, volume):
        # print() call form is valid on both Python 2 and 3; the old
        # statement form was Python-2-only syntax.
        print("local_path not implemented")
        raise NotImplementedError()

    def ensure_export(self, context, volume):
        self.log_action('ensure_export', volume)

    def create_export(self, context, volume):
        self.log_action('create_export', volume)

    def remove_export(self, context, volume):
        self.log_action('remove_export', volume)

    def initialize_connection(self, volume, connector):
        self.log_action('initialize_connection', volume)

    def terminate_connection(self, volume, connector):
        self.log_action('terminate_connection', volume)

    def check_for_export(self, context, volume_id):
        self.log_action('check_for_export', volume_id)

    @staticmethod
    def clear_logs():
        """Drop all recorded entries."""
        LoggingVolumeDriver._LOGS = []

    @staticmethod
    def log_action(action, parameters):
        """Logs the command."""
        LOG.debug(_("LoggingVolumeDriver: %s") % (action))
        log_dictionary = {}
        if parameters:
            log_dictionary = dict(parameters)
        log_dictionary['action'] = action
        LOG.debug(_("LoggingVolumeDriver: %s") % (log_dictionary))
        LoggingVolumeDriver._LOGS.append(log_dictionary)

    @staticmethod
    def all_logs():
        """Return every recorded entry."""
        return LoggingVolumeDriver._LOGS

    @staticmethod
    def logs_like(action, **kwargs):
        """Return recorded entries matching *action* and all given fields."""
        matches = []
        for entry in LoggingVolumeDriver._LOGS:
            if entry['action'] != action:
                continue
            # .items() works on Python 2 and 3; iteritems() is 2.x-only.
            if all(entry.get(k) == v for k, v in kwargs.items()):
                matches.append(entry)
        return matches
def _iscsi_location(ip, target, iqn, lun=None):
    """Build an iscsi location string: '<ip>:<port>,<target> <iqn> <lun>'."""
    portal = "%s:%s" % (ip, FLAGS.iscsi_port)
    return "%s,%s %s %s" % (portal, target, iqn, lun)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
# Generic return type used by the optional per-call `cls` response hook.
T = TypeVar('T')
# Loose alias for JSON-serializable request bodies.
JSONType = Any
# Signature of the `cls` callback: (pipeline response, deserialized model,
# response headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Module-level serializer shared by the build_* request helpers below;
# client-side validation is switched off.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
    subscription_id: str,
    resource_group_name: str,
    registry_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists replications of a registry."""
    api_version = "2021-12-01-preview"
    accept = "application/json"

    # Fill the URL template with serialized, validated path arguments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications')
    path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
    }
    url = _format_url_section(url, **path_args)

    # Query string: only the api-version.
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: advertise the JSON response format.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query,
        headers=headers,
        **kwargs
    )
def build_get_request(
    subscription_id: str,
    resource_group_name: str,
    registry_name: str,
    replication_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a single replication resource."""
    api_version = "2021-12-01-preview"
    accept = "application/json"

    # Fill the URL template with serialized, validated path arguments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}')
    path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
        "replicationName": _SERIALIZER.url("replication_name", replication_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
    }
    url = _format_url_section(url, **path_args)

    # Query string: only the api-version.
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: advertise the JSON response format.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query,
        headers=headers,
        **kwargs
    )
def build_create_request_initial(
    subscription_id: str,
    resource_group_name: str,
    registry_name: str,
    replication_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial PUT request that creates a replication."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2021-12-01-preview"
    accept = "application/json"

    # Fill the URL template with serialized, validated path arguments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}')
    path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
        "replicationName": _SERIALIZER.url("replication_name", replication_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
    }
    url = _format_url_section(url, **path_args)

    # Query string: only the api-version.
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: Content-Type only when a body content type was supplied.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=url,
        params=query,
        headers=headers,
        json=json,
        content=content,
        **kwargs
    )
def build_delete_request_initial(
    subscription_id: str,
    resource_group_name: str,
    registry_name: str,
    replication_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial DELETE request for a replication."""
    api_version = "2021-12-01-preview"

    # Fill the URL template with serialized, validated path arguments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}')
    path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
        "replicationName": _SERIALIZER.url("replication_name", replication_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
    }
    url = _format_url_section(url, **path_args)

    # Query string: only the api-version.  No headers are needed.
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    return HttpRequest(
        method="DELETE",
        url=url,
        params=query,
        **kwargs
    )
def build_update_request_initial(
    subscription_id: str,
    resource_group_name: str,
    registry_name: str,
    replication_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial PATCH request that updates a replication."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2021-12-01-preview"
    accept = "application/json"

    # Fill the URL template with serialized, validated path arguments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}')
    path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "registryName": _SERIALIZER.url("registry_name", registry_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
        "replicationName": _SERIALIZER.url("replication_name", replication_name, 'str', max_length=50, min_length=5, pattern=r'^[a-zA-Z0-9]*$'),
    }
    url = _format_url_section(url, **path_args)

    # Query string: only the api-version.
    query = kwargs.pop("params", {})  # type: Dict[str, Any]
    query['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: Content-Type only when a body content type was supplied.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PATCH",
        url=url,
        params=query,
        headers=headers,
        json=json,
        content=content,
        **kwargs
    )
class ReplicationsOperations(object):
"""ReplicationsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerregistry.v2021_12_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        registry_name: str,
        **kwargs: Any
    ) -> Iterable["_models.ReplicationListResult"]:
        """Lists all the replications for the specified container registry.

        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ReplicationListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.containerregistry.v2021_12_01_preview.models.ReplicationListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ReplicationListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Caller-supplied status-code overrides take precedence.
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page: use this operation's own URL template.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    registry_name=registry_name,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # Subsequent pages: the service-returned next_link is used
                # as the template; force GET since it is a follow-up fetch.
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    registry_name=registry_name,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page; hand back (next link, iterator of items).
            deserialized = self._deserialize("ReplicationListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Fetch one page, mapping HTTP errors to azure-core exceptions.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        # ItemPaged drives get_next/extract_data lazily as the caller iterates.
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications'}  # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
**kwargs: Any
) -> "_models.Replication":
"""Gets the properties of the specified replication.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param replication_name: The name of the replication.
:type replication_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Replication, or the result of cls(response)
:rtype: ~azure.mgmt.containerregistry.v2021_12_01_preview.models.Replication
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Replication"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Replication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'} # type: ignore
def _create_initial(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
replication: "_models.Replication",
**kwargs: Any
) -> "_models.Replication":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Replication"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(replication, 'Replication')
request = build_create_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
content_type=content_type,
json=_json,
template_url=self._create_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Replication', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Replication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'} # type: ignore
    @distributed_trace
    def begin_create(
        self,
        resource_group_name: str,
        registry_name: str,
        replication_name: str,
        replication: "_models.Replication",
        **kwargs: Any
    ) -> LROPoller["_models.Replication"]:
        """Creates a replication for a container registry with the specified parameters.

        :param resource_group_name: The name of the resource group to which the container registry
         belongs.
        :type resource_group_name: str
        :param registry_name: The name of the container registry.
        :type registry_name: str
        :param replication_name: The name of the replication.
        :type replication_name: str
        :param replication: The parameters for creating a replication.
        :type replication: ~azure.mgmt.containerregistry.v2021_12_01_preview.models.Replication
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Replication or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.containerregistry.v2021_12_01_preview.models.Replication]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Pop LRO-control keywords before they reach the pipeline call.
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Replication"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial PUT.  The identity `cls`
            # passes the raw pipeline response through to the poller.
            raw_result = self._create_initial(
                resource_group_name=resource_group_name,
                registry_name=registry_name,
                replication_name=replication_name,
                replication=replication,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # error_map was consumed by _create_initial; don't forward it again.
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Final-state hook: deserialize the terminal response body.
            response = pipeline_response.http_response
            deserialized = self._deserialize('Replication', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # polling may be True (default ARM polling), False (no polling),
        # or a caller-provided PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a poller from a previously saved continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'}  # type: ignore
def _delete_initial(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'} # type: ignore
@distributed_trace
def begin_delete(
    self,
    resource_group_name: str,
    registry_name: str,
    replication_name: str,
    **kwargs: Any
) -> LROPoller[None]:
    """Deletes a replication from a container registry.

    :param resource_group_name: The name of the resource group to which the container registry
     belongs.
    :type resource_group_name: str
    :param registry_name: The name of the container registry.
    :type registry_name: str
    :param replication_name: The name of the replication.
    :type replication_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial request when not resuming from a saved token.
    if cont_token is None:
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            replication_name=replication_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # error_map was consumed by the initial call; drop it before polling.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Final deserialization callback; delete has no body so only a
        # custom `cls` produces a value.
        if cls:
            return cls(pipeline_response, None, {})

    if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'}  # type: ignore
def _update_initial(
    self,
    resource_group_name: str,
    registry_name: str,
    replication_name: str,
    replication_update_parameters: "_models.ReplicationUpdateParameters",
    **kwargs: Any
) -> "_models.Replication":
    # Initial request of the long-running update operation; returns the
    # deserialized Replication from a 200 or 201 response.
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Replication"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

    # Serialize the update model into the JSON request body.
    _json = self._serialize.body(replication_update_parameters, 'ReplicationUpdateParameters')

    request = build_update_request_initial(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        registry_name=registry_name,
        replication_name=replication_name,
        content_type=content_type,
        json=_json,
        template_url=self._update_initial.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # Both success codes carry a Replication body.
    if response.status_code == 200:
        deserialized = self._deserialize('Replication', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('Replication', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized

_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'}  # type: ignore
@distributed_trace
def begin_update(
    self,
    resource_group_name: str,
    registry_name: str,
    replication_name: str,
    replication_update_parameters: "_models.ReplicationUpdateParameters",
    **kwargs: Any
) -> LROPoller["_models.Replication"]:
    """Updates a replication for a container registry with the specified parameters.

    :param resource_group_name: The name of the resource group to which the container registry
     belongs.
    :type resource_group_name: str
    :param registry_name: The name of the container registry.
    :type registry_name: str
    :param replication_name: The name of the replication.
    :type replication_name: str
    :param replication_update_parameters: The parameters for updating a replication.
    :type replication_update_parameters:
     ~azure.mgmt.containerregistry.v2021_12_01_preview.models.ReplicationUpdateParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
     operation to not poll, or pass in your own initialized polling object for a personal polling
     strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of LROPoller that returns either Replication or the result of
     cls(response)
    :rtype:
     ~azure.core.polling.LROPoller[~azure.mgmt.containerregistry.v2021_12_01_preview.models.Replication]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Replication"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # Only issue the initial request when not resuming from a saved token.
    if cont_token is None:
        raw_result = self._update_initial(
            resource_group_name=resource_group_name,
            registry_name=registry_name,
            replication_name=replication_name,
            replication_update_parameters=replication_update_parameters,
            content_type=content_type,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # error_map was consumed by the initial call; drop it before polling.
    kwargs.pop('error_map', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response into a Replication model.
        response = pipeline_response.http_response
        deserialized = self._deserialize('Replication', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'}  # type: ignore
| |
import logging
import os
import re
import uuid
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext as _
from core.exceptions import ServiceUnavailable
from core.helpers import generate_ssh_keys, make_rest_post_call
from github.api import get_branch_details, get_default_branch
logger = logging.getLogger(__name__)
class Owner(models.Model):
    """A github user or organization that owns repositories.

    :param name: The unique name for this user or org (taken from github)
    :param github_id: Unique ID github has assigned the owner
    """
    # Field order is preserved for migration compatibility.
    name = models.CharField(max_length=100)
    github_id = models.PositiveIntegerField(unique=True)

    def __str__(self):
        # Display owners by their github name.
        return self.name

    class Meta(object):
        verbose_name = _('Owner')
        verbose_name_plural = _('Owners')
class Site(models.Model):
    """ Represents a 'deployed' or soon-to-be deployed static site.

    :param owner: Ref to the owner of the project
    :param name: The name of the project on github
    :param github_id: Unique ID github has assigned the project
    :param deploy_key: Token used to access the project on github
    :param deploy_key_secret: Secret portion of the deploy_key
    :param deploy_key_id: The id in Github's DB for the key they have stored
    :param webhook_id: The id in Github's DB for the webhook they have stored
    :param is_active: If False, means the site is marked for deletion
    """
    DEFAULT_ENV = _('Production')

    owner = models.ForeignKey(Owner, related_name='sites')
    name = models.CharField(max_length=100)
    github_id = models.PositiveIntegerField(unique=True)
    deploy_key = models.TextField(blank=True, null=True)
    deploy_key_secret = models.TextField(blank=True, null=True)
    deploy_key_id = models.CharField(blank=True, null=True, max_length=12)
    webhook_id = models.CharField(blank=True, null=True, max_length=12)
    is_active = models.BooleanField(default=True)

    def get_deployable_environment(self, event, is_tag_event=False):
        """Return the first environment that should deploy for this event.

        Branch environments match push events whose ref ends with the
        configured branch name; tag environments match tag events whose
        name matches the configured regex. Returns None when the site is
        inactive or nothing matches.
        """
        if self.is_active:
            for env in self.environments.all():
                if (env.deploy_type == Environment.BRANCH and not
                        is_tag_event and event.endswith(env.branch)):
                    return env
                elif (env.deploy_type == Environment.TAG and
                        is_tag_event and re.match(env.tag_regex, event)):
                    return env
        return None

    def get_newest_commit(self, user):
        """ Calls github and retrieves the current git hash of the most recent
        code push to the default branch of the repo
        """
        branch = get_default_branch(self, user)
        git_hash = get_branch_details(self, user, branch)
        return (branch, git_hash)

    def get_most_recent_build(self):
        # Latest BranchBuild for this site by creation time, or None.
        return BranchBuild.objects.filter(site=self)\
            .order_by('-created').first()

    def save(self, user=None, *args, **kwargs):
        # Lazily provision an SSH deploy keypair on first save.
        if not self.deploy_key:
            self.deploy_key, self.deploy_key_secret = generate_ssh_keys()
        # Bootstrap default environments the first time a user saves the site.
        # NOTE(review): environments.create() runs before super().save(); for
        # a Site that has never been persisted this relies on the related
        # manager working pre-save -- confirm callers save an existing row.
        if not self.environments.exists() and user:
            branch = get_default_branch(self, user)
            self.environments.create(
                name=self.DEFAULT_ENV, deploy_type=Environment.PROMOTE)
            self.environments.create(name='Staging', branch=branch)
        super(Site, self).save(*args, **kwargs)

    def __str__(self):
        return self.name

    class Meta(object):
        verbose_name = _('Site')
        verbose_name_plural = _('Sites')
class Build(models.Model):
    """ Represents built code that has been deployed to a folder. This build
    can be referenced by the HTTP server for routing

    :param site: Reference to the project for this build instance
    :param git_hash: If a branch build, the git hash of the deployed code
    :param created: Date this code was built
    :param deployed: Date this code was last deployed to an environment
    :param path: The path of the site on the static server
    """
    NEW = 'NEW'
    BUILDING = 'BLD'
    SUCCESS = 'SUC'
    FAILED = 'FAL'
    STATUS_CHOICES = (
        (NEW, _('new')),
        (BUILDING, _('building')),
        (SUCCESS, _('success')),
        (FAILED, _('failed'))
    )
    # Stable public identifier used in builder callbacks and deploy paths.
    uuid = models.UUIDField(default=uuid.uuid4, editable=False)
    site = models.ForeignKey(Site, related_name='builds')
    created = models.DateTimeField(auto_now_add=True, editable=False)
    status = models.CharField(max_length=3, choices=STATUS_CHOICES,
                              default=NEW)

    @property
    def path(self):
        """Location of this build on the static server: <github_id>/<uuid>."""
        return "{0}/{1}".format(self.site.github_id, self.uuid)

    def can_build(self):
        """Return True unless a build is already in progress.

        BUG FIX: previously compared with ``is not``, which tests object
        identity and only behaved correctly because CPython interns short
        string literals; equality is the intended comparison.
        """
        return self.status != self.BUILDING

    def deploy(self, environment):
        """Ask the builder service to build and deploy this code.

        :param environment: The Environment this build targets
        :raises ServiceUnavailable: when the builder service is unreachable

        NOTE(review): ``self.branch`` and ``self.git_hash`` are defined on
        the BranchBuild subclass only -- confirm deploy() is never invoked
        on a bare Build instance.
        """
        if not self.can_build():
            logger.error("Build being/been built by builder...")
            return
        callback = os.environ['API_BASE_URL'] + \
            reverse('webhook:builder', args=[str(self.uuid), ])
        url = os.environ['BUILDER_URL'] + '/build'
        headers = {'content-type': 'application/json'}
        body = {
            "deploy_key": self.site.deploy_key_secret,
            "branch": self.branch,
            "git_hash": self.git_hash,
            "repo_owner": self.site.owner.name,
            "path": self.path,
            "repo_name": self.site.name,
            "environment": environment.name.lower(),
            'callback': callback
        }
        try:
            make_rest_post_call(url, headers, body)
        except Exception:
            # BUG FIX: was a bare ``except:`` (also swallowed SystemExit and
            # KeyboardInterrupt) and used the deprecated logger.warn().
            logger.warning('Builder down?')
            msg = 'Service temporarily unavailable: franklin-build'
            raise ServiceUnavailable(detail=msg)
        self.status = self.BUILDING
        self.save()

    def __str__(self):
        return '%s - %s' % (self.status, self.created)
class BranchBuild(Build):
    """A Build that was produced from a specific git branch.

    :param branch: If a branch build, the name of the branch
    :param git_hash: If a branch build, the git hash of the deployed code
    """
    git_hash = models.CharField(max_length=40)
    branch = models.CharField(max_length=100)

    def __str__(self):
        # e.g. "my-site 1b4f...-uuid"
        return '%s %s' % (self.site.name, self.uuid)

    class Meta(object):
        verbose_name = _('Branch Build')
        verbose_name_plural = _('Branch Builds')
class Environment(models.Model):
    """Configuration for one deployed environment of a site.

    :param site: Ref to the project this environment is hosting
    :param name: Name for the environment.
    :param deploy_type: What event will trigger a build. (push to branch,
    tagging a commit, or manually by an admin user)
    :param branch: Code branch that is used for deploying
    :param tag_regex: Tag events matching this regular expression will be
    deployed (If deploy_type is tag)
    :param url: The url builder has deployed this project to
    :param past_builds: Ref to all builds that can be marked current_deployed
    :param status: The current status of the deployed version on Franklin
    """
    BRANCH = 'BCH'
    TAG = 'TAG'
    PROMOTE = 'PRO'
    DEPLOY_CHOICES = (
        (BRANCH, _('branch')),
        (TAG, _('tag')),
        (PROMOTE, _('promote'))
    )
    site = models.ForeignKey(Site, related_name='environments')
    name = models.CharField(max_length=100, default='')
    deploy_type = models.CharField(
        max_length=3, choices=DEPLOY_CHOICES, default=BRANCH)
    branch = models.CharField(max_length=100, default='master')
    tag_regex = models.CharField(max_length=100, blank=True)
    url = models.CharField(max_length=100, unique=True)
    past_builds = models.ManyToManyField(
        Build, related_name='environments', through='Deploy', blank=True)

    def get_current_deploy(self):
        """Return the newest successful build deployed here, or None."""
        successful = self.past_builds.filter(status=Build.SUCCESS)
        return successful.order_by('-created').first()

    def save(self, *args, **kwargs):
        # Derive a stable hostname the first time this environment is saved:
        # production gets "<site>.<base>", others "<site>-<env>.<base>".
        if not self.url:
            site_slug = self.site.name.lower()
            base_domain = os.environ['BASE_URL']
            if self.name == self.site.DEFAULT_ENV:
                self.url = "{0}.{1}".format(site_slug, base_domain)
            else:
                self.url = "{0}-{1}.{2}".format(site_slug,
                                                self.name.lower(),
                                                base_domain)
        super(Environment, self).save(*args, **kwargs)

    def __str__(self):
        return '%s %s' % (self.site.name, self.name)

    class Meta(object):
        verbose_name = _('Environment')
        verbose_name_plural = _('Environments')
        unique_together = ('name', 'site')
class Deploy(models.Model):
    """A deployment event linking an Environment to a Build.

    :param environment: link to environment
    :param build: link to build
    :param deployed: Date this code was last deployed to an environment
    """
    environment = models.ForeignKey(Environment, on_delete=models.CASCADE)
    build = models.ForeignKey(Build, on_delete=models.CASCADE)
    deployed = models.DateTimeField(auto_now_add=True, editable=False)

    def __str__(self):
        # e.g. "my-site 2016-01-01 12:00:00"
        return '%s %s' % (self.environment.site.name, self.deployed)
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import decimal
import json
from django.contrib.auth import logout as auth_logout
from django.contrib import messages
from django.core.urlresolvers import reverse_lazy, reverse
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.views.generic import DetailView
from django.views.generic import FormView
from django.views.generic import TemplateView
from django.views.generic import View
from django.utils.encoding import smart_str
from braces.views import CsrfExemptMixin
from braces.views import FormValidMessageMixin
from braces.views import LoginRequiredMixin
from braces.views import SelectRelatedMixin
import stripe
from .forms import PlanForm, CancelSubscriptionForm
from .mixins import PaymentsContextMixin, SubscriptionMixin
from .models import CurrentSubscription
from .models import Customer
from .models import Event
from .models import EventProcessingException
from .settings import PLAN_LIST
from .settings import subscriber_request_callback
from .settings import PRORATION_POLICY_FOR_UPGRADES
from .settings import CANCELLATION_AT_PERIOD_END
from .sync import sync_subscriber
# ============================================================================ #
# Account Views #
# ============================================================================ #
class AccountView(LoginRequiredMixin, SelectRelatedMixin, TemplateView):
    """Shows account details including customer and subscription details."""
    template_name = "djstripe/account.html"

    def get_context_data(self, *args, **kwargs):
        context = super(AccountView, self).get_context_data(**kwargs)
        subscriber = subscriber_request_callback(self.request)
        customer, _created = Customer.get_or_create(subscriber=subscriber)
        # A customer with no subscription yet simply renders as None.
        try:
            subscription = customer.current_subscription
        except CurrentSubscription.DoesNotExist:
            subscription = None
        context.update({
            'customer': customer,
            'subscription': subscription,
            'plans': PLAN_LIST,
        })
        return context
# ============================================================================ #
# Billing Views #
# ============================================================================ #
class ChangeCardView(LoginRequiredMixin, PaymentsContextMixin, DetailView):
    """TODO: Needs to be refactored to leverage forms and context data."""
    template_name = "djstripe/change_card.html"

    def get_object(self):
        # Cache the customer on the view so repeated lookups are cheap.
        if not hasattr(self, "customer"):
            self.customer, _created = Customer.get_or_create(
                subscriber=subscriber_request_callback(self.request))
        return self.customer

    def post(self, request, *args, **kwargs):
        """
        TODO: Raise a validation error when a stripe token isn't passed.
        Should be resolved when a form is used.
        """
        customer = self.get_object()
        try:
            # A blank fingerprint means this is the first card on file.
            first_card = customer.card_fingerprint == ""
            customer.update_card(request.POST.get("stripe_token"))
            if first_card:
                customer.send_invoice()
            # Always retry outstanding invoices against the new card.
            customer.retry_unpaid_invoices()
        except stripe.StripeError as exc:
            messages.info(request, "Stripe Error")
            context = {
                "customer": self.get_object(),
                "stripe_error": str(exc)
            }
            return render(request, self.template_name, context)
        messages.info(request, "Your card is now updated.")
        return redirect(self.get_post_success_url())

    def get_post_success_url(self):
        """ Makes it easier to do custom dj-stripe integrations. """
        return reverse("djstripe:account")
class HistoryView(LoginRequiredMixin, SelectRelatedMixin, DetailView):
    """Show the billing/invoice history for the current customer."""
    template_name = "djstripe/history.html"
    model = Customer
    select_related = ["invoice"]

    def get_object(self):
        subscriber = subscriber_request_callback(self.request)
        customer, _created = Customer.get_or_create(subscriber=subscriber)
        return customer
class SyncHistoryView(CsrfExemptMixin, LoginRequiredMixin, View):
    """TODO: Needs to be refactored to leverage context data."""
    template_name = "djstripe/includes/_history_table.html"

    def post(self, request, *args, **kwargs):
        # Pull the latest data from Stripe, then re-render the table partial.
        customer = sync_subscriber(subscriber_request_callback(request))
        return render(request, self.template_name, {"customer": customer})
# ============================================================================ #
# Subscription Views #
# ============================================================================ #
class SubscribeFormView(LoginRequiredMixin, FormValidMessageMixin, SubscriptionMixin, FormView):
    """TODO: Add stripe_token to the form and use form_valid() instead of post()."""
    form_class = PlanForm
    template_name = "djstripe/subscribe_form.html"
    success_url = reverse_lazy("djstripe:history")
    form_valid_message = "You are now subscribed!"

    def post(self, request, *args, **kwargs):
        """
        Handles POST requests, instantiating a form instance with the passed
        POST variables and then checked for validity.
        """
        form = self.get_form(self.get_form_class())
        if not form.is_valid():
            return self.form_invalid(form)
        try:
            customer, _created = Customer.get_or_create(
                subscriber=subscriber_request_callback(self.request))
            customer.update_card(self.request.POST.get("stripe_token"))
            customer.subscribe(form.cleaned_data["plan"])
        except stripe.StripeError as exc:
            # Surface the Stripe failure as a non-field form error.
            form.add_error(None, str(exc))
            return self.form_invalid(form)
        # redirect to confirmation page
        return self.form_valid(form)
class ChangePlanView(LoginRequiredMixin, FormValidMessageMixin, SubscriptionMixin, FormView):
    """
    TODO: This logic should be in form_valid() instead of post().
    TODO: Work in a trial_days kwarg

    Also, this should be combined with SubscribeFormView.
    """
    form_class = PlanForm
    template_name = "djstripe/subscribe_form.html"
    success_url = reverse_lazy("djstripe:history")
    form_valid_message = "You've just changed your plan!"

    def post(self, request, *args, **kwargs):
        """Switch the current customer to the plan selected in the form."""
        form = PlanForm(request.POST)
        try:
            customer = subscriber_request_callback(request).customer
        except Customer.DoesNotExist:
            form.add_error(None, "You must already be subscribed to a plan before you can change it.")
            return self.form_invalid(form)
        if not form.is_valid():
            return self.form_invalid(form)
        try:
            # When a customer upgrades their plan, and DJSTRIPE_PRORATION_POLICY_FOR_UPGRADES is set to True,
            # we force the proration of the current plan and use it towards the upgraded plan,
            # no matter what DJSTRIPE_PRORATION_POLICY is set to.
            if PRORATION_POLICY_FOR_UPGRADES:
                current_subscription_amount = customer.current_subscription.amount
                selected_plan_name = form.cleaned_data["plan"]
                # BUG FIX: next() without a default raised StopIteration (an
                # unhandled 500) when the plan was missing from PLAN_LIST;
                # report it as a form error instead.
                selected_plan = next(
                    (plan for plan in PLAN_LIST if plan["plan"] == selected_plan_name),
                    None
                )
                if selected_plan is None:
                    form.add_error(None, "Unknown plan: {0}".format(selected_plan_name))
                    return self.form_invalid(form)
                selected_plan_price = selected_plan["price"] / decimal.Decimal("100")
                # Is it an upgrade?
                if selected_plan_price > current_subscription_amount:
                    customer.subscribe(selected_plan_name, prorate=True)
                else:
                    customer.subscribe(selected_plan_name)
            else:
                customer.subscribe(form.cleaned_data["plan"])
        except stripe.StripeError as exc:
            form.add_error(None, str(exc))
            return self.form_invalid(form)
        return self.form_valid(form)
class CancelSubscriptionView(LoginRequiredMixin, SubscriptionMixin, FormView):
    """Cancel the current customer's subscription (possibly pro-rated)."""
    template_name = "djstripe/cancel_subscription.html"
    form_class = CancelSubscriptionForm
    success_url = reverse_lazy("djstripe:account")

    def form_valid(self, form):
        customer, _created = Customer.get_or_create(
            subscriber=subscriber_request_callback(self.request))
        subscription = customer.cancel_subscription(
            at_period_end=CANCELLATION_AT_PERIOD_END)
        if subscription.status != subscription.STATUS_CANCELLED:
            # Pro-rated cancellation: the user keeps access until period end.
            messages.info(self.request, "Your subscription status is now '{status}' until '{period_end}'".format(
                status=subscription.status, period_end=subscription.current_period_end)
            )
            return super(CancelSubscriptionView, self).form_valid(form)
        # Immediate cancellation: notify, then log the user out.
        messages.info(self.request, "Your subscription is now cancelled.")
        auth_logout(self.request)
        return redirect("home")
# ============================================================================ #
# Web Services #
# ============================================================================ #
class WebHook(CsrfExemptMixin, View):
    """Endpoint that receives Stripe webhook events.

    NOTE(review): the incoming payload is trusted as-is -- there is no
    Stripe signature verification here. Presumably ``event.validate()``
    cross-checks the event against Stripe's API before it is processed;
    confirm that before exposing this endpoint.
    """

    def post(self, request, *args, **kwargs):
        body = smart_str(request.body)
        data = json.loads(body)
        if Event.objects.filter(stripe_id=data["id"]).exists():
            # Stripe retries deliveries; record duplicates rather than
            # reprocessing the same event twice.
            EventProcessingException.objects.create(
                data=data,
                message="Duplicate event record",
                traceback=""
            )
        else:
            event = Event.objects.create(
                stripe_id=data["id"],
                kind=data["type"],
                livemode=data["livemode"],
                webhook_message=data
            )
            # Validate first, then dispatch the event to its handler.
            event.validate()
            event.process()
        # Stripe only needs a 200 acknowledgement.
        return HttpResponse()
| |
from functools import partial
import os
import warnings
import numpy as np
from scipy import sparse, linalg, stats
from numpy.testing import (assert_equal, assert_array_equal,
assert_array_almost_equal)
from nose.tools import assert_true, assert_raises
from mne.parallel import _force_serial
from mne.stats.cluster_level import (permutation_cluster_test,
permutation_cluster_1samp_test,
spatio_temporal_cluster_test,
spatio_temporal_cluster_1samp_test,
ttest_1samp_no_p, summarize_clusters_stc)
from mne.utils import run_tests_if_main, slow_test, _TempDir, catch_logging
warnings.simplefilter('always') # enable b/c these tests throw warnings
n_space = 50
def _get_conditions():
noise_level = 20
n_time_1 = 20
n_time_2 = 13
normfactor = np.hanning(20).sum()
rng = np.random.RandomState(42)
condition1_1d = rng.randn(n_time_1, n_space) * noise_level
for c in condition1_1d:
c[:] = np.convolve(c, np.hanning(20), mode="same") / normfactor
condition2_1d = rng.randn(n_time_2, n_space) * noise_level
for c in condition2_1d:
c[:] = np.convolve(c, np.hanning(20), mode="same") / normfactor
pseudoekp = 10 * np.hanning(25)[None, :]
condition1_1d[:, 25:] += pseudoekp
condition2_1d[:, 25:] -= pseudoekp
condition1_2d = condition1_1d[:, :, np.newaxis]
condition2_2d = condition2_1d[:, :, np.newaxis]
return condition1_1d, condition2_1d, condition1_2d, condition2_2d
def test_cache_dir():
    """Test use of cache dir."""
    tempdir = _TempDir()
    # Remember the caller's environment so it can be restored in `finally`.
    orig_dir = os.getenv('MNE_CACHE_DIR', None)
    orig_size = os.getenv('MNE_MEMMAP_MIN_SIZE', None)
    rng = np.random.RandomState(0)
    X = rng.randn(9, 2, 10)
    try:
        # Tiny memmap threshold forces the cache-dir code path even for
        # this small array.
        os.environ['MNE_MEMMAP_MIN_SIZE'] = '1K'
        os.environ['MNE_CACHE_DIR'] = tempdir
        # Fix error for #1507: in-place when memmapping
        with catch_logging() as log_file:
            permutation_cluster_1samp_test(
                X, buffer_size=None, n_jobs=2, n_permutations=1,
                seed=0, stat_fun=ttest_1samp_no_p, verbose=False)
        # ensure that non-independence yields warning
        stat_fun = partial(ttest_1samp_no_p, sigma=1e-3)
        assert_true('independently' not in log_file.getvalue())
        with warnings.catch_warnings(record=True):  # independently
            permutation_cluster_1samp_test(
                X, buffer_size=10, n_jobs=2, n_permutations=1,
                seed=0, stat_fun=stat_fun, verbose=False)
        assert_true('independently' in log_file.getvalue())
    finally:
        # Restore (or remove) the env vars overwritten above.
        if orig_dir is not None:
            os.environ['MNE_CACHE_DIR'] = orig_dir
        else:
            del os.environ['MNE_CACHE_DIR']
        if orig_size is not None:
            os.environ['MNE_MEMMAP_MIN_SIZE'] = orig_size
        else:
            del os.environ['MNE_MEMMAP_MIN_SIZE']
def test_permutation_step_down_p():
    """Test cluster level permutations with step_down_p."""
    try:
        try:
            from sklearn.feature_extraction.image import grid_to_graph
        except ImportError:
            from scikits.learn.feature_extraction.image import grid_to_graph  # noqa: F401,E501
    except ImportError:
        # Neither sklearn flavor available: nothing to test.
        return
    rng = np.random.RandomState(0)
    # subjects, time points, spatial points
    X = rng.randn(9, 2, 10)
    # Plant effects: a strong 2x2 time/space patch and a weaker 4-point one.
    X[:, 0:2, 0:2] += 2  # span two time points and two spatial points
    X[:, 1, 5:9] += 0.5  # span four time points with 4x smaller amplitude
    thresh = 2

    # make sure it works when we use ALL points in step-down
    t, clusters, p, H0 = permutation_cluster_1samp_test(
        X, threshold=thresh, step_down_p=1.0)

    # Without step-down only the strong spatial cluster is significant...
    t, clusters, p_old, H0 = permutation_cluster_1samp_test(
        X, threshold=thresh, step_down_p=0.0)
    assert_equal(np.sum(p_old < 0.05), 1)  # just spatial cluster

    # ...but step-down at 0.05 rescues the weaker temporal cluster.
    t, clusters, p_new, H0 = permutation_cluster_1samp_test(
        X, threshold=thresh, step_down_p=0.05)
    assert_equal(np.sum(p_new < 0.05), 2)  # time one rescued

    # Step-down can only make p-values smaller, never larger.
    assert_true(np.all(p_old >= p_new))
def test_cluster_permutation_test():
    """Test cluster level permutations tests."""
    cond1_1d, cond2_1d, cond1_2d, cond2_2d = _get_conditions()
    # Run the same checks on the 1D and 2D flavors of the data.
    for cond1, cond2 in ((cond1_1d, cond2_1d), (cond1_2d, cond2_2d)):
        # One-sided test: exactly one significant cluster expected.
        T_obs, clusters, cluster_p_values, hist = permutation_cluster_test(
            [cond1, cond2], n_permutations=100, tail=1, seed=1,
            buffer_size=None)
        assert_equal(np.sum(cluster_p_values < 0.05), 1)

        # Two-sided test: still exactly one significant cluster.
        T_obs, clusters, cluster_p_values, hist = permutation_cluster_test(
            [cond1, cond2], n_permutations=100, tail=0, seed=1,
            buffer_size=None)
        assert_equal(np.sum(cluster_p_values < 0.05), 1)

        # Parallel jobs with buffering must match the serial result exactly.
        buffer_size = cond1.shape[1] // 10
        T_obs, clusters, cluster_p_values_buff, hist = \
            permutation_cluster_test([cond1, cond2],
                                     n_permutations=100, tail=0, seed=1,
                                     n_jobs=2, buffer_size=buffer_size)
        assert_array_equal(cluster_p_values, cluster_p_values_buff)
@slow_test
def test_cluster_permutation_t_test():
    """Test cluster level permutations T-test."""
    condition1_1d, condition2_1d, condition1_2d, condition2_2d = \
        _get_conditions()

    # use a very large sigma to make sure Ts are not independent
    stat_funs = [ttest_1samp_no_p,
                 partial(ttest_1samp_no_p, sigma=1e-1)]

    for stat_fun in stat_funs:
        # Run identically on the 1D and 2D flavors of the data.
        for condition1 in (condition1_1d, condition1_2d):
            # these are so significant we can get away with fewer perms
            T_obs, clusters, cluster_p_values, hist =\
                permutation_cluster_1samp_test(condition1, n_permutations=100,
                                               tail=0, seed=1,
                                               buffer_size=None)
            assert_equal(np.sum(cluster_p_values < 0.05), 1)

            # Positive-tail test on the data and negative-tail test on the
            # negated data must be mirror images of each other.
            T_obs_pos, c_1, cluster_p_values_pos, _ =\
                permutation_cluster_1samp_test(condition1, n_permutations=100,
                                               tail=1, threshold=1.67, seed=1,
                                               stat_fun=stat_fun,
                                               buffer_size=None)

            T_obs_neg, _, cluster_p_values_neg, _ =\
                permutation_cluster_1samp_test(-condition1, n_permutations=100,
                                               tail=-1, threshold=-1.67,
                                               seed=1, stat_fun=stat_fun,
                                               buffer_size=None)
            assert_array_equal(T_obs_pos, -T_obs_neg)
            assert_array_equal(cluster_p_values_pos < 0.05,
                               cluster_p_values_neg < 0.05)

            # test with 2 jobs and buffer_size enabled
            buffer_size = condition1.shape[1] // 10
            with warnings.catch_warnings(record=True):  # independently
                T_obs_neg_buff, _, cluster_p_values_neg_buff, _ = \
                    permutation_cluster_1samp_test(
                        -condition1, n_permutations=100, tail=-1,
                        threshold=-1.67, seed=1, n_jobs=2, stat_fun=stat_fun,
                        buffer_size=buffer_size)

            # Buffered/parallel results must match the serial ones exactly.
            assert_array_equal(T_obs_neg, T_obs_neg_buff)
            assert_array_equal(cluster_p_values_neg, cluster_p_values_neg_buff)
@slow_test
def test_cluster_permutation_with_connectivity():
    """Test cluster level permutations with connectivity matrix."""
    try:
        try:
            from sklearn.feature_extraction.image import grid_to_graph
        except ImportError:
            from scikits.learn.feature_extraction.image import grid_to_graph
    except ImportError:
        # No (scikit-)learn available: silently skip the test.
        return
    condition1_1d, condition2_1d, condition1_2d, condition2_2d = \
        _get_conditions()

    n_pts = condition1_1d.shape[1]
    # we don't care about p-values in any of these, so do fewer permutations
    args = dict(seed=None, max_step=1, exclude=None,
                step_down_p=0, t_power=1, threshold=1.67,
                check_disjoint=False, n_permutations=50)

    did_warn = False
    # Exercise both the 1-sample and the multi-condition variants.
    for X1d, X2d, func, spatio_temporal_func in \
            [(condition1_1d, condition1_2d,
              permutation_cluster_1samp_test,
              spatio_temporal_cluster_1samp_test),
             ([condition1_1d, condition2_1d],
              [condition1_2d, condition2_2d],
              permutation_cluster_test,
              spatio_temporal_cluster_test)]:
        out = func(X1d, **args)
        connectivity = grid_to_graph(1, n_pts)
        # A trivial lattice connectivity must not change the statistics.
        out_connectivity = func(X1d, connectivity=connectivity, **args)
        assert_array_equal(out[0], out_connectivity[0])
        for a, b in zip(out_connectivity[1], out[1]):
            assert_array_equal(out[0][a], out[0][b])
            assert_true(np.all(a[b]))

        # test spatio-temporal w/o time connectivity (repeat spatial pattern)
        connectivity_2 = sparse.coo_matrix(
            linalg.block_diag(connectivity.asfptype().todense(),
                              connectivity.asfptype().todense()))

        if isinstance(X1d, list):
            X1d_2 = [np.concatenate((x, x), axis=1) for x in X1d]
        else:
            X1d_2 = np.concatenate((X1d, X1d), axis=1)

        out_connectivity_2 = func(X1d_2, connectivity=connectivity_2, **args)
        # make sure we were operating on the same values
        split = len(out[0])
        assert_array_equal(out[0], out_connectivity_2[0][:split])
        assert_array_equal(out[0], out_connectivity_2[0][split:])

        # make sure we really got 2x the number of original clusters
        n_clust_orig = len(out[1])
        assert_true(len(out_connectivity_2[1]) == 2 * n_clust_orig)

        # Make sure that we got the old ones back
        data_1 = set([np.sum(out[0][b[:n_pts]]) for b in out[1]])
        data_2 = set([np.sum(out_connectivity_2[0][a]) for a in
                      out_connectivity_2[1][:]])
        assert_true(len(data_1.intersection(data_2)) == len(data_1))

        # now use the other algorithm
        # NOTE(review): `n_space` is not defined in this function (only
        # `n_pts` is); unless a module-level `n_space` exists this raises
        # NameError — confirm whether `n_pts` was intended here.
        if isinstance(X1d, list):
            X1d_3 = [np.reshape(x, (-1, 2, n_space)) for x in X1d_2]
        else:
            X1d_3 = np.reshape(X1d_2, (-1, 2, n_space))

        out_connectivity_3 = spatio_temporal_func(X1d_3, n_permutations=50,
                                                  connectivity=connectivity,
                                                  max_step=0, threshold=1.67,
                                                  check_disjoint=True)
        # make sure we were operating on the same values
        split = len(out[0])
        assert_array_equal(out[0], out_connectivity_3[0][0])
        assert_array_equal(out[0], out_connectivity_3[0][1])

        # make sure we really got 2x the number of original clusters
        assert_true(len(out_connectivity_3[1]) == 2 * n_clust_orig)

        # Make sure that we got the old ones back
        data_1 = set([np.sum(out[0][b[:n_pts]]) for b in out[1]])
        data_2 = set([np.sum(out_connectivity_3[0][a[0], a[1]]) for a in
                      out_connectivity_3[1]])
        assert_true(len(data_1.intersection(data_2)) == len(data_1))

        # test new versus old method
        out_connectivity_4 = spatio_temporal_func(X1d_3, n_permutations=50,
                                                  connectivity=connectivity,
                                                  max_step=2, threshold=1.67)
        out_connectivity_5 = spatio_temporal_func(X1d_3, n_permutations=50,
                                                  connectivity=connectivity,
                                                  max_step=1, threshold=1.67)

        # clusters could be in a different order
        sums_4 = [np.sum(out_connectivity_4[0][a])
                  for a in out_connectivity_4[1]]
        sums_5 = [np.sum(out_connectivity_4[0][a])
                  for a in out_connectivity_5[1]]
        sums_4 = np.sort(sums_4)
        sums_5 = np.sort(sums_5)
        assert_array_almost_equal(sums_4, sums_5)

        if not _force_serial:
            # Requesting an absurd negative job count must fail.
            assert_raises(ValueError, spatio_temporal_func, X1d_3,
                          n_permutations=1, connectivity=connectivity,
                          max_step=1, threshold=1.67, n_jobs=-1000)

        # not enough TFCE params
        assert_raises(KeyError, spatio_temporal_func, X1d_3,
                      connectivity=connectivity, threshold=dict(me='hello'))

        # too extreme a start threshold
        with warnings.catch_warnings(record=True) as w:
            spatio_temporal_func(X1d_3, connectivity=connectivity,
                                 threshold=dict(start=10, step=1))
        if not did_warn:
            # Only the first pass is guaranteed to warn (warning filters
            # may suppress duplicates afterwards).
            assert_true(len(w) == 1)
            did_warn = True

        # too extreme a start threshold
        assert_raises(ValueError, spatio_temporal_func, X1d_3,
                      connectivity=connectivity, tail=-1,
                      threshold=dict(start=1, step=-1))
        assert_raises(ValueError, spatio_temporal_func, X1d_3,
                      connectivity=connectivity, tail=-1,
                      threshold=dict(start=-1, step=1))

        # wrong type for threshold
        assert_raises(TypeError, spatio_temporal_func, X1d_3,
                      connectivity=connectivity, threshold=[])

        # wrong value for tail
        assert_raises(ValueError, spatio_temporal_func, X1d_3,
                      connectivity=connectivity, tail=2)

        # make sure it actually found a significant point
        out_connectivity_6 = spatio_temporal_func(X1d_3, n_permutations=50,
                                                  connectivity=connectivity,
                                                  max_step=1,
                                                  threshold=dict(start=1,
                                                                 step=1))
        assert_true(np.min(out_connectivity_6[2]) < 0.05)
@slow_test
def test_permutation_connectivity_equiv():
    """Test cluster level permutations with and without connectivity."""
    try:
        try:
            from sklearn.feature_extraction.image import grid_to_graph
        except ImportError:
            from scikits.learn.feature_extraction.image import grid_to_graph
    except ImportError:
        # No (scikit-)learn available: silently skip the test.
        return
    rng = np.random.RandomState(0)
    # subjects, time points, spatial points
    n_time = 2
    n_space = 4
    X = rng.randn(6, n_time, n_space)
    # add some significant points
    X[:, :, 0:2] += 10  # span two time points and two spatial points
    X[:, 1, 3] += 20  # span one time point
    max_steps = [1, 1, 1, 2]
    # This will run full algorithm in two ways, then the ST-algorithm in 2 ways
    # All of these should give the same results
    conns = [None, grid_to_graph(n_time, n_space),
             grid_to_graph(1, n_space), grid_to_graph(1, n_space)]
    stat_map = None
    thresholds = [2, dict(start=1.5, step=1.0)]
    sig_counts = [2, 5]
    sdps = [0, 0.05, 0.05]
    ots = ['mask', 'mask', 'indices']
    stat_fun = partial(ttest_1samp_no_p, sigma=1e-3)
    for thresh, count in zip(thresholds, sig_counts):
        cs = None
        ps = None
        for max_step, conn in zip(max_steps, conns):
            for sdp, ot in zip(sdps, ots):
                t, clusters, p, H0 = \
                    permutation_cluster_1samp_test(
                        X, threshold=thresh, connectivity=conn, n_jobs=2,
                        max_step=max_step, stat_fun=stat_fun,
                        step_down_p=sdp, out_type=ot)
                # make sure our output datatype is correct
                if ot == 'mask':
                    assert_true(isinstance(clusters[0], np.ndarray))
                    assert_true(clusters[0].dtype == bool)
                    assert_array_equal(clusters[0].shape, X.shape[1:])
                else:  # ot == 'indices'
                    assert_true(isinstance(clusters[0], tuple))
                # make sure all comparisons were done; for TFCE, no perm
                # should come up empty
                # NOTE(review): `count` is only ever 2 or 5 here (see
                # sig_counts above), so this branch appears dead — confirm
                # whether 8 was intended to match one of the sig_counts.
                if count == 8:
                    assert_true(not np.any(H0 == 0))
                inds = np.where(p < 0.05)[0]
                assert_true(len(inds) == count)
                this_cs = [clusters[ii] for ii in inds]
                this_ps = p[inds]
                # Normalize index-style clusters to boolean masks so the
                # comparisons below are uniform.
                this_stat_map = np.zeros((n_time, n_space), dtype=bool)
                for ci, c in enumerate(this_cs):
                    if isinstance(c, tuple):
                        this_c = np.zeros((n_time, n_space), bool)
                        for x, y in zip(c[0], c[1]):
                            this_stat_map[x, y] = True
                            this_c[x, y] = True
                        this_cs[ci] = this_c
                        c = this_c
                    this_stat_map[c] = True
                # Remember the first run's output as the reference.
                if cs is None:
                    ps = this_ps
                    cs = this_cs
                if stat_map is None:
                    stat_map = this_stat_map
                # Every parameter combination must match the reference.
                assert_array_equal(ps, this_ps)
                assert_true(len(cs) == len(this_cs))
                for c1, c2 in zip(cs, this_cs):
                    assert_array_equal(c1, c2)
                assert_array_equal(stat_map, this_stat_map)
@slow_test
def spatio_temporal_cluster_test_connectivity():
    """Test spatio-temporal cluster permutations."""
    # NOTE(review): this function's name does not start with `test_`, so
    # name-based collectors (nose/pytest) will never run it — confirm and
    # consider renaming it to test_spatio_temporal_cluster_test_connectivity.
    try:
        try:
            from sklearn.feature_extraction.image import grid_to_graph
        except ImportError:
            from scikits.learn.feature_extraction.image import grid_to_graph
    except ImportError:
        # No (scikit-)learn available: silently skip the test.
        return
    condition1_1d, condition2_1d, condition1_2d, condition2_2d = \
        _get_conditions()

    rng = np.random.RandomState(0)
    # Pad each condition with pure-noise spatial points, then reorder to
    # (observations, space, time) for the spatio-temporal test.
    noise1_2d = rng.randn(condition1_2d.shape[0], condition1_2d.shape[1], 10)
    data1_2d = np.transpose(np.dstack((condition1_2d, noise1_2d)), [0, 2, 1])

    noise2_d2 = rng.randn(condition2_2d.shape[0], condition2_2d.shape[1], 10)
    data2_2d = np.transpose(np.dstack((condition2_2d, noise2_d2)), [0, 2, 1])

    conn = grid_to_graph(data1_2d.shape[-1], 1)

    threshold = dict(start=4.0, step=2)
    T_obs, clusters, p_values_conn, hist = \
        spatio_temporal_cluster_test([data1_2d, data2_2d], connectivity=conn,
                                     n_permutations=50, tail=1, seed=1,
                                     threshold=threshold, buffer_size=None)

    buffer_size = data1_2d.size // 10
    T_obs, clusters, p_values_no_conn, hist = \
        spatio_temporal_cluster_test([data1_2d, data2_2d],
                                     n_permutations=50, tail=1, seed=1,
                                     threshold=threshold, n_jobs=2,
                                     buffer_size=buffer_size)

    # Connectivity should not change which points reach significance here.
    assert_equal(np.sum(p_values_conn < 0.05), np.sum(p_values_no_conn < 0.05))

    # make sure results are the same without buffer_size
    T_obs, clusters, p_values2, hist2 = \
        spatio_temporal_cluster_test([data1_2d, data2_2d],
                                     n_permutations=50, tail=1, seed=1,
                                     threshold=threshold, n_jobs=2,
                                     buffer_size=None)
    assert_array_equal(p_values_no_conn, p_values2)

    # Threshold sign must agree with the requested tail.
    assert_raises(ValueError, spatio_temporal_cluster_test,
                  [data1_2d, data2_2d], tail=1, threshold=-2.)
    assert_raises(ValueError, spatio_temporal_cluster_test,
                  [data1_2d, data2_2d], tail=-1, threshold=2.)
    assert_raises(ValueError, spatio_temporal_cluster_test,
                  [data1_2d, data2_2d], tail=0, threshold=-1)
def ttest_1samp(X):
    """Return T-values of a one-sample t-test of X against zero."""
    t_vals, _ = stats.ttest_1samp(X, 0)
    return t_vals
def test_summarize_clusters():
    """Test cluster summary stcs."""
    t_map = np.random.random([1, 20484])
    clusters = [(np.array([0]), np.array([0, 2, 4]))]
    p_vals = np.array([0.02, 0.1])
    h0 = np.array([12, -14, 30])
    clu = (t_map, clusters, p_vals, h0)

    # One significant cluster plus the summary column -> two columns.
    stc_sum = summarize_clusters_stc(clu)
    assert_true(stc_sum.data.shape[1] == 2)

    # With no cluster below the threshold a RuntimeError must be raised.
    clu[2][0] = 0.3
    assert_raises(RuntimeError, summarize_clusters_stc, clu)
# Allow running this test module directly as a script.
run_tests_if_main()
| |
#!/usr/bin/env python
"""
Project-wide application configuration.
DO NOT STORE SECRETS, PASSWORDS, ETC. IN THIS FILE.
They will be exposed to users. Use environment variables instead.
See get_secrets() below for a fast way to access them.
"""
import os
from authomatic.providers import oauth2
from authomatic import Authomatic
"""
NAMES
"""
# Project name to be used in urls
# Use dashes, not underscores!
PROJECT_SLUG = 'ferguson-project'
# Project name to be used in file paths
PROJECT_FILENAME = 'ferguson-project'
# The name of the repository containing the source
REPOSITORY_NAME = 'ferguson-project'
GITHUB_USERNAME = 'stlpublicradio'
REPOSITORY_URL = 'git@github.com:%s/%s.git' % (GITHUB_USERNAME, REPOSITORY_NAME)
REPOSITORY_ALT_URL = None # 'git@bitbucket.org:nprapps/%s.git' % REPOSITORY_NAME'
# Project name used for assets rig
# Should stay the same, even if PROJECT_SLUG changes
ASSETS_SLUG = 'ferguson-project'
"""
DEPLOYMENT
"""
PRODUCTION_S3_BUCKET = {
'bucket_name': 'apps.stlpublicradio.org',
'region': 'us-east-1'
}
STAGING_S3_BUCKET = {
'bucket_name': 'stlpr-stg',
'region': 'us-east-1'
}
ASSETS_S3_BUCKET = {
'bucket_name': 'stlpr-assets',
'region': 'us-east-1'
}
DEFAULT_MAX_AGE = 20
PRODUCTION_SERVERS = ['']
STAGING_SERVERS = ['']
# Should code be deployed to the web/cron servers?
DEPLOY_TO_SERVERS = False
SERVER_USER = 'ubuntu'
SERVER_PYTHON = 'python2.7'
SERVER_PROJECT_PATH = '/home/%s/apps/%s' % (SERVER_USER, PROJECT_FILENAME)
SERVER_REPOSITORY_PATH = '%s/repository' % SERVER_PROJECT_PATH
SERVER_VIRTUALENV_PATH = '%s/virtualenv' % SERVER_PROJECT_PATH
# Should the crontab file be installed on the servers?
# If True, DEPLOY_TO_SERVERS must also be True
DEPLOY_CRONTAB = False
# Should the service configurations be installed on the servers?
# If True, DEPLOY_TO_SERVERS must also be True
DEPLOY_SERVICES = False
UWSGI_SOCKET_PATH = '/tmp/%s.uwsgi.sock' % PROJECT_FILENAME
# Services are the server-side services we want to enable and configure.
# A three-tuple following this format:
# (service name, service deployment path, service config file extension)
SERVER_SERVICES = [
('app', SERVER_REPOSITORY_PATH, 'ini'),
('uwsgi', '/etc/init', 'conf'),
('nginx', '/etc/nginx/locations-enabled', 'conf'),
]
# These variables will be set at runtime. See configure_targets() below
S3_BUCKET = None
S3_BASE_URL = None
S3_DEPLOY_URL = None
SERVERS = []
SERVER_BASE_URL = None
SERVER_LOG_PATH = None
DEBUG = True
"""
COPY EDITING
"""
COPY_GOOGLE_DOC_KEY = '0Ahk37aM1t_GZdEJBV2pFSlZaT1FwTHBfVkdseDNudHc'
COPY_PATH = 'data/copy.xlsx'
"""
SHARING
"""
SHARE_URL = 'http://%s/%s/' % (PRODUCTION_S3_BUCKET['bucket_name'], PROJECT_SLUG)
# """
# ADS
# """
#
# NPR_DFP = {
# 'STORY_ID': '1002',
# 'TARGET': 'homepage',
# 'ENVIRONMENT': 'NPRTEST',
# 'TESTSERVER': 'false'
# }
"""
SERVICES
"""
NPR_GOOGLE_ANALYTICS = {
'ACCOUNT_ID': 'UA-2139719-1',
'DOMAIN': PRODUCTION_S3_BUCKET['bucket_name'],
'TOPICS': '' # e.g. '[1014,3,1003,1002,1001]'
}
#DISQUS_API_KEY = ''
#DISQUS_UUID = ''
"""
OAUTH
"""
GOOGLE_OAUTH_CREDENTIALS_PATH = '~/.google_oauth_credentials'
authomatic_config = {
'google': {
'id': 1,
'class_': oauth2.Google,
'consumer_key': os.environ.get('GOOGLE_OAUTH_CLIENT_ID'),
'consumer_secret': os.environ.get('GOOGLE_OAUTH_CONSUMER_SECRET'),
'scope': ['https://www.googleapis.com/auth/drive', 'https://www.googleapis.com/auth/userinfo.email'],
'offline': True,
},
}
authomatic = Authomatic(authomatic_config, os.environ.get('AUTHOMATIC_SALT'))
"""
Utilities
"""
def get_secrets():
    """
    A method for accessing our secrets.

    Returns a dict of every environment variable whose name starts with
    PROJECT_SLUG, with the 'PROJECT_SLUG_' prefix stripped from the keys.
    """
    prefix_len = len(PROJECT_SLUG) + 1
    return {
        key[prefix_len:]: value
        for key, value in os.environ.items()
        if key.startswith(PROJECT_SLUG)
    }
def configure_targets(deployment_target):
    """
    Configure deployment targets. Abstracted so this can be
    overridden for rendering before deployment.
    """
    global S3_BUCKET
    global S3_BASE_URL
    global S3_DEPLOY_URL
    global SERVERS
    global SERVER_BASE_URL
    global SERVER_LOG_PATH
    global DEBUG
    global DEPLOYMENT_TARGET
    global DISQUS_SHORTNAME
    global ASSETS_MAX_AGE

    # Disqus is disabled on every target at the moment.
    DISQUS_SHORTNAME = ''

    # Per-target settings; everything derived from them is built below.
    if deployment_target == 'production':
        S3_BUCKET = PRODUCTION_S3_BUCKET
        SERVERS = PRODUCTION_SERVERS
        DEBUG = False
        ASSETS_MAX_AGE = 86400
    elif deployment_target == 'staging':
        S3_BUCKET = STAGING_S3_BUCKET
        SERVERS = STAGING_SERVERS
        DEBUG = True
        ASSETS_MAX_AGE = 20
    else:
        # Local development: no bucket, no servers.
        S3_BUCKET = None
        SERVERS = []
        DEBUG = True
        ASSETS_MAX_AGE = 20

    if S3_BUCKET:
        S3_BASE_URL = 'http://%s/%s' % (S3_BUCKET['bucket_name'], PROJECT_SLUG)
        S3_DEPLOY_URL = 's3://%s/%s' % (S3_BUCKET['bucket_name'], PROJECT_SLUG)
        SERVER_BASE_URL = 'http://%s/%s' % (SERVERS[0], PROJECT_SLUG)
    else:
        S3_BASE_URL = 'http://127.0.0.1:8000'
        S3_DEPLOY_URL = None
        SERVER_BASE_URL = 'http://127.0.0.1:8001/%s' % PROJECT_SLUG

    DEPLOYMENT_TARGET = deployment_target
"""
Run automated configuration
"""
DEPLOYMENT_TARGET = os.environ.get('DEPLOYMENT_TARGET', None)
configure_targets(DEPLOYMENT_TARGET)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Auth driver for ldap. Includes FakeLdapDriver.
It should be easy to create a replacement for this driver supporting
other backends by creating another class that exposes the same
public methods.
"""
import functools
import sys
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
# All knobs mapping nova's auth concepts onto an LDAP directory layout.
ldap_opts = [
    cfg.IntOpt('ldap_schema_version',
               default=2,
               help='Current version of the LDAP schema'),
    cfg.StrOpt('ldap_url',
               default='ldap://localhost',
               help='Point this at your ldap server'),
    cfg.StrOpt('ldap_password',
               default='changeme',
               help='LDAP password'),
    cfg.StrOpt('ldap_user_dn',
               default='cn=Manager,dc=example,dc=com',
               help='DN of admin user'),
    cfg.StrOpt('ldap_user_id_attribute',
               default='uid',
               help='Attribute to use as id'),
    cfg.StrOpt('ldap_user_name_attribute',
               default='cn',
               help='Attribute to use as name'),
    cfg.StrOpt('ldap_user_unit',
               default='Users',
               help='OID for Users'),
    cfg.StrOpt('ldap_user_subtree',
               default='ou=Users,dc=example,dc=com',
               help='OU for Users'),
    cfg.BoolOpt('ldap_user_modify_only',
                default=False,
                help='Modify user attributes instead of creating/deleting'),
    cfg.StrOpt('ldap_project_subtree',
               default='ou=Groups,dc=example,dc=com',
               help='OU for Projects'),
    cfg.StrOpt('role_project_subtree',
               default='ou=Groups,dc=example,dc=com',
               help='OU for Roles'),
    # NOTE(vish): mapping with these flags is necessary because we're going
    #             to tie in to an existing ldap schema
    cfg.StrOpt('ldap_cloudadmin',
               default='cn=cloudadmins,ou=Groups,dc=example,dc=com',
               help='cn for Cloud Admins'),
    cfg.StrOpt('ldap_itsec',
               default='cn=itsec,ou=Groups,dc=example,dc=com',
               help='cn for ItSec'),
    cfg.StrOpt('ldap_sysadmin',
               default='cn=sysadmins,ou=Groups,dc=example,dc=com',
               help='cn for Sysadmins'),
    cfg.StrOpt('ldap_netadmin',
               default='cn=netadmins,ou=Groups,dc=example,dc=com',
               help='cn for NetAdmins'),
    cfg.StrOpt('ldap_developer',
               default='cn=developers,ou=Groups,dc=example,dc=com',
               help='cn for Developers'),
]

FLAGS = flags.FLAGS
FLAGS.register_opts(ldap_opts)

LOG = logging.getLogger(__name__)

# Use real memcached when servers are configured; otherwise fall back to
# nova's in-process dict-backed cache so the driver works stand-alone.
if FLAGS.memcached_servers:
    import memcache
else:
    from nova.common import memorycache as memcache

# TODO(vish): make an abstract base class with the same public methods
#             to define a set interface for AuthDrivers. I'm delaying
#             creating this now because I'm expecting an auth refactor
#             in which we may want to change the interface a bit more.
def _clean(attr):
"""Clean attr for insertion into ldap"""
if attr is None:
return None
if isinstance(attr, unicode):
return str(attr)
return attr
def sanitize(fn):
    """Decorator that runs _clean() over every positional and keyword
    argument before invoking the wrapped method.

    Bug fix: keyword arguments must be iterated via ``kwargs.items()``.
    Iterating the dict directly yields only the keys, so unpacking
    ``(k, v)`` raised ValueError for any call that used keyword
    arguments.
    """
    @functools.wraps(fn)
    def _wrapped(self, *args, **kwargs):
        args = [_clean(x) for x in args]
        kwargs = dict((k, _clean(v)) for (k, v) in kwargs.items())
        return fn(self, *args, **kwargs)
    # Redundant with functools.wraps on Python 2 (func_name aliases
    # __name__), kept for byte-compatibility with existing callers.
    _wrapped.func_name = fn.func_name
    return _wrapped
class LDAPWrapper(object):
    """Lazily-connecting wrapper around a python-ldap connection.

    Binds on first use and re-binds/retries an operation once when the
    server reports SERVER_DOWN.
    """

    def __init__(self, ldap, url, user, password):
        # `ldap` is the dynamically imported ldap module itself.
        self.ldap = ldap
        self.url = url
        self.user = user          # bind DN
        self.password = password  # bind password
        self.conn = None          # created lazily by connect()

    def __wrap_reconnect(f):
        # Class-body decorator: `f` maps a live connection object to the
        # bound method that performs the actual LDAP operation.
        def inner(self, *args, **kwargs):
            if self.conn is None:
                # First use: bind, then run the operation.
                self.connect()
                return f(self.conn)(*args, **kwargs)
            else:
                try:
                    return f(self.conn)(*args, **kwargs)
                except self.ldap.SERVER_DOWN:
                    # Server went away: re-bind once and retry.
                    self.connect()
                    return f(self.conn)(*args, **kwargs)
        return inner

    def connect(self):
        # Initialize and bind; on failure leave self.conn None so the
        # next call retries from scratch.
        try:
            self.conn = self.ldap.initialize(self.url)
            self.conn.simple_bind_s(self.user, self.password)
        except self.ldap.SERVER_DOWN:
            self.conn = None
            raise

    # Reconnect-aware proxies for the common LDAP operations.
    search_s = __wrap_reconnect(lambda conn: conn.search_s)
    add_s = __wrap_reconnect(lambda conn: conn.add_s)
    delete_s = __wrap_reconnect(lambda conn: conn.delete_s)
    modify_s = __wrap_reconnect(lambda conn: conn.modify_s)
class LdapDriver(object):
    """Ldap Auth driver

    Defines enter and exit and therefore supports the with/as syntax.
    """

    # Schema-v2 defaults; __init__ rewrites these class attributes when
    # FLAGS.ldap_schema_version == 1.
    project_pattern = '(owner=*)'
    isadmin_attribute = 'isNovaAdmin'
    project_attribute = 'owner'
    project_objectclass = 'groupOfNames'
    # Connection and memcache client are shared across all instances.
    conn = None
    mc = None
def __init__(self):
    """Imports the LDAP module"""
    # Imported dynamically so the module can be loaded without python-ldap.
    self.ldap = __import__('ldap')
    # Schema v1 used different attribute/objectclass names; patch the
    # class-level defaults so every instance sees the v1 names.
    if FLAGS.ldap_schema_version == 1:
        LdapDriver.project_pattern = '(objectclass=novaProject)'
        LdapDriver.isadmin_attribute = 'isAdmin'
        LdapDriver.project_attribute = 'projectManager'
        LdapDriver.project_objectclass = 'novaProject'
    # Local cache is only active inside a `with driver:` block.
    self.__cache = None
    # Lazily create the process-wide shared connection and cache client.
    if LdapDriver.conn is None:
        LdapDriver.conn = LDAPWrapper(self.ldap, FLAGS.ldap_url,
                                      FLAGS.ldap_user_dn,
                                      FLAGS.ldap_password)
    if LdapDriver.mc is None:
        LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0)
def __enter__(self):
    """Enable the local lookup cache for the duration of the block."""
    # TODO(yorik-sar): Should be per-request cache, not per-driver-request
    self.__cache = {}
    return self
def __exit__(self, exc_type, exc_value, traceback):
    """Drop the local cache; never swallow exceptions."""
    self.__cache = None
    return False
def __local_cache(key_fmt):  # pylint: disable=E0213
    """Wrap function to cache its result in self.__cache.

    Works only with functions with one fixed argument.
    """
    # NOTE(review): outside a `with driver:` block self.__cache is None,
    # so the subscript raises TypeError (not KeyError) and is uncaught —
    # confirm all decorated methods are only used inside the context.
    def do_wrap(fn):
        @functools.wraps(fn)
        def inner(self, arg, **kwargs):
            cache_key = key_fmt % (arg,)
            try:
                res = self.__cache[cache_key]
                LOG.debug('Local cache hit for %s by key %s' %
                          (fn.__name__, cache_key))
                return res
            except KeyError:
                # Miss: compute, store, and return.
                res = fn(self, arg, **kwargs)
                self.__cache[cache_key] = res
                return res
        return inner
    return do_wrap
@sanitize
@__local_cache('uid_user-%s')
def get_user(self, uid):
    """Retrieve user by id"""
    attrs = self.__get_ldap_user(uid)
    if attrs is None:
        raise exception.LDAPUserNotFound(user_id=uid)
    return self.__to_user(attrs)
@sanitize
def get_user_from_access_key(self, access):
    """Retrieve user by access key"""
    # The access-key -> user-DN mapping is memoized in memcache.
    cache_key = 'uak_dn_%s' % (access,)
    user_dn = self.mc.get(cache_key)
    if user_dn:
        user = self.__to_user(
            self.__find_object(user_dn, scope=self.ldap.SCOPE_BASE))
        if user:
            if user['access'] == access:
                return user
            else:
                # Stale mapping (the user's key changed): invalidate it.
                self.mc.set(cache_key, None)
    # Cache miss or stale entry: search the whole user subtree.
    query = '(accessKey=%s)' % access
    dn = FLAGS.ldap_user_subtree
    user_obj = self.__find_object(dn, query)
    user = self.__to_user(user_obj)
    if user:
        # Remember the DN for the next lookup.
        self.mc.set(cache_key, user_obj['dn'][0])
    return user
@sanitize
@__local_cache('pid_project-%s')
def get_project(self, pid):
    """Retrieve project by id"""
    project_dn = self.__project_to_dn(pid, search=False)
    entry = self.__find_object(project_dn, LdapDriver.project_pattern,
                               scope=self.ldap.SCOPE_BASE)
    return self.__to_project(entry)
@sanitize
def get_users(self):
    """Retrieve list of users"""
    entries = self.__find_objects(FLAGS.ldap_user_subtree,
                                  '(objectclass=novaUser)')
    # __to_user may return None for malformed entries; drop those.
    converted = (self.__to_user(entry) for entry in entries)
    return [user for user in converted if user is not None]
@sanitize
def get_projects(self, uid=None):
    """Retrieve list of projects"""
    pattern = LdapDriver.project_pattern
    if uid:
        # Restrict to projects that list the user as a member.
        pattern = '(&%s(member=%s))' % (pattern, self.__uid_to_dn(uid))
    entries = self.__find_objects(FLAGS.ldap_project_subtree, pattern)
    return [self.__to_project(entry) for entry in entries]
@sanitize
def create_user(self, name, access_key, secret_key, is_admin):
    """Create a user"""
    if self.__user_exists(name):
        raise exception.LDAPUserExists(user=name)
    if FLAGS.ldap_user_modify_only:
        # Modify-only mode: never create new LDAP entries, just attach
        # nova's attributes to a pre-existing user entry.
        if self.__ldap_user_exists(name):
            # Retrieve user by name
            user = self.__get_ldap_user(name)
            # Entry could be malformed, test for missing attrs.
            # Malformed entries are useless, replace attributes found.
            attr = []
            if 'secretKey' in user.keys():
                attr.append((self.ldap.MOD_REPLACE, 'secretKey',
                             [secret_key]))
            else:
                attr.append((self.ldap.MOD_ADD, 'secretKey',
                             [secret_key]))
            if 'accessKey' in user.keys():
                attr.append((self.ldap.MOD_REPLACE, 'accessKey',
                             [access_key]))
            else:
                attr.append((self.ldap.MOD_ADD, 'accessKey',
                             [access_key]))
            if LdapDriver.isadmin_attribute in user.keys():
                attr.append((self.ldap.MOD_REPLACE,
                             LdapDriver.isadmin_attribute,
                             [str(is_admin).upper()]))
            else:
                attr.append((self.ldap.MOD_ADD,
                             LdapDriver.isadmin_attribute,
                             [str(is_admin).upper()]))
            self.conn.modify_s(self.__uid_to_dn(name), attr)
            return self.get_user(name)
        else:
            raise exception.LDAPUserNotFound(user_id=name)
    else:
        # Normal mode: create a brand-new novaUser entry.
        attr = [
            ('objectclass', ['person',
                             'organizationalPerson',
                             'inetOrgPerson',
                             'novaUser']),
            ('ou', [FLAGS.ldap_user_unit]),
            (FLAGS.ldap_user_id_attribute, [name]),
            ('sn', [name]),
            (FLAGS.ldap_user_name_attribute, [name]),
            ('secretKey', [secret_key]),
            ('accessKey', [access_key]),
            (LdapDriver.isadmin_attribute, [str(is_admin).upper()]),
        ]
        self.conn.add_s(self.__uid_to_dn(name), attr)
        return self.__to_user(dict(attr))
@sanitize
def create_project(self, name, manager_uid,
                   description=None, member_uids=None):
    """Create a project"""
    if self.__project_exists(name):
        raise exception.ProjectExists(project=name)
    if not self.__user_exists(manager_uid):
        raise exception.LDAPUserNotFound(user_id=manager_uid)
    manager_dn = self.__uid_to_dn(manager_uid)
    # 'description' is mandatory in the schema; default it to the name.
    if description is None:
        description = name
    # Resolve every requested member, validating each uid.
    members = []
    if member_uids is not None:
        for member_uid in member_uids:
            if not self.__user_exists(member_uid):
                raise exception.LDAPUserNotFound(user_id=member_uid)
            members.append(self.__uid_to_dn(member_uid))
    # The manager must always appear in 'member' (mandatory attribute).
    if manager_dn not in members:
        members.append(manager_dn)
    project_attrs = [
        ('objectclass', [LdapDriver.project_objectclass]),
        ('cn', [name]),
        ('description', [description]),
        (LdapDriver.project_attribute, [manager_dn]),
        ('member', members)]
    project_dn = self.__project_to_dn(name, search=False)
    self.conn.add_s(project_dn, project_attrs)
    return self.__to_project(dict(project_attrs))
@sanitize
def modify_project(self, project_id, manager_uid=None, description=None):
    """Modify an existing project

    Either or both of `manager_uid` and `description` may be given; a
    call with neither is a no-op.

    Bug fix: the trailing project-membership check for the manager now
    runs only when a new manager was actually supplied. Previously it
    ran unconditionally, so a description-only modification called
    is_in_project(None, ...) and raised LDAPUserNotFound.
    """
    if not manager_uid and not description:
        return
    attr = []
    if manager_uid:
        if not self.__user_exists(manager_uid):
            raise exception.LDAPUserNotFound(user_id=manager_uid)
        manager_dn = self.__uid_to_dn(manager_uid)
        attr.append((self.ldap.MOD_REPLACE, LdapDriver.project_attribute,
                     manager_dn))
    if description:
        attr.append((self.ldap.MOD_REPLACE, 'description', description))
    dn = self.__project_to_dn(project_id)
    self.conn.modify_s(dn, attr)
    # Make sure the (new) manager is also a member of the project group.
    if manager_uid and not self.is_in_project(manager_uid, project_id):
        self.add_to_project(manager_uid, project_id)
@sanitize
def add_to_project(self, uid, project_id):
    """Add user to project"""
    project_dn = self.__project_to_dn(project_id)
    return self.__add_to_group(uid, project_dn)
@sanitize
def remove_from_project(self, uid, project_id):
    """Remove user from project"""
    project_dn = self.__project_to_dn(project_id)
    return self.__remove_from_group(uid, project_dn)
@sanitize
def is_in_project(self, uid, project_id):
    """Check if user is in project"""
    project_dn = self.__project_to_dn(project_id)
    return self.__is_in_group(uid, project_dn)
@sanitize
def has_role(self, uid, role, project_id=None):
    """Check if user has role

    If project is specified, it checks for local role, otherwise it
    checks for global role
    """
    return self.__is_in_group(uid, self.__role_to_dn(role, project_id))
@sanitize
def add_role(self, uid, role, project_id=None):
    """Add role for user (or user and project)"""
    role_dn = self.__role_to_dn(role, project_id)
    if self.__group_exists(role_dn):
        return self.__add_to_group(uid, role_dn)
    # Lazily create the role group on first use; the creating user
    # becomes its first member.
    description = '%s role for %s' % (role, project_id)
    self.__create_group(role_dn, role, uid, description)
@sanitize
def remove_role(self, uid, role, project_id=None):
    """Remove role for user (or user and project)"""
    role_dn = self.__role_to_dn(role, project_id)
    return self.__remove_from_group(uid, role_dn)
@sanitize
def get_user_roles(self, uid, project_id=None):
    """Retrieve list of roles for user (or user and project)"""
    if project_id is None:
        # NOTE(vish): This is unneccesarily slow, but since we can't
        #             guarantee that the global roles are located
        #             together in the ldap tree, we're doing this version.
        return [role for role in FLAGS.allowed_roles
                if self.__is_in_group(uid, self.__role_to_dn(role))]
    # Local roles: non-project groupOfNames under the project DN that
    # list the user as a member.
    project_dn = self.__project_to_dn(project_id)
    query = ('(&(&(objectclass=groupOfNames)(!%s))(member=%s))' %
             (LdapDriver.project_pattern, self.__uid_to_dn(uid)))
    return [entry['cn'][0]
            for entry in self.__find_objects(project_dn, query)]
@sanitize
def delete_user(self, uid):
    """Delete a user"""
    if not self.__user_exists(uid):
        raise exception.LDAPUserNotFound(user_id=uid)
    # Drop all role/project memberships before touching the entry.
    self.__remove_from_all(uid)
    if FLAGS.ldap_user_modify_only:
        # Modify-only mode: strip nova's attributes but keep the entry.
        # Delete attributes
        attr = []
        # Retrieve user by name
        user = self.__get_ldap_user(uid)
        if 'secretKey' in user.keys():
            attr.append((self.ldap.MOD_DELETE, 'secretKey',
                         user['secretKey']))
        if 'accessKey' in user.keys():
            attr.append((self.ldap.MOD_DELETE, 'accessKey',
                         user['accessKey']))
        if LdapDriver.isadmin_attribute in user.keys():
            attr.append((self.ldap.MOD_DELETE,
                         LdapDriver.isadmin_attribute,
                         user[LdapDriver.isadmin_attribute]))
        self.conn.modify_s(self.__uid_to_dn(uid), attr)
    else:
        # Delete entry
        self.conn.delete_s(self.__uid_to_dn(uid))
@sanitize
def delete_project(self, project_id):
    """Delete a project"""
    # Remove the project's role sub-groups before the group itself.
    project_dn = self.__project_to_dn(project_id)
    self.__delete_roles(project_dn)
    self.__delete_group(project_dn)
@sanitize
def modify_user(self, uid, access_key=None, secret_key=None, admin=None):
    """Modify an existing user"""
    # Nothing requested -> nothing to do.
    if not access_key and not secret_key and admin is None:
        return
    mods = []
    if access_key:
        mods.append((self.ldap.MOD_REPLACE, 'accessKey', access_key))
    if secret_key:
        mods.append((self.ldap.MOD_REPLACE, 'secretKey', secret_key))
    if admin is not None:
        mods.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute,
                     str(admin).upper()))
    self.conn.modify_s(self.__uid_to_dn(uid), mods)
def __user_exists(self, uid):
    """Check if user exists"""
    try:
        user = self.get_user(uid)
    except exception.LDAPUserNotFound:
        return False
    return user is not None
def __ldap_user_exists(self, uid):
    """Check if the user exists in ldap"""
    entry = self.__get_ldap_user(uid)
    return entry is not None
def __project_exists(self, project_id):
    """Check if project exists"""
    project = self.get_project(project_id)
    return project is not None
@__local_cache('uid_attrs-%s')
def __get_ldap_user(self, uid):
    """Retrieve LDAP user entry by id"""
    query = ('(&(%s=%s)(objectclass=novaUser))' %
             (FLAGS.ldap_user_id_attribute, uid))
    return self.__find_object(FLAGS.ldap_user_subtree, query)
def __find_object(self, dn, query=None, scope=None):
    """Find an object by dn and query"""
    matches = self.__find_objects(dn, query, scope)
    # First match or None when nothing was found.
    return matches[0] if matches else None
def __find_dns(self, dn, query=None, scope=None):
    """Find dns by query"""
    if scope is None:
        # One of the ldap scope constants is 0, so compare against None.
        scope = self.ldap.SCOPE_SUBTREE
    try:
        res = self.conn.search_s(dn, scope, query)
    except self.ldap.NO_SUCH_OBJECT:
        return []
    # Keep only the DN of each (dn, attributes) result pair.
    return [entry_dn for entry_dn, _attributes in res]
def __find_objects(self, dn, query=None, scope=None):
    """Find objects by query"""
    if scope is None:
        # One of the ldap scope constants is 0, so compare against None.
        scope = self.ldap.SCOPE_SUBTREE
    if query is None:
        query = "(objectClass=*)"
    try:
        res = self.conn.search_s(dn, scope, query)
    except self.ldap.NO_SUCH_OBJECT:
        return []
    # Stash each entry's DN inside its own attribute dict so callers can
    # recover it from the attributes alone.
    # FIXME(yorik-sar): Whole driver should be refactored to
    # prevent this hack
    results = []
    for entry_dn, attrs in res:
        attrs['dn'] = [entry_dn]
        results.append(attrs)
    return results
def __find_role_dns(self, tree):
    """Find dns of role objects in given tree"""
    # Roles are groupOfNames entries that are NOT project groups.
    role_query = ('(&(objectclass=groupOfNames)(!%s))' %
                  LdapDriver.project_pattern)
    return self.__find_dns(tree, role_query)
def __find_group_dns_with_member(self, tree, uid):
    """Find dns of group objects in a given tree that contain member"""
    member_dn = self.__uid_to_dn(uid)
    query = ('(&(objectclass=groupOfNames)(member=%s))' % member_dn)
    return self.__find_dns(tree, query)
def __group_exists(self, dn):
    """Check if group exists"""
    match = self.__find_object(dn, '(objectclass=groupOfNames)')
    return match is not None
def __role_to_dn(self, role, project_id=None):
    """Convert role to corresponding dn"""
    if project_id is None:
        # Global roles live at flag-configured DNs (ldap_cloudadmin, ...).
        return FLAGS["ldap_%s" % role]
    # Local roles are sub-groups of the project entry.
    return 'cn=%s,%s' % (role, self.__project_to_dn(project_id))
def __create_group(self, group_dn, name, uid,
                   description, member_uids=None):
    """Create a group"""
    if self.__group_exists(group_dn):
        raise exception.LDAPGroupExists(group=name)
    # Resolve every requested member, validating each uid as we go.
    members = []
    if member_uids is not None:
        for member_uid in member_uids:
            if not self.__user_exists(member_uid):
                raise exception.LDAPUserNotFound(user_id=member_uid)
            members.append(self.__uid_to_dn(member_uid))
    # The owner is always a member ('member' is a mandatory attribute).
    owner_dn = self.__uid_to_dn(uid)
    if owner_dn not in members:
        members.append(owner_dn)
    group_attrs = [
        ('objectclass', ['groupOfNames']),
        ('cn', [name]),
        ('description', [description]),
        ('member', members)]
    self.conn.add_s(group_dn, group_attrs)
def __is_in_group(self, uid, group_dn):
    """Check if user is in group"""
    if not self.__user_exists(uid):
        raise exception.LDAPUserNotFound(user_id=uid)
    if not self.__group_exists(group_dn):
        return False
    member_filter = '(member=%s)' % self.__uid_to_dn(uid)
    match = self.__find_object(group_dn, member_filter,
                               self.ldap.SCOPE_BASE)
    return match is not None
def __add_to_group(self, uid, group_dn):
    """Add user to group."""
    # Validate both sides of the membership before touching the
    # directory, then issue a single MOD_ADD on the member attribute.
    if not self.__user_exists(uid):
        raise exception.LDAPUserNotFound(user_id=uid)
    if not self.__group_exists(group_dn):
        raise exception.LDAPGroupNotFound(group_id=group_dn)
    if self.__is_in_group(uid, group_dn):
        raise exception.LDAPMembershipExists(uid=uid, group_dn=group_dn)
    modlist = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))]
    self.conn.modify_s(group_dn, modlist)
def __remove_from_group(self, uid, group_dn):
    """Remove user from group, and from any sub-group that lists them.

    :raises exception.LDAPGroupNotFound: if ``group_dn`` does not exist.
    :raises exception.LDAPUserNotFound: if ``uid`` is unknown.
    :raises exception.LDAPGroupMembershipNotFound: if the user is not a
        member of the group.
    """
    if not self.__group_exists(group_dn):
        raise exception.LDAPGroupNotFound(group_id=group_dn)
    if not self.__user_exists(uid):
        raise exception.LDAPUserNotFound(user_id=uid)
    if not self.__is_in_group(uid, group_dn):
        raise exception.LDAPGroupMembershipNotFound(user_id=uid,
                                                    group_id=group_dn)
    # NOTE(vish): remove user from group and any sub_groups
    # The search is rooted at group_dn, so the result includes the
    # group itself as well as any matching sub-groups beneath it.
    sub_dns = self.__find_group_dns_with_member(group_dn, uid)
    for sub_dn in sub_dns:
        self.__safe_remove_from_group(uid, sub_dn)
def __safe_remove_from_group(self, uid, group_dn):
    """Remove user from group, deleting group if user is last member."""
    # FIXME(vish): what if deleted user is a project manager?
    attr = [(self.ldap.MOD_DELETE, 'member', self.__uid_to_dn(uid))]
    try:
        self.conn.modify_s(group_dn, attr)
    except self.ldap.OBJECT_CLASS_VIOLATION:
        # groupOfNames requires at least one member, so removing the
        # last one violates the schema; delete the whole group instead.
        LOG.debug(_("Attempted to remove the last member of a group. "
                    "Deleting the group at %s instead."), group_dn)
        self.__delete_group(group_dn)
def __remove_from_all(self, uid):
    """Remove user from all roles and projects."""
    if not self.__user_exists(uid):
        raise exception.LDAPUserNotFound(user_id=uid)
    # NOTE(review): roles are searched under FLAGS.role_project_subtree
    # while projects use FLAGS.ldap_project_subtree -- presumably two
    # distinct config options; confirm the asymmetry is intentional.
    role_dns = self.__find_group_dns_with_member(
        FLAGS.role_project_subtree, uid)
    for role_dn in role_dns:
        self.__safe_remove_from_group(uid, role_dn)
    project_dns = self.__find_group_dns_with_member(
        FLAGS.ldap_project_subtree, uid)
    for project_dn in project_dns:
        self.__safe_remove_from_group(uid, project_dn)
def __delete_group(self, group_dn):
    """Delete Group."""
    if self.__group_exists(group_dn):
        self.conn.delete_s(group_dn)
    else:
        raise exception.LDAPGroupNotFound(group_id=group_dn)
def __delete_roles(self, project_dn):
    """Delete all roles for project."""
    # Each role under the project dn is itself a group entry.
    role_dns = self.__find_role_dns(project_dn)
    for role_dn in role_dns:
        self.__delete_group(role_dn)
def __to_project(self, attr):
    """Convert ldap attributes to Project object (a plain dict)."""
    if attr is None:
        return None
    manager_dn = attr[LdapDriver.project_attribute][0]
    return {
        'id': attr['cn'][0],
        'name': attr['cn'][0],
        'project_manager_id': self.__dn_to_uid(manager_dn),
        'description': attr.get('description', [None])[0],
        'member_ids': [self.__dn_to_uid(member_dn)
                       for member_dn in attr.get('member', [])]}
@__local_cache('uid_dn-%s')
def __uid_to_dn(self, uid, search=True):
    """Convert uid to dn.

    :param search: when True, look the user up in the directory and
        prefer the dn actually stored there; otherwise (or when no
        entry matches) return a dn generated from the configured
        user-id attribute and user subtree.
    """
    # NOTE(review): results appear to be memoized per-uid by the
    # __local_cache decorator (defined elsewhere) -- confirm.
    # By default return a generated DN
    userdn = (FLAGS.ldap_user_id_attribute + '=%s,%s'
              % (uid, FLAGS.ldap_user_subtree))
    if search:
        query = ('%s=%s' % (FLAGS.ldap_user_id_attribute, uid))
        user = self.__find_dns(FLAGS.ldap_user_subtree, query)
        if len(user) > 0:
            userdn = user[0]
    return userdn
@__local_cache('pid_dn-%s')
def __project_to_dn(self, pid, search=True):
    """Convert pid to dn.

    :param search: when True, look the project up in the directory and
        prefer the stored dn; otherwise (or when nothing matches)
        return a generated ``cn=<pid>,<project subtree>`` dn.
    """
    # By default return a generated DN
    projectdn = ('cn=%s,%s' % (pid, FLAGS.ldap_project_subtree))
    if search:
        query = ('(&(cn=%s)%s)' % (pid, LdapDriver.project_pattern))
        project = self.__find_dns(FLAGS.ldap_project_subtree, query)
        if len(project) > 0:
            projectdn = project[0]
    return projectdn
@staticmethod
def __to_user(attr):
    """Convert ldap attributes to User object (a plain dict).

    Returns None when ``attr`` is None or when the entry lacks any of
    the three marker attributes (accessKey, secretKey, isadmin) that a
    complete nova user record carries.
    """
    if attr is None:
        return None
    # Idiom fix: membership tests directly on the dict instead of the
    # O(n) "in attr.keys()" form.
    if ('accessKey' in attr and 'secretKey' in attr and
            LdapDriver.isadmin_attribute in attr):
        return {
            'id': attr[FLAGS.ldap_user_id_attribute][0],
            'name': attr[FLAGS.ldap_user_name_attribute][0],
            'access': attr['accessKey'][0],
            'secret': attr['secretKey'][0],
            'admin': (attr[LdapDriver.isadmin_attribute][0] == 'TRUE')}
    return None
@__local_cache('dn_uid-%s')
def __dn_to_uid(self, dn):
    """Convert user dn to uid.

    Performs a base-scoped search on ``dn`` itself for a novaUser
    entry and returns its configured user-id attribute value.
    """
    # NOTE(review): if the dn does not exist, __find_object presumably
    # returns None and the subscript below raises TypeError -- confirm.
    query = '(objectclass=novaUser)'
    user = self.__find_object(dn, query, scope=self.ldap.SCOPE_BASE)
    return user[FLAGS.ldap_user_id_attribute][0]
class FakeLdapDriver(LdapDriver):
    """Fake Ldap Auth driver.

    Substitutes the in-memory ``nova.auth.fakeldap`` module for the
    real ``ldap`` package via ``sys.modules``, so the base driver runs
    without a live LDAP server (e.g. in tests).
    """
    def __init__(self):
        # Must be installed before LdapDriver.__init__ touches "ldap".
        import nova.auth.fakeldap
        sys.modules['ldap'] = nova.auth.fakeldap
        super(FakeLdapDriver, self).__init__()
| |
# -*- coding: utf8 -*-
# Most of the test cases here are from isodate package from:
# - Homepage: http://cheeseshop.python.org/pypi/isodate
# - Author: Gerhard Weis <gerhard weis at proclos com>
# - License: BSD
import os
import unittest
from datetime import date, datetime, time
from isodate import parse_date, ISO8601Error, date_isoformat, FixedOffset, UTC, \
DATE_CENTURY, DATE_YEAR, DATE_EXT_MONTH, DATE_EXT_COMPLETE, DATE_BAS_COMPLETE, \
DATE_BAS_ORD_COMPLETE, DATE_EXT_ORD_COMPLETE, DATE_BAS_WEEK, \
DATE_BAS_WEEK_COMPLETE, DATE_EXT_WEEK, DATE_EXT_WEEK_COMPLETE, \
TIME_BAS_MINUTE, TZ_BAS, TIME_EXT_MINUTE, TZ_EXT, TZ_HOUR, \
TIME_BAS_COMPLETE, TIME_EXT_COMPLETE, TIME_HOUR
# Each table row is (input string, expected value or None-for-invalid,
# isodate format constant used by the widget round-trip test).
# Four-digit-year date cases.
DATE_Y4_TEST_CASES = [
    ('19', date(1901, 1, 1), DATE_CENTURY),
    ('1985', date(1985, 1, 1), DATE_YEAR),
    ('1985-04', date(1985, 4, 1), DATE_EXT_MONTH),
    ('1985-04-12', date(1985, 4, 12), DATE_EXT_COMPLETE),
    ('19850412', date(1985, 4, 12), DATE_BAS_COMPLETE),
    ('1985102', date(1985, 4, 12), DATE_BAS_ORD_COMPLETE),
    ('1985-102', date(1985, 4, 12), DATE_EXT_ORD_COMPLETE),
    ('1985W155', date(1985, 4, 12), DATE_BAS_WEEK_COMPLETE),
    ('1985-W15-5', date(1985, 4, 12), DATE_EXT_WEEK_COMPLETE),
    ('1985W15', date(1985, 4, 8), DATE_BAS_WEEK),
    ('1985-W15', date(1985, 4, 8), DATE_EXT_WEEK),
    ('1989-W15', date(1989, 4, 10), DATE_EXT_WEEK),
    ('1989-W15-5', date(1989, 4, 14), DATE_EXT_WEEK_COMPLETE),
    ('1-W1-1', None, DATE_BAS_WEEK_COMPLETE)]
# Expanded six-digit (signed) year cases.
DATE_Y6_TEST_CASES = [
    ('+0019', date(1901, 1, 1), DATE_CENTURY),
    ('+001985', date(1985, 1, 1), DATE_YEAR),
    ('+001985-04', date(1985, 4, 1), DATE_EXT_MONTH),
    ('+001985-04-12', date(1985, 4, 12), DATE_EXT_COMPLETE),
    ('+0019850412', date(1985, 4, 12), DATE_BAS_COMPLETE),
    ('+001985102', date(1985, 4, 12), DATE_BAS_ORD_COMPLETE),
    ('+001985-102', date(1985, 4, 12), DATE_EXT_ORD_COMPLETE),
    ('+001985W155', date(1985, 4, 12), DATE_BAS_WEEK_COMPLETE),
    ('+001985-W15-5', date(1985, 4, 12), DATE_EXT_WEEK_COMPLETE),
    ('+001985W15', date(1985, 4, 8), DATE_BAS_WEEK),
    ('+001985-W15', date(1985, 4, 8), DATE_EXT_WEEK)]
# Combined date+time cases, covering UTC and fixed-offset timezones.
DATETIME_TEST_CASES = [
    ('19850412T1015', datetime(1985, 4, 12, 10, 15),
     DATE_BAS_COMPLETE + 'T' + TIME_BAS_MINUTE),
    ('1985-04-12T10:15', datetime(1985, 4, 12, 10, 15),
     DATE_EXT_COMPLETE + 'T' + TIME_EXT_MINUTE),
    ('1985102T1015Z', datetime(1985, 4, 12, 10, 15, tzinfo=UTC),
     DATE_BAS_ORD_COMPLETE + 'T' + TIME_BAS_MINUTE + TZ_BAS),
    ('1985-102T10:15Z', datetime(1985, 4, 12, 10, 15, tzinfo=UTC),
     DATE_EXT_ORD_COMPLETE + 'T' + TIME_EXT_MINUTE + TZ_EXT),
    ('1985W155T1015+0400', datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 0, '+0400')),
     DATE_BAS_WEEK_COMPLETE + 'T' + TIME_BAS_MINUTE + TZ_BAS),
    ('1985-W15-5T10:15+04', datetime(1985, 4, 12, 10, 15, tzinfo=FixedOffset(4, 0, '+0400')),
     DATE_EXT_WEEK_COMPLETE + 'T' + TIME_EXT_MINUTE + TZ_HOUR)]
# Time-only cases; fractional parts use both comma and dot separators.
TIME_TEST_CASES = [
    ('232050', time(23, 20, 50), TIME_BAS_COMPLETE + TZ_BAS),
    ('23:20:50', time(23, 20, 50), TIME_EXT_COMPLETE + TZ_EXT),
    ('2320', time(23, 20), TIME_BAS_MINUTE),
    ('23:20', time(23, 20), TIME_EXT_MINUTE),
    ('23', time(23), TIME_HOUR),
    ('232050,5', time(23, 20, 50, 500000), None),
    ('23:20:50.5', time(23, 20, 50, 500000), None),
    ('2320,8', time(23, 20, 48), None),
    ('23:20,8', time(23, 20, 48), None),
    ('23,3', time(23, 18), None),
    ('232030Z', time(23, 20, 30, tzinfo=UTC), TIME_BAS_COMPLETE + TZ_BAS),
    ('2320Z', time(23, 20, tzinfo=UTC), TIME_BAS_MINUTE + TZ_BAS),
    ('23Z', time(23, tzinfo=UTC), TIME_HOUR + TZ_BAS),
    ('23:20:30Z', time(23, 20, 30, tzinfo=UTC), TIME_EXT_COMPLETE + TZ_EXT),
    ('23:20Z', time(23, 20, tzinfo=UTC), TIME_EXT_MINUTE + TZ_EXT),
    ('152746+0100', time(15, 27, 46, tzinfo=FixedOffset(1, 0, '+0100')), TIME_BAS_COMPLETE + TZ_BAS),
    ('152746-0500', time(15, 27, 46, tzinfo=FixedOffset(-5, 0, '-0500')), TIME_BAS_COMPLETE + TZ_BAS),
    ('152746+01', time(15, 27, 46, tzinfo=FixedOffset(1, 0, '+01:00')), TIME_BAS_COMPLETE + TZ_HOUR),
    ('152746-05', time(15, 27, 46, tzinfo=FixedOffset(-5, -0, '-05:00')), TIME_BAS_COMPLETE + TZ_HOUR),
    ('15:27:46+01:00', time(15, 27, 46, tzinfo=FixedOffset(1, 0, '+01:00')), TIME_EXT_COMPLETE + TZ_EXT),
    ('15:27:46-05:00', time(15, 27, 46, tzinfo=FixedOffset(-5, -0, '-05:00')), TIME_EXT_COMPLETE + TZ_EXT),
    ('15:27:46+01', time(15, 27, 46, tzinfo=FixedOffset(1, 0, '+01:00')), TIME_EXT_COMPLETE + TZ_HOUR),
    ('15:27:46-05', time(15, 27, 46, tzinfo=FixedOffset(-5, -0, '-05:00')), TIME_EXT_COMPLETE + TZ_HOUR),
    ('1:17:30', None, TIME_EXT_COMPLETE)]
class BaseTestCase(unittest.TestCase):
    """Test case that runs with a stubbed DJANGO_SETTINGS_MODULE.

    setUp points DJANGO_SETTINGS_MODULE at a dummy value so the
    deferred django_iso8601 imports inside the tests work; tearDown
    restores the environment exactly as it was found.
    """

    def setUp(self):
        # Remember the previous value (None means it was unset).
        self._old_settings_mod = os.environ.get('DJANGO_SETTINGS_MODULE', None)
        os.environ['DJANGO_SETTINGS_MODULE'] = 'django_iso8601'  # whatever

    def tearDown(self):
        if self._old_settings_mod is not None:
            os.environ['DJANGO_SETTINGS_MODULE'] = self._old_settings_mod
        else:
            # Bug fix: previously the dummy value was left behind when
            # the variable had not been set before setUp, leaking state
            # into later tests and the surrounding environment.
            os.environ.pop('DJANGO_SETTINGS_MODULE', None)
def create_date_testcase(yeardigits, datestring, expectation, format):
    """Build a loaded TestCase for ISO8601DateField/ISO8601DateInput.

    :param yeardigits: number of year digits the field/widget expects.
    :param datestring: ISO 8601 input string.
    :param expectation: expected ``date``, or None when the input must
        be rejected with a ValidationError.
    :param format: isodate format string handed to the widget.
    """
    class TestDate(BaseTestCase):
        def test_field(self):
            # Imports deferred: they need DJANGO_SETTINGS_MODULE, which
            # BaseTestCase.setUp provides.
            from django_iso8601 import ISO8601DateField
            from django.forms import ValidationError
            field = ISO8601DateField(yeardigits=yeardigits)
            if expectation is None:
                self.assertRaises(ValidationError,
                                  field.to_python, datestring)
            else:
                self.assertEqual(field.to_python(datestring),
                                 expectation)
        def test_widget(self):
            from django_iso8601 import ISO8601DateInput
            from django.forms import ValidationError
            widget = ISO8601DateInput(format=format, yeardigits=yeardigits)
            if expectation is not None:
                # Round-trip: formatting the parsed value reproduces
                # the original input string.
                self.assertEqual(widget.format_value(expectation),
                                 datestring)
            else:
                self.assertEqual(widget.format_value(expectation), None)
    return unittest.TestLoader().loadTestsFromTestCase(TestDate)
def create_datetime_testcase(datetimestring, expectation, format):
    """Build a loaded TestCase for ISO8601DatetimeField/-Input.

    :param datetimestring: ISO 8601 input string.
    :param expectation: expected ``datetime``, or None when the input
        must be rejected with a ValidationError.
    :param format: isodate format string handed to the widget.
    """
    class TestDatetime(BaseTestCase):
        def test_field(self):
            # Deferred imports: require DJANGO_SETTINGS_MODULE from setUp.
            from django_iso8601 import ISO8601DatetimeField
            from django.forms import ValidationError
            field = ISO8601DatetimeField()
            if expectation is None:
                self.assertRaises(ValidationError,
                                  field.to_python, datetimestring)
            else:
                self.assertEqual(field.to_python(datetimestring),
                                 expectation)
        def test_widget(self):
            from django_iso8601 import ISO8601DatetimeInput
            from django.forms import ValidationError
            widget = ISO8601DatetimeInput(format=format)
            if expectation is not None:
                self.assertEqual(widget.format_value(expectation),
                                 datetimestring)
            else:
                self.assertEqual(widget.format_value(expectation), None)
    return unittest.TestLoader().loadTestsFromTestCase(TestDatetime)
def create_time_testcase(timestring, expectation, format):
    """Build a loaded TestCase for ISO8601TimeField/ISO8601TimeInput.

    :param timestring: ISO 8601 input string.
    :param expectation: expected ``time``, or None when the input must
        be rejected with a ValidationError.
    :param format: isodate format string, or None for cases with no
        canonical format (fractional values).
    """
    class TestTime(BaseTestCase):
        def test_field(self):
            # Deferred imports: require DJANGO_SETTINGS_MODULE from setUp.
            from django_iso8601 import ISO8601TimeField
            from django.forms import ValidationError
            field = ISO8601TimeField()
            if expectation is None:
                self.assertRaises(ValidationError,
                                  field.to_python, timestring)
            else:
                self.assertEqual(field.to_python(timestring),
                                 expectation)
        def test_widget(self):
            from django_iso8601 import ISO8601TimeInput
            from django.forms import ValidationError
            widget = ISO8601TimeInput(format=format)
            if expectation is not None:
                # NOTE(review): unlike the date/datetime factories, the
                # round-trip is only asserted when an explicit format is
                # given -- presumably because the fractional cases carry
                # format=None; confirm the intent.
                if format is not None:
                    self.assertEqual(widget.format_value(expectation),
                                     timestring)
            else:
                self.assertEqual(widget.format_value(expectation), None)
    return unittest.TestLoader().loadTestsFromTestCase(TestTime)
def test_suite():
    """Construct a TestSuite instance covering every case table."""
    suite = unittest.TestSuite()
    # Pair each table with the factory (and year width) that builds
    # its test cases.
    case_groups = [
        (DATE_Y4_TEST_CASES,
         lambda v, e, f: create_date_testcase(4, v, e, f)),
        (DATE_Y6_TEST_CASES,
         lambda v, e, f: create_date_testcase(6, v, e, f)),
        (DATETIME_TEST_CASES, create_datetime_testcase),
        (TIME_TEST_CASES, create_time_testcase),
    ]
    for cases, factory in case_groups:
        for value, expected, fmt in cases:
            suite.addTest(factory(value, expected, fmt))
    return suite
def main():
    """Run the module's full suite via unittest's command-line runner."""
    unittest.main(defaultTest='test_suite')
if __name__ == '__main__':
    main()
| |
'''using scipy signal and numpy correlate to calculate some time series
statistics
original developer notes
see also scikits.timeseries (movstat is partially inspired by it)
added 2009-08-29
timeseries moving stats are in c, autocorrelation similar to here
I thought I saw moving stats somewhere in python, maybe not)
TODO
moving statistics
- filters don't handle boundary conditions nicely (correctly ?)
e.g. minimum order filter uses 0 for out of bounds value
-> append and prepend with last resp. first value
- enhance for nd arrays, with axis = 0
Note: Equivalence for 1D signals
>>> np.all(signal.correlate(x,[1,1,1],'valid')==np.correlate(x,[1,1,1]))
True
>>> np.all(ndimage.filters.correlate(x,[1,1,1], origin = -1)[:-3+1]==np.correlate(x,[1,1,1]))
True
# multidimensional, but, it looks like it uses common filter across time series, no VAR
ndimage.filters.correlate(np.vstack([x,x]),np.array([[1,1,1],[0,0,0]]), origin = 1)
ndimage.filters.correlate(x,[1,1,1],origin = 1))
ndimage.filters.correlate(np.vstack([x,x]),np.array([[0.5,0.5,0.5],[0.5,0.5,0.5]]), \
origin = 1)
>>> np.all(ndimage.filters.correlate(np.vstack([x,x]),np.array([[1,1,1],[0,0,0]]), origin = 1)[0]==\
ndimage.filters.correlate(x,[1,1,1],origin = 1))
True
>>> np.all(ndimage.filters.correlate(np.vstack([x,x]),np.array([[0.5,0.5,0.5],[0.5,0.5,0.5]]), \
origin = 1)[0]==ndimage.filters.correlate(x,[1,1,1],origin = 1))
update
2009-09-06: cosmetic changes, rearrangements
'''
import numpy as np
from scipy import signal
from numpy.testing import assert_array_equal, assert_array_almost_equal
import scikits.statsmodels.api as sm
def expandarr(x, k):
    """Pad x with k copies of its first value before and its last value after.

    Works on 1d arrays and, column-wise, on 2d arrays (each padded row
    repeats the first/last row of x).
    """
    if np.ndim(x) == 2:
        pad_shape = (k, np.shape(x)[1])
    else:
        pad_shape = k
    head = np.ones(pad_shape) * x[0]
    tail = np.ones(pad_shape) * x[-1]
    return np.r_[head, x, tail]
def movorder(x, order = 'med', windsize=3, lag='lagged'):
    '''moving order statistics

    Parameters
    ----------
    x : array
       time series data
    order : float or 'med', 'min', 'max'
       which order statistic to calculate
    windsize : int
       window size
    lag : 'lagged', 'centered', or 'leading'
       location of window relative to current position

    Returns
    -------
    filtered array

    Notes
    -----
    Boundaries are handled by padding with the first/last value (see
    expandarr), so the result has the same length as x.
    '''
    #if windsize is even should it raise ValueError
    if lag == 'lagged':
        lead = windsize//2
    elif lag == 'centered':
        lead = 0
    elif lag == 'leading':
        lead = -windsize//2 +1
    else:
        raise ValueError("lag must be 'lagged', 'centered' or 'leading'")
    # Resolve the requested order statistic. Bug fix: check the string
    # aliases first -- np.isfinite raises TypeError on string input, so
    # the original np.isfinite(order) test crashed for the documented
    # 'med'/'min'/'max' values.  Also renamed ord -> rank to stop
    # shadowing the ord() builtin, and use // for an int rank on Py3.
    if order == 'med':
        rank = (windsize - 1)//2
    elif order == 'min':
        rank = 0
    elif order == 'max':
        rank = windsize - 1
    elif np.isfinite(order):
        rank = order
    else:
        raise ValueError("order must be finite or 'med'/'min'/'max'")
    # Pad, filter, then slice back to the original length with the
    # requested alignment.
    xext = expandarr(x, windsize)
    return signal.order_filter(xext, np.ones(windsize), rank)[windsize-lead:-(windsize+lead)]
def check_movorder():
    '''graphical test for movorder

    Asserts moving max/min on monotone data, then plots moving maxima
    of a sine wave for visual inspection (requires matplotlib; run
    manually, not by an automated suite).
    '''
    import matplotlib.pylab as plt
    # On an increasing sequence the lagged moving max equals the data.
    x = np.arange(1,10)
    xo = movorder(x, order='max')
    assert_array_equal(xo, x)
    # On a decreasing sequence the lagged moving min equals the data.
    x = np.arange(10,1,-1)
    xo = movorder(x, order='min')
    assert_array_equal(xo, x)
    assert_array_equal(movorder(x, order='min', lag='centered')[:-1], x[1:])
    # Visual checks: moving max of a shifted sine for each alignment.
    tt = np.linspace(0,2*np.pi,15)
    x = np.sin(tt) + 1
    xo = movorder(x, order='max')
    plt.figure()
    plt.plot(tt,x,'.-',tt,xo,'.-')
    plt.title('moving max lagged')
    xo = movorder(x, order='max', lag='centered')
    plt.figure()
    plt.plot(tt,x,'.-',tt,xo,'.-')
    plt.title('moving max centered')
    xo = movorder(x, order='max', lag='leading')
    plt.figure()
    plt.plot(tt,x,'.-',tt,xo,'.-')
    plt.title('moving max leading')
# identity filter
##>>> signal.order_filter(x,np.ones(1),0)
##array([ 1., 2., 3., 4., 5., 6., 7., 8., 9.])
# median filter
##signal.medfilt(np.sin(x), kernel_size=3)
##>>> plt.figure()
##<matplotlib.figure.Figure object at 0x069BBB50>
##>>> x=np.linspace(0,3,100);plt.plot(x,np.sin(x),x,signal.medfilt(np.sin(x), kernel_size=3))
# remove old version
##def movmeanvar(x, windowsize=3, valid='same'):
## '''
## this should also work along axis or at least for columns
## '''
## n = x.shape[0]
## x = expandarr(x, windowsize - 1)
## takeslice = slice(windowsize-1, n + windowsize-1)
## avgkern = (np.ones(windowsize)/float(windowsize))
## m = np.correlate(x, avgkern, 'same')#[takeslice]
## print m.shape
## print x.shape
## xm = x - m
## v = np.correlate(x*x, avgkern, 'same') - m**2
## v1 = np.correlate(xm*xm, avgkern, valid) #not correct for var of window
###>>> np.correlate(xm*xm,np.array([1,1,1])/3.0,'valid')-np.correlate(xm*xm,np.array([1,1,1])/3.0,'valid')**2
## return m[takeslice], v[takeslice], v1
def movmean(x, windowsize=3, lag='lagged'):
    """moving window mean

    Parameters
    ----------
    x : array
       time series data
    windowsize : int
       window size
    lag : 'lagged', 'centered', or 'leading'
       location of window relative to current position

    Returns
    -------
    mk : array
        moving mean, with same shape as x

    Notes
    -----
    For leading and lagging alignments the data array x is extended
    with the closest boundary value.
    """
    # The mean is the first non-central moment.
    return movmoment(x, 1, windowsize=windowsize, lag=lag)
def movvar(x, windowsize=3, lag='lagged'):
    """moving window variance

    Parameters
    ----------
    x : array
       time series data
    windowsize : int
       window size
    lag : 'lagged', 'centered', or 'leading'
       location of window relative to current position

    Returns
    -------
    mk : array
        moving variance, with same shape as x
    """
    # var = E[x**2] - (E[x])**2, built from the first two moments.
    mean = movmoment(x, 1, windowsize=windowsize, lag=lag)
    mean_sq = movmoment(x, 2, windowsize=windowsize, lag=lag)
    return mean_sq - mean * mean
def movmoment(x, k, windowsize=3, lag='lagged'):
    '''non-central moment

    Parameters
    ----------
    x : array
       time series data
    k : int
       order of the non-central moment (1 -> mean, 2 -> mean square)
    windowsize : int
       window size
    lag : 'lagged', 'centered', or 'leading'
       location of window relative to current position

    Returns
    -------
    mk : array
        k-th moving non-central moment, with same shape as x

    Notes
    -----
    If data x is 2d, then moving moment is calculated for each
    column.  NOTE(review): the bare ``print`` statements below are
    Python 2 debug output left in place.
    '''
    windsize = windowsize
    #if windsize is even should it raise ValueError
    # The slice below undoes the 2*(windsize-1) padding added by
    # expandarr, shifted according to the requested alignment.
    if lag == 'lagged':
        #lead = -0 + windsize #windsize//2
        lead = -0# + (windsize-1) + windsize//2
        sl = slice((windsize-1) or None, -2*(windsize-1) or None)
    elif lag == 'centered':
        lead = -windsize//2 #0#-1 #+ #(windsize-1)
        sl = slice((windsize-1)+windsize//2 or None, -(windsize-1)-windsize//2 or None)
    elif lag == 'leading':
        #lead = -windsize +1#+1 #+ (windsize-1)#//2 +1
        lead = -windsize +2 #-windsize//2 +1
        sl = slice(2*(windsize-1)+1+lead or None, -(2*(windsize-1)+lead)+1 or None)
    else:
        raise ValueError
    avgkern = (np.ones(windowsize)/float(windowsize))
    xext = expandarr(x, windsize-1)
    #Note: expandarr increases the array size by 2*(windsize-1)
    #sl = slice(2*(windsize-1)+1+lead or None, -(2*(windsize-1)+lead)+1 or None)
    print sl
    if xext.ndim == 1:
        return np.correlate(xext**k, avgkern, 'full')[sl]
        #return np.correlate(xext**k, avgkern, 'same')[windsize-lead:-(windsize+lead)]
    else:
        print xext.shape
        print avgkern[:,None].shape
        # try first with 2d along columns, possibly ndim with axis
        return signal.correlate(xext**k, avgkern[:,None], 'full')[sl,:]
#x=0.5**np.arange(10);xm=x-x.mean();a=np.correlate(xm,[1],'full')
#x=0.5**np.arange(3);np.correlate(x,x,'same')
##>>> x=0.5**np.arange(10);xm=x-x.mean();a=np.correlate(xm,xo,'full')
##
##>>> xo=np.ones(10);d=np.correlate(xo,xo,'full')
##>>> xo
##xo=np.ones(10);d=np.correlate(xo,xo,'full')
##>>> x=np.ones(10);xo=x-x.mean();a=np.correlate(xo,xo,'full')
##>>> xo=np.ones(10);d=np.correlate(xo,xo,'full')
##>>> d
##array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 9.,
## 8., 7., 6., 5., 4., 3., 2., 1.])
##def ccovf():
## pass
## #x=0.5**np.arange(10);xm=x-x.mean();a=np.correlate(xm,xo,'full')
# Public API of this module.
__all__ = ['movorder', 'movmean', 'movvar', 'movmoment']
if __name__ == '__main__':
    # Manual smoke tests and regression checks (Python 2 print syntax).
    print '\ncheckin moving mean and variance'
    nobs = 10
    x = np.arange(nobs)
    ws = 3
    # Hand-computed lagged means; boundary entries reflect the
    # first/last-value padding done by expandarr.
    ave = np.array([ 0., 1/3., 1., 2., 3., 4., 5., 6., 7., 8.,
                     26/3., 9])
    va = np.array([[ 0.        ,  0.        ],
                   [ 0.22222222,  0.88888889],
                   [ 0.66666667,  2.66666667],
                   [ 0.66666667,  2.66666667],
                   [ 0.66666667,  2.66666667],
                   [ 0.66666667,  2.66666667],
                   [ 0.66666667,  2.66666667],
                   [ 0.66666667,  2.66666667],
                   [ 0.66666667,  2.66666667],
                   [ 0.66666667,  2.66666667],
                   [ 0.22222222,  0.88888889],
                   [ 0.        ,  0.        ]])
    ave2d = np.c_[ave, 2*ave]
    print movmean(x, windowsize=ws, lag='lagged')
    print movvar(x, windowsize=ws, lag='lagged')
    print [np.var(x[i-ws:i]) for i in range(ws, nobs)]
    m1 = movmoment(x, 1, windowsize=3, lag='lagged')
    m2 = movmoment(x, 2, windowsize=3, lag='lagged')
    print m1
    print m2
    print m2 - m1*m1
    # this implicitly also tests moment
    assert_array_almost_equal(va[ws-1:,0],
                              movvar(x, windowsize=3, lag='leading'))
    assert_array_almost_equal(va[ws//2:-ws//2+1,0],
                              movvar(x, windowsize=3, lag='centered'))
    assert_array_almost_equal(va[:-ws+1,0],
                              movvar(x, windowsize=ws, lag='lagged'))
    print '\nchecking moving moment for 2d (columns only)'
    x2d = np.c_[x, 2*x]
    print movmoment(x2d, 1, windowsize=3, lag='centered')
    print movmean(x2d, windowsize=ws, lag='lagged')
    print movvar(x2d, windowsize=ws, lag='lagged')
    # Column-wise results must match the 1d expectations scaled per column.
    assert_array_almost_equal(va[ws-1:,:],
                              movvar(x2d, windowsize=3, lag='leading'))
    assert_array_almost_equal(va[ws//2:-ws//2+1,:],
                              movvar(x2d, windowsize=3, lag='centered'))
    assert_array_almost_equal(va[:-ws+1,:],
                              movvar(x2d, windowsize=ws, lag='lagged'))
    assert_array_almost_equal(ave2d[ws-1:],
                              movmoment(x2d, 1, windowsize=3, lag='leading'))
    assert_array_almost_equal(ave2d[ws//2:-ws//2+1],
                              movmoment(x2d, 1, windowsize=3, lag='centered'))
    assert_array_almost_equal(ave2d[:-ws+1],
                              movmean(x2d, windowsize=ws, lag='lagged'))
    from scipy import ndimage
    print ndimage.filters.correlate1d(x2d, np.array([1,1,1])/3., axis=0)
    #regression test check
    xg = np.array([  0. ,   0.1,   0.3,   0.6,   1. ,   1.5,   2.1,   2.8,   3.6,
                     4.5,   5.5,   6.5,   7.5,   8.5,   9.5,  10.5,  11.5,  12.5,
                    13.5,  14.5,  15.5,  16.5,  17.5,  18.5,  19.5,  20.5,  21.5,
                    22.5,  23.5,  24.5,  25.5,  26.5,  27.5,  28.5,  29.5,  30.5,
                    31.5,  32.5,  33.5,  34.5,  35.5,  36.5,  37.5,  38.5,  39.5,
                    40.5,  41.5,  42.5,  43.5,  44.5,  45.5,  46.5,  47.5,  48.5,
                    49.5,  50.5,  51.5,  52.5,  53.5,  54.5,  55.5,  56.5,  57.5,
                    58.5,  59.5,  60.5,  61.5,  62.5,  63.5,  64.5,  65.5,  66.5,
                    67.5,  68.5,  69.5,  70.5,  71.5,  72.5,  73.5,  74.5,  75.5,
                    76.5,  77.5,  78.5,  79.5,  80.5,  81.5,  82.5,  83.5,  84.5,
                    85.5,  86.5,  87.5,  88.5,  89.5,  90.5,  91.5,  92.5,  93.5,
                    94.5])
    assert_array_almost_equal(xg, movmean(np.arange(100), 10,'lagged'))
    xd = np.array([  0.3,   0.6,   1. ,   1.5,   2.1,   2.8,   3.6,   4.5,   5.5,
                     6.5,   7.5,   8.5,   9.5,  10.5,  11.5,  12.5,  13.5,  14.5,
                    15.5,  16.5,  17.5,  18.5,  19.5,  20.5,  21.5,  22.5,  23.5,
                    24.5,  25.5,  26.5,  27.5,  28.5,  29.5,  30.5,  31.5,  32.5,
                    33.5,  34.5,  35.5,  36.5,  37.5,  38.5,  39.5,  40.5,  41.5,
                    42.5,  43.5,  44.5,  45.5,  46.5,  47.5,  48.5,  49.5,  50.5,
                    51.5,  52.5,  53.5,  54.5,  55.5,  56.5,  57.5,  58.5,  59.5,
                    60.5,  61.5,  62.5,  63.5,  64.5,  65.5,  66.5,  67.5,  68.5,
                    69.5,  70.5,  71.5,  72.5,  73.5,  74.5,  75.5,  76.5,  77.5,
                    78.5,  79.5,  80.5,  81.5,  82.5,  83.5,  84.5,  85.5,  86.5,
                    87.5,  88.5,  89.5,  90.5,  91.5,  92.5,  93.5,  94.5,  95.4,
                    96.2,  96.9,  97.5,  98. ,  98.4,  98.7,  98.9,  99. ])
    assert_array_almost_equal(xd, movmean(np.arange(100), 10,'leading'))
    xc = np.array([  1.36363636,   1.90909091,   2.54545455,   3.27272727,
                     4.09090909,   5.        ,   6.        ,   7.        ,
                     8.        ,   9.        ,  10.        ,  11.        ,
                    12.        ,  13.        ,  14.        ,  15.        ,
                    16.        ,  17.        ,  18.        ,  19.        ,
                    20.        ,  21.        ,  22.        ,  23.        ,
                    24.        ,  25.        ,  26.        ,  27.        ,
                    28.        ,  29.        ,  30.        ,  31.        ,
                    32.        ,  33.        ,  34.        ,  35.        ,
                    36.        ,  37.        ,  38.        ,  39.        ,
                    40.        ,  41.        ,  42.        ,  43.        ,
                    44.        ,  45.        ,  46.        ,  47.        ,
                    48.        ,  49.        ,  50.        ,  51.        ,
                    52.        ,  53.        ,  54.        ,  55.        ,
                    56.        ,  57.        ,  58.        ,  59.        ,
                    60.        ,  61.        ,  62.        ,  63.        ,
                    64.        ,  65.        ,  66.        ,  67.        ,
                    68.        ,  69.        ,  70.        ,  71.        ,
                    72.        ,  73.        ,  74.        ,  75.        ,
                    76.        ,  77.        ,  78.        ,  79.        ,
                    80.        ,  81.        ,  82.        ,  83.        ,
                    84.        ,  85.        ,  86.        ,  87.        ,
                    88.        ,  89.        ,  90.        ,  91.        ,
                    92.        ,  93.        ,  94.        ,  94.90909091,
                    95.72727273,  96.45454545,  97.09090909,  97.63636364])
    assert_array_almost_equal(xc, movmean(np.arange(100), 11,'centered'))
| |
"""JOSE Web Signature."""
import argparse
import base64
import sys
import OpenSSL
import six
from acme.jose import b64
from acme.jose import errors
from acme.jose import json_util
from acme.jose import jwa
from acme.jose import jwk
from acme.jose import util
class MediaType(object):
    """MediaType field encoder/decoder."""

    PREFIX = 'application/'
    """MIME Media Type and Content Type prefix."""

    @classmethod
    def decode(cls, value):
        """Decoder: expand a prefix-less short form (4.1.10)."""
        # Values containing a slash are already full media types.
        if '/' in value:
            return value
        # Short form must not carry parameters.
        if ';' in value:
            raise errors.DeserializationError('Unexpected semi-colon')
        return cls.PREFIX + value

    @classmethod
    def encode(cls, value):
        """Encoder: strip the prefix from a parameter-less type (4.1.10)."""
        # Types with parameters are serialized verbatim.
        if ';' in value:
            return value
        assert value.startswith(cls.PREFIX)
        return value[len(cls.PREFIX):]
class Header(json_util.JSONObjectWithFields):
    """JOSE Header.

    .. warning:: This class supports **only** Registered Header
        Parameter Names (as defined in section 4.1 of the
        protocol). If you need Public Header Parameter Names (4.2)
        or Private Header Parameter Names (4.3), you must subclass
        and override :meth:`from_json` and :meth:`to_partial_json`
        appropriately.

    .. warning:: This class does not support any extensions through
        the "crit" (Critical) Header Parameter (4.1.11) and as a
        conforming implementation, :meth:`from_json` treats its
        occurrence as an error. Please subclass if you seek for
        a different behaviour.

    :ivar x5tS256: "x5t#S256"
    :ivar str typ: MIME Media Type, inc. :const:`MediaType.PREFIX`.
    :ivar str cty: Content-Type, inc. :const:`MediaType.PREFIX`.
    """
    # Registered Header Parameter Names (section 4.1), declared as
    # json_util.Field descriptors.
    alg = json_util.Field(
        'alg', decoder=jwa.JWASignature.from_json, omitempty=True)
    jku = json_util.Field('jku', omitempty=True)
    jwk = json_util.Field('jwk', decoder=jwk.JWK.from_json, omitempty=True)
    kid = json_util.Field('kid', omitempty=True)
    x5u = json_util.Field('x5u', omitempty=True)
    x5c = json_util.Field('x5c', omitempty=True, default=())
    x5t = json_util.Field(
        'x5t', decoder=json_util.decode_b64jose, omitempty=True)
    x5tS256 = json_util.Field(
        'x5t#S256', decoder=json_util.decode_b64jose, omitempty=True)
    typ = json_util.Field('typ', encoder=MediaType.encode,
                          decoder=MediaType.decode, omitempty=True)
    cty = json_util.Field('cty', encoder=MediaType.encode,
                          decoder=MediaType.decode, omitempty=True)
    crit = json_util.Field('crit', omitempty=True, default=())
    def not_omitted(self):
        """Fields that would not be omitted in the JSON object."""
        return dict((name, getattr(self, name))
                    for name, field in six.iteritems(self._fields)
                    if not field.omit(getattr(self, name)))
    def __add__(self, other):
        # Headers merge only when they are the same type and share no
        # non-omitted fields; merging overlapping headers is undefined.
        if not isinstance(other, type(self)):
            raise TypeError('Header cannot be added to: {0}'.format(
                type(other)))
        not_omitted_self = self.not_omitted()
        not_omitted_other = other.not_omitted()
        if set(not_omitted_self).intersection(not_omitted_other):
            raise TypeError('Addition of overlapping headers not defined')
        not_omitted_self.update(not_omitted_other)
        return type(self)(**not_omitted_self)  # pylint: disable=star-args
    def find_key(self):
        """Find key based on header.

        .. todo:: Supports only "jwk" header parameter lookup.

        :returns: (Public) key found in the header.
        :rtype: :class:`acme.jose.jwk.JWK`

        :raises acme.jose.errors.Error: if key could not be found
        """
        if self.jwk is None:
            raise errors.Error('No key found')
        return self.jwk
    @crit.decoder
    def crit(unused_value):
        # pylint: disable=missing-docstring,no-self-argument,no-self-use
        # Minimal conforming implementation: any "crit" occurrence is
        # rejected (see class docstring).
        raise errors.DeserializationError(
            '"crit" is not supported, please subclass')
    # x5c does NOT use JOSE Base64 (4.1.6)
    @x5c.encoder
    def x5c(value):  # pylint: disable=missing-docstring,no-self-argument
        # Standard base64 of the certificate's DER (ASN.1) bytes.
        return [base64.b64encode(OpenSSL.crypto.dump_certificate(
            OpenSSL.crypto.FILETYPE_ASN1, cert)) for cert in value]
    @x5c.decoder
    def x5c(value):  # pylint: disable=missing-docstring,no-self-argument
        try:
            return tuple(util.ComparableX509(OpenSSL.crypto.load_certificate(
                OpenSSL.crypto.FILETYPE_ASN1,
                base64.b64decode(cert))) for cert in value)
        except OpenSSL.crypto.Error as error:
            raise errors.DeserializationError(error)
class Signature(json_util.JSONObjectWithFields):
    """JWS Signature.

    :ivar combined: Combined Header (protected and unprotected,
        :class:`Header`).
    :ivar unicode protected: JWS protected header (Jose Base-64 decoded).
    :ivar header: JWS Unprotected Header (:class:`Header`).
    :ivar str signature: The signature.
    """
    header_cls = Header
    __slots__ = ('combined',)
    protected = json_util.Field('protected', omitempty=True, default='')
    header = json_util.Field(
        'header', omitempty=True, default=header_cls(),
        decoder=header_cls.from_json)
    signature = json_util.Field(
        'signature', decoder=json_util.decode_b64jose,
        encoder=json_util.encode_b64jose)
    @protected.encoder
    def protected(value):  # pylint: disable=missing-docstring,no-self-argument
        # wrong type guess (Signature, not bytes) | pylint: disable=no-member
        return json_util.encode_b64jose(value.encode('utf-8'))
    @protected.decoder
    def protected(value):  # pylint: disable=missing-docstring,no-self-argument
        return json_util.decode_b64jose(value).decode('utf-8')
    def __init__(self, **kwargs):
        """Create a Signature; derives 'combined' unless supplied."""
        if 'combined' not in kwargs:
            kwargs = self._with_combined(kwargs)
        super(Signature, self).__init__(**kwargs)
        # Every signature must carry an algorithm somewhere in its
        # (combined) header.
        assert self.combined.alg is not None
    @classmethod
    def _with_combined(cls, kwargs):
        """Add a 'combined' entry merging unprotected + protected headers."""
        assert 'combined' not in kwargs
        header = kwargs.get('header', cls._fields['header'].default)
        protected = kwargs.get('protected', cls._fields['protected'].default)
        if protected:
            combined = header + cls.header_cls.json_loads(protected)
        else:
            combined = header
        kwargs['combined'] = combined
        return kwargs
    @classmethod
    def _msg(cls, protected, payload):
        # JWS Signing Input: b64(protected) '.' b64(payload).
        return (b64.b64encode(protected.encode('utf-8')) + b'.' +
                b64.b64encode(payload))
    def verify(self, payload, key=None):
        """Verify.

        :param key: Key used for verification; defaults to the key
            found in the combined header (currently the "jwk" param).
        :type key: :class:`acme.jose.jwk.JWK`
        """
        key = self.combined.find_key() if key is None else key
        return self.combined.alg.verify(
            key=key.key, sig=self.signature,
            msg=self._msg(self.protected, payload))
    @classmethod
    def sign(cls, payload, key, alg, include_jwk=True,
             protect=frozenset(), **kwargs):
        """Sign.

        :param key: Key for signature.
        :type key: :class:`acme.jose.jwk.JWK`
        :param protect: names of header parameters to place in the
            protected (signed) header rather than the unprotected one.
        """
        assert isinstance(key, alg.kty)
        header_params = kwargs
        header_params['alg'] = alg
        if include_jwk:
            header_params['jwk'] = key.public_key()
        assert set(header_params).issubset(cls.header_cls._fields)
        assert protect.issubset(cls.header_cls._fields)
        # Split the requested parameters between the protected header
        # and the unprotected header.
        protected_params = {}
        for header in protect:
            protected_params[header] = header_params.pop(header)
        if protected_params:
            # pylint: disable=star-args
            protected = cls.header_cls(**protected_params).json_dumps()
        else:
            protected = ''
        header = cls.header_cls(**header_params)  # pylint: disable=star-args
        signature = alg.sign(key.key, cls._msg(protected, payload))
        return cls(protected=protected, header=header, signature=signature)
    def fields_to_partial_json(self):
        # Omit the unprotected header entirely when it has no content.
        fields = super(Signature, self).fields_to_partial_json()
        if not fields['header'].not_omitted():
            del fields['header']
        return fields
    @classmethod
    def fields_from_json(cls, jobj):
        fields = super(Signature, cls).fields_from_json(jobj)
        fields_with_combined = cls._with_combined(fields)
        # "alg" is mandatory; reject signatures without it.
        if 'alg' not in fields_with_combined['combined'].not_omitted():
            raise errors.DeserializationError('alg not present')
        return fields_with_combined
class JWS(json_util.JSONObjectWithFields):
    """JSON Web Signature.

    :ivar str payload: JWS Payload.
    :ivar str signature: JWS Signatures.
    """
    __slots__ = ('payload', 'signatures')
    signature_cls = Signature
    def verify(self, key=None):
        """Verify. True only when every attached signature verifies."""
        return all(sig.verify(self.payload, key) for sig in self.signatures)
    @classmethod
    def sign(cls, payload, **kwargs):
        """Sign. Produces a JWS carrying a single signature."""
        return cls(payload=payload, signatures=(
            cls.signature_cls.sign(payload=payload, **kwargs),))
    @property
    def signature(self):
        """Get a singleton signature.

        :rtype: `signature_cls`
        """
        assert len(self.signatures) == 1
        return self.signatures[0]
    def to_compact(self):
        """Compact serialization.

        :rtype: bytes
        """
        # Compact form can carry exactly one signature, and its "alg"
        # must live in the protected header, not the unprotected one.
        assert len(self.signatures) == 1
        assert 'alg' not in self.signature.header.not_omitted()
        # ... it must be in protected
        return (
            b64.b64encode(self.signature.protected.encode('utf-8'))
            + b'.' +
            b64.b64encode(self.payload)
            + b'.' +
            b64.b64encode(self.signature.signature))
    @classmethod
    def from_compact(cls, compact):
        """Compact deserialization.

        :param bytes compact:
        """
        try:
            protected, payload, signature = compact.split(b'.')
        except ValueError:
            raise errors.DeserializationError(
                'Compact JWS serialization should comprise of exactly'
                ' 3 dot-separated components')
        sig = cls.signature_cls(
            protected=b64.b64decode(protected).decode('utf-8'),
            signature=b64.b64decode(signature))
        return cls(payload=b64.b64decode(payload), signatures=(sig,))
    def to_partial_json(self, flat=True):  # pylint: disable=arguments-differ
        assert self.signatures
        payload = json_util.encode_b64jose(self.payload)
        if flat and len(self.signatures) == 1:
            # Flattened JWS JSON Serialization: merge the single
            # signature object with the payload.
            ret = self.signatures[0].to_partial_json()
            ret['payload'] = payload
            return ret
        else:
            return {
                'payload': payload,
                'signatures': self.signatures,
            }
    @classmethod
    def from_json(cls, jobj):
        # "signature" marks the flattened form, "signatures" the
        # general form; the two must not be mixed (RFC-style rule).
        if 'signature' in jobj and 'signatures' in jobj:
            raise errors.DeserializationError('Flat mixed with non-flat')
        elif 'signature' in jobj:  # flat
            return cls(payload=json_util.decode_b64jose(jobj.pop('payload')),
                       signatures=(cls.signature_cls.from_json(jobj),))
        else:
            return cls(payload=json_util.decode_b64jose(jobj['payload']),
                       signatures=tuple(cls.signature_cls.from_json(sig)
                                        for sig in jobj['signatures']))
class CLI(object):
    """JWS CLI: sign stdin or verify a JWS read from stdin."""

    @classmethod
    def sign(cls, args):
        """Sign stdin with the key file from ``-k`` and print the JWS."""
        key = args.alg.kty.load(args.key.read())
        args.key.close()
        if args.protect is None:
            args.protect = []
        if args.compact:
            # Compact serialization requires 'alg' in the protected header.
            args.protect.append('alg')
        sig = JWS.sign(payload=sys.stdin.read().encode(), key=key, alg=args.alg,
                       protect=set(args.protect))
        if args.compact:
            six.print_(sig.to_compact().decode('utf-8'))
        else:  # JSON
            six.print_(sig.json_dumps_pretty())

    @classmethod
    def verify(cls, args):
        """Verify a JWS from stdin; writes the payload, returns truthy on failure."""
        if args.compact:
            sig = JWS.from_compact(sys.stdin.read().encode())
        else:  # JSON
            try:
                sig = JWS.json_loads(sys.stdin.read())
            except errors.Error as error:
                six.print_(error)
                return -1
        if args.key is not None:
            # Verification only needs the public half of the key.
            assert args.kty is not None
            key = args.kty.load(args.key.read()).public_key()
            args.key.close()
        else:
            key = None
        sys.stdout.write(sig.payload)
        return not sig.verify(key=key)

    @classmethod
    def _alg_type(cls, arg):
        # argparse 'type' callback: parse a JWA signature algorithm name.
        return jwa.JWASignature.from_json(arg)

    @classmethod
    def _header_type(cls, arg):
        # argparse 'type' callback: accept only known header field names.
        assert arg in Signature.header_cls._fields
        return arg

    @classmethod
    def _kty_type(cls, arg):
        # argparse 'type' callback: map a key-type name to its JWK class.
        assert arg in jwk.JWK.TYPES
        return jwk.JWK.TYPES[arg]

    @classmethod
    def run(cls, args=sys.argv[1:]):
        """Parse arguments and sign/verify."""
        parser = argparse.ArgumentParser()
        parser.add_argument('--compact', action='store_true')
        subparsers = parser.add_subparsers()
        parser_sign = subparsers.add_parser('sign')
        parser_sign.set_defaults(func=cls.sign)
        parser_sign.add_argument(
            '-k', '--key', type=argparse.FileType('rb'), required=True)
        parser_sign.add_argument(
            '-a', '--alg', type=cls._alg_type, default=jwa.RS256)
        parser_sign.add_argument(
            '-p', '--protect', action='append', type=cls._header_type)
        parser_verify = subparsers.add_parser('verify')
        parser_verify.set_defaults(func=cls.verify)
        parser_verify.add_argument(
            '-k', '--key', type=argparse.FileType('rb'), required=False)
        parser_verify.add_argument(
            '--kty', type=cls._kty_type, required=False)
        parsed = parser.parse_args(args)
        # Dispatch to the sub-command selected via set_defaults(func=...).
        return parsed.func(parsed)
if __name__ == '__main__':
    # Use sys.exit rather than the exit() builtin: exit() is injected by the
    # site module and is not guaranteed to exist (python -S, frozen apps).
    sys.exit(CLI.run())  # pragma: no cover
| |
class system_function(object):
    """ Class for representing system functions (transfer functions) on the form:

                  jw               -jw              -jwM
         jw   B(e  )    b[0] + b[1]e    + .... + b[M]e
      H(e  ) = ------ = -----------------------------------
                  jw               -jw              -jwN
              A(e  )    a[0] + a[1]e    + .... + a[N]e

    Coefficients are stored in ascending powers of e^{-jw}: b[0] and a[0]
    are the constant terms.
    """

    def __init__(self, b, a=(1,)):
        """b, a -- numerator/denominator coefficient sequences.

        The default for ``a`` is an immutable tuple on purpose: a mutable
        default list would be shared between every call.
        """
        from numpy import asarray
        self.b = asarray(b)
        self.a = asarray(a)

    def __call__(self, omega):
        """Evaluate H(e^{j*omega}); *omega* may be a scalar or numpy array."""
        from numpy import exp
        numerator = self.b[0]
        denominator = self.a[0]
        for i in range(1, len(self.b)):
            numerator += self.b[i] * exp(-i * 1j * omega)
        for i in range(1, len(self.a)):
            denominator += self.a[i] * exp(-i * 1j * omega)
        return numerator / denominator

    @staticmethod
    def _coerce(other, verb):
        """Promote numeric scalars to system_function; reject other types."""
        if isinstance(other, (int, float, complex)):
            return system_function([other])
        if type(other) is not system_function:
            raise TypeError("Cannot %s a system_function with %s" % (verb, type(other)))
        return other

    def __mul__(self, other):
        """H1 * H2: multiply numerators and denominators."""
        other = self._coerce(other, "multiply")
        from numpy import polymul
        # polymul expects descending powers, hence the [::-1] reversals.
        new_b = polymul(self.b[::-1], other.b[::-1])
        new_a = polymul(self.a[::-1], other.a[::-1])
        return system_function(new_b[::-1], new_a[::-1])

    def __add__(self, other):
        """H1 + H2 expressed over the common denominator a1*a2."""
        other = self._coerce(other, "add")
        from numpy import polymul, polyadd
        # multiply to get common denominator:
        new_a = polymul(self.a[::-1], other.a[::-1])[::-1]
        temp1 = polymul(self.b[::-1], other.a[::-1])[::-1]
        temp2 = polymul(other.b[::-1], self.a[::-1])[::-1]
        # adding with common denominator (polyadd left-pads descending coeffs):
        new_b = polyadd(temp1[::-1], temp2[::-1])[::-1]
        return system_function(new_b, new_a)

    def __sub__(self, other):
        """H1 - H2, implemented as H1 + (-1)*H2."""
        other = self._coerce(other, "subtract")
        return self + other * system_function([-1])

    def __truediv__(self, other):
        """H1 / H2 == (b1*a2) / (a1*b2).

        Bug fix: the original computed (a1*b2)/(a2*b1), i.e. the reciprocal
        of the correct quotient.
        """
        other = self._coerce(other, "divide")
        from numpy import polymul
        new_b = polymul(self.b[::-1], other.a[::-1])[::-1]
        new_a = polymul(self.a[::-1], other.b[::-1])[::-1]
        return system_function(new_b, new_a)
def apply_filter(filter_function, signal):
    """Apply *filter_function* to *signal* in the frequency domain.

    The signal is FFT'd, multiplied bin-wise with the system function's
    frequency response, and transformed back. Intermediate spectra are
    plotted as a side effect.

    Returns the (complex) inverse FFT of the filtered spectrum for 1-d
    signals; 2-d signals are currently accepted but not processed.
    """
    import numpy as np
    import matplotlib.pyplot as plt
    # isinstance instead of `type(x) ==`: idiomatic and subclass-friendly.
    if not isinstance(filter_function, system_function):
        raise TypeError("Filter function must be an instance of the system function class")
    if not isinstance(signal, np.ndarray):
        raise TypeError("Signal must be a numpy array (ndarray)")
    dimensions = len(signal.shape)
    if dimensions == 1:
        signal_freqs = np.fft.fft(signal)
        # Bug fix: FFT bin k sits at 2*pi*k/N, so 2*pi itself must be
        # excluded (endpoint=False); the original spaced bins by 2*pi/(N-1).
        freqs = np.linspace(0, 2*np.pi, len(signal_freqs), endpoint=False)
        system_freqs = filter_function(freqs)
        plt.plot(np.abs(signal_freqs))
        plt.show()
        plt.plot(np.abs(system_freqs))
        plt.show()
        filter_applied = np.multiply(signal_freqs, system_freqs)
        plt.plot(np.abs(filter_applied))
        plt.show()
        return np.fft.ifft(filter_applied)
    elif dimensions == 2:
        # 2-d handling is a stub: falls through and returns None.
        pass
    else:
        raise NotImplementedError("Only 1d and 2d signals are supported at the moment. Sorry.")
def response(system_function, usedB=True, amplitude=True, phase=True, filename=None, show_plot=True):
    """ Calculates and plots the amplitude and phase response of the filter
    given by *system_function*.

    Arguments:
    system_function -- The system/transfer function of the system

    Keyword arguments:
    usedB     -- use a logarithmic scale (dB) for the amplitude response instead of linear
    amplitude -- whether to plot the amplitude response
    phase     -- whether to plot the phase response
    filename  -- if not None, the plot is saved to the given filename
    show_plot -- whether to show the plot; useful for scripting
    """
    from matplotlib.pyplot import plot, ylabel, xlabel, figure, axis, xticks, savefig, show, grid
    from numpy import linspace, log10, abs, angle, unwrap, pi
    w = linspace(0, 2*pi, 1000)
    fig = figure()
    if amplitude and phase:
        # Shared x-axis with two y-axes: amplitude (left), phase (right).
        ax1 = fig.add_subplot(111)
    if amplitude:
        if usedB:
            plot(w, 20 * log10(abs(system_function(w))), 'b')
            ylabel('Amplitude response, dB', color='b')
        else:
            plot(w, abs(system_function(w)), 'b')
            # Raw strings: '\o' and '\a' in normal literals are invalid /
            # surprising escapes (SyntaxWarning on modern Python).
            ylabel(r'Amplitude response, $|H(e^{i\omega})|$', color='b')
    xlabel(r'Frequency, $\omega$')
    if amplitude and phase:
        ax2 = ax1.twinx()
    if phase:
        # unwrap removes the 2*pi jumps from angle() for a continuous curve.
        angles = unwrap(angle(system_function(w)))
        plot(w, angles, 'g')
        ylabel(r'Phase response, $\angle H(e^{i\omega})$', color='g')
    if amplitude and phase:
        grid()
    axis('tight')
    # make x-axis measures in terms of pi
    xticks([0, pi/4, pi/2, 3*pi/4, pi, 5*pi/4, 3*pi/2, 7*pi/4, 2*pi],
           ['$0$', r'$\frac{\pi}{4}$', r'$\frac{\pi}{2}$', r'$\frac{3\pi}{4}$', r'$\pi$',
            r'$\frac{5\pi}{4}$', r'$\frac{3\pi}{2}$', r'$\frac{7\pi}{4}$', r'$2\pi$'])
    # save if user provided filename
    if filename is not None:
        savefig(filename)
    if show_plot:
        show()
def response2(b, a=(1,), usedB=True, amplitude=True, phase=True, filename=None, show_plot=True):
    """ Calculates and plots the amplitude and phase response of the filter
    given by H defined as

                  jw               -jw              -jwM
         jw   B(e  )    b[0] + b[1]e    + .... + b[M]e
      H(e  ) = ------ = -----------------------------------
                  jw               -jw              -jwN
              A(e  )    a[0] + a[1]e    + .... + a[N]e

    Arguments:
    b, a -- numerator/denominator coefficients of the system function

    Keyword arguments:
    usedB     -- use a logarithmic scale (dB) for the amplitude response instead of linear
    amplitude -- whether to plot the amplitude response
    phase     -- whether to plot the phase response
    filename  -- if this is not None, the plot is saved to the given filename
    show_plot -- whether to show the plot; useful for scripting
    """
    # Bug fix: forward show_plot too -- the original silently dropped it,
    # so response2(..., show_plot=False) still opened a window.
    response(system_function(b, a), usedB, amplitude, phase, filename, show_plot)
def zplane(system_function, filename=None, show_plot=True):
    """ Draws the zero-pole plot for a given system.

    Arguments:
    system_function -- The system/transfer function of the system

    Keyword arguments:
    filename  -- if this is not None, the plot is saved to the given filename
    show_plot -- whether to show the plot; useful for scripting
    """
    # Thin wrapper: unpack the coefficient arrays and delegate.
    zplane2(system_function.b, system_function.a,
            filename=filename, show_plot=show_plot)
def zplane2(b, a=(1,), filename=None, show_plot=True):
    """ Draws the zero-pole plot for a given system.

    Arguments:
    b -- List of coeffs in numerator
    a -- List of coeffs in denominator

    Keyword arguments:
    filename  -- if this is not None, the plot is saved to the given filename
    show_plot -- whether to show the plot; useful for scripting
    """
    from numpy import linspace, cos, sin, roots, real, imag, pi
    # Bug fix: pyplot.hold was deprecated in matplotlib 2.0 and removed in
    # 3.0 -- importing/calling it raises on modern matplotlib. Axes retain
    # artists by default, so no hold() call is needed.
    from matplotlib.pyplot import xlabel, ylabel, axis, savefig, show, scatter, plot, figure
    # Use numpy to find zeros of the numerator (zeros) and of the
    # denominator (poles).
    zeros = roots(b)
    poles = roots(a)
    figure()
    # plot unit circle
    t = linspace(0, 2*pi, 1000)
    plot(cos(t), sin(t), "b", linestyle="dotted")
    # add poles and zeros
    scatter(real(poles), imag(poles), marker='x', color='b', s=100, linewidth=1)
    scatter(real(zeros), imag(zeros), marker='o', facecolors='none', edgecolors='b', s=100, linewidth=1)
    xlabel("Real part")
    ylabel("Imaginary part")
    axis('equal')
    if filename is not None:
        savefig(filename)
    if show_plot:
        show()
def plot_group_delay(system_function, filename=None, show_plot=True):
    """Plot the group delay of the filter given by *system_function*.

    Keyword arguments:
    filename  -- if this is not None, the plot is saved to the given filename
    show_plot -- whether to show the plot; useful for scripting
    """
    import warnings
    from scipy.signal import group_delay
    from matplotlib.pyplot import figure, xlabel, ylabel, savefig, plot, show, axis, xticks
    from numpy import pi
    # Bug fix: the original called filterwarnings("ignore"), silencing ALL
    # warnings process-wide forever. Suppress only around the scipy call.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        w, gd = group_delay((system_function.b, system_function.a), whole=True)
    figure()
    plot(w, gd, 'b')
    xlabel("Frequency")
    ylabel("Group delay")
    # make x-axis measures in terms of pi
    xticks([0, pi/4, pi/2, 3*pi/4, pi, 5*pi/4, 3*pi/2, 7*pi/4, 2*pi],
           ['$0$', r'$\frac{\pi}{4}$', r'$\frac{\pi}{2}$', r'$\frac{3\pi}{4}$', r'$\pi$',
            r'$\frac{5\pi}{4}$', r'$\frac{3\pi}{2}$', r'$\frac{7\pi}{4}$', r'$2\pi$'])
    axis("tight")
    if filename is not None:
        savefig(filename)
    if show_plot:
        show()
def analyze(system_function):
    """Show frequency response, pole-zero plot and group delay in one go."""
    from matplotlib.pyplot import show, title
    # Build each figure without showing it, caption it, then show all at once.
    figures = (
        (lambda: response(system_function, usedB=False, show_plot=False),
         "Frequency response"),
        (lambda: zplane(system_function, show_plot=False),
         "Pole-zero-plot"),
        (lambda: plot_group_delay(system_function, show_plot=False),
         "Group delay"),
    )
    for make_figure, caption in figures:
        make_figure()
        title(caption)
    show()
def sample(function, period_length, Fs, filename=None, show_plot=True):
    """ Samples the function with the given sampling frequency Fs (in Hz)
    on the interval [0, period_length).

    Returns (sample_times, sample_values), both excluding the right endpoint.
    """
    from numpy import linspace
    # Bug fix: Fs*period_length+1 can be a float; linspace needs an int count.
    n = linspace(0, period_length, int(Fs * period_length) + 1)
    if filename is not None or show_plot:
        # Import matplotlib lazily: pure sampling (show_plot=False, no
        # filename) no longer requires it. Also drops pyplot.hold, which
        # was removed in matplotlib 3.0.
        from matplotlib.pyplot import plot, stem, show, axis, savefig
        t = linspace(0, period_length, 1000)
        plot(t, function(t))
        stem(n, function(n))
        axis("tight")
        if filename is not None:
            savefig(filename)
        if show_plot:
            show()
    # Drop the endpoint so exactly Fs*period_length samples cover [0, T).
    return n[:-1], function(n)[:-1]
def dtft(sequence, num_of_points=1024):
    """ Approximate the DTFT of *sequence* with a zero-padded FFT.

    Returns (omega, spectrum): frequencies spanning [0, 2*pi] and the
    corresponding FFT values.
    """
    from numpy.fft import fft
    from numpy import linspace, pi
    spectrum = fft(sequence, num_of_points)
    omega = linspace(0, 2 * pi, num_of_points)
    return omega, spectrum
def plot_dtft(sequence, usedB=True, amplitude=True, phase=True, filename=None, show_plot=True, whole=False):
    """Plot amplitude and/or phase of the FFT-approximated DTFT of *sequence*.

    Keyword arguments:
    usedB     -- use a logarithmic scale (dB) for the amplitude
    amplitude -- whether to plot the amplitude
    phase     -- whether to plot the phase
    filename  -- if not None, the plot is saved to the given filename
    show_plot -- whether to show the plot; useful for scripting
    whole     -- plot the full [0, 2*pi) range instead of just the first half
    """
    from matplotlib.pyplot import plot, ylabel, xlabel, figure, axis, xticks, savefig, show, grid
    from numpy import log10, abs, angle, unwrap, pi
    num_of_points = 1024
    w, h = dtft(sequence, num_of_points)
    fig = figure()
    if not whole:
        # Keep only the first half of the bins, i.e. [0, pi).
        w = w[0:int(num_of_points/2)]
        h = h[0:int(num_of_points/2)]
    if amplitude and phase:
        ax1 = fig.add_subplot(111)
    if amplitude:
        if usedB:
            plot(w, 20 * log10(abs(h)), 'b')
            ylabel('Amplitude, dB', color='b')
        else:
            plot(w, abs(h), 'b')
            # Raw strings avoid the invalid '\o'/'\a' escape warnings.
            ylabel(r'Amplitude, $|X(e^{i\omega})|$', color='b')
    xlabel(r'Frequency, $\omega$')
    if amplitude and phase:
        ax2 = ax1.twinx()
    if phase:
        angles = unwrap(angle(h))
        plot(w, angles, 'g')
        ylabel(r'Phase, $\angle X(e^{i\omega})$', color='g')
    if amplitude and phase:
        grid()
    axis('tight')
    # make x-axis measures in terms of pi
    if whole:
        xticks([0, pi/4, pi/2, 3*pi/4, pi, 5*pi/4, 3*pi/2, 7*pi/4, 2*pi],
               ['$0$', r'$\frac{\pi}{4}$', r'$\frac{\pi}{2}$', r'$\frac{3\pi}{4}$', r'$\pi$',
                r'$\frac{5\pi}{4}$', r'$\frac{3\pi}{2}$', r'$\frac{7\pi}{4}$', r'$2\pi$'])
    else:
        xticks([0, pi/4, pi/2, 3*pi/4, pi],
               ['$0$', r'$\frac{\pi}{4}$', r'$\frac{\pi}{2}$', r'$\frac{3\pi}{4}$', r'$\pi$'])
    # save if user provided filename
    if filename is not None:
        savefig(filename)
    if show_plot:
        show()
def a2d(Ap, As):
    """Convert pass-/stop-band attenuation in dB (Ap, As) to linear
    ripple values (dp, ds)."""
    pass_gain = 10 ** (Ap / 20)
    dp = (pass_gain - 1) / (pass_gain + 1)
    ds = (1 + dp) / (10 ** (As / 20))
    return dp, ds
def d2a(dp, ds):
    """Convert linear ripple values (dp, ds) to pass-/stop-band
    attenuation in dB (Ap, As). Inverse of a2d."""
    from numpy import log10
    Ap = 20 * log10((1 + dp) / (1 - dp))
    As = 20 * log10((1 + dp) / ds)
    # Bug fix: the original computed Ap and As but never returned them,
    # so d2a(...) always yielded None.
    return Ap, As
def to_log(signal):
    """Convert a linear amplitude signal to decibels (20*log10)."""
    from numpy import log10
    log_signal = log10(signal)
    return 20 * log_signal
| |
from __future__ import absolute_import
from copy import copy
from itertools import chain
import os
import re
import sys
import shlex
from tempfile import NamedTemporaryFile
from django.utils.encoding import smart_str
try:
from urllib.request import pathname2url
from urllib.parse import urljoin
except ImportError: # Python2
from urllib import pathname2url
from urlparse import urljoin
import django
from django.conf import settings
from django.template import loader
from django.template.context import Context, RequestContext
import six
from .subprocess import check_output
# wkhtmltopdf flags that take NO value on the command line. _options_to_args
# emits just the flag name for these (and only when the option is truthy),
# instead of a "--flag value" pair.
NO_ARGUMENT_OPTIONS = ['--collate', '--no-collate', '-H', '--extended-help', '-g',
                       '--grayscale', '-h', '--help', '--htmldoc', '--license', '-l',
                       '--lowquality', '--manpage', '--no-pdf-compression', '-q',
                       '--quiet', '--read-args-from-stdin', '--readme',
                       '--use-xserver', '-V', '--version', '--dump-default-toc-xsl',
                       '--outline', '--no-outline', '--background', '--no-background',
                       '--custom-header-propagation', '--no-custom-header-propagation',
                       '--debug-javascript', '--no-debug-javascript', '--default-header',
                       '--disable-external-links', '--enable-external-links',
                       '--disable-forms', '--enable-forms', '--images', '--no-images',
                       '--disable-internal-links', '--enable-internal-links', '-n',
                       '--disable-javascript', '--enable-javascript', '--keep-relative-links',
                       '--disable-local-file-access', '--enable-local-file-access',
                       '--exclude-from-outline', '--include-in-outline', '--disable-plugins',
                       '--enable-plugins', '--print-media-type', '--no-print-media-type',
                       '--resolve-relative-links', '--disable-smart-shrinking',
                       '--enable-smart-shrinking', '--stop-slow-scripts',
                       '--no-stop-slow-scripts', '--disable-toc-back-links',
                       '--enable-toc-back-links', '--footer-line', '--no-footer-line',
                       '--header-line', '--no-header-line', '--disable-dotted-lines',
                       '--disable-toc-links', '--verbose']
def _options_to_args(**options):
    """
    Convert ``options`` into a sorted list of command-line arguments.

    Options whose value is None are skipped entirely. Flag-style options
    (listed in NO_ARGUMENT_OPTIONS) contribute only their name, and only
    when the value is truthy; all other options contribute a name/value pair.
    """
    args = []
    for name in sorted(options):
        value = options[name]
        # Long options get '--', single-character ones get '-'; python
        # identifier underscores become dashes.
        prefix = '--' if len(name) > 1 else '-'
        flag = (prefix + name).replace('_', '-')
        is_flag_only = flag in NO_ARGUMENT_OPTIONS
        if value is None or (value is False and is_flag_only):
            continue
        args.append(flag)
        if not is_flag_only:
            args.append(six.text_type(value))
    return args
def wkhtmltopdf(pages, output=None, **kwargs):
    """
    Converts html to PDF using http://wkhtmltopdf.org/.

    pages: List of file paths or URLs of the html to be converted.
    output: Optional output file path. If None, the output is returned
        (wkhtmltopdf writes to stdout via the '-' argument).
    **kwargs: Passed to wkhtmltopdf via _options_to_args() (See
              https://github.com/antialize/wkhtmltopdf/blob/master/README_WKHTMLTOPDF
              for acceptable args.)
        Kwargs is passed through as arguments. e.g.:
            {'footer_html': 'http://example.com/foot.html'}
        becomes
            '--footer-html http://example.com/foot.html'
        Where there is no value passed, use True. e.g.:
            {'disable_javascript': True}
        becomes:
            '--disable-javascript'
        To disable a default option, use None. e.g:
            {'quiet': None'}
        becomes:
            ''
    example usage:
        wkhtmltopdf(pages=['/tmp/example.html'],
                    dpi=300,
                    orientation='Landscape',
                    disable_javascript=True)
    """
    if isinstance(pages, six.string_types):
        # Support a single page.
        pages = [pages]
    if output is None:
        # Standard output.
        output = '-'
    # 'has_cover' is consumed here (it is not a wkhtmltopdf flag): it marks
    # that the first page should be treated as a cover page.
    has_cover = kwargs.pop('has_cover', False)
    # Default options:
    options = getattr(settings, 'WKHTMLTOPDF_CMD_OPTIONS', None)
    if options is None:
        options = {'quiet': True}
    else:
        # Copy so per-call kwargs never mutate the settings dict.
        options = copy(options)
    options.update(kwargs)
    # Force --encoding utf8 unless the user has explicitly overridden this.
    options.setdefault('encoding', 'utf8')
    env = getattr(settings, 'WKHTMLTOPDF_ENV', None)
    if env is not None:
        # Overlay the configured env vars on top of the current process env.
        env = dict(os.environ, **env)
    cmd = 'WKHTMLTOPDF_CMD'
    # Binary path: settings first, then environment, then bare 'wkhtmltopdf'.
    cmd = getattr(settings, cmd, os.environ.get(cmd, 'wkhtmltopdf'))
    # Adding 'cover' option to add cover_file to the pdf to generate.
    if has_cover:
        pages.insert(0, 'cover')
    ck_args = list(chain(shlex.split(cmd),
                         _options_to_args(**options),
                         list(pages),
                         [output]))
    ck_kwargs = {'env': env}
    # Handling of fileno() attr. based on https://github.com/GrahamDumpleton/mod_wsgi/issues/85
    try:
        i = sys.stderr.fileno()
        ck_kwargs['stderr'] = sys.stderr
    except (AttributeError, IOError):
        # can't call fileno() on mod_wsgi stderr object
        pass
    return check_output(ck_args, **ck_kwargs)
def convert_to_pdf(filename, header_filename=None, footer_filename=None, cmd_options=None, cover_filename=None):
    """Run wkhtmltopdf over the rendered HTML file(s) and return the PDF bytes.

    Clobbers header_html and footer_html only if filenames are provided;
    those keys may already exist in cmd_options as hardcoded static files.
    The argument `filename` may be a string or a list; wkhtmltopdf coerces
    a string into a list.
    """
    # Bug fix: copy the dict instead of mutating the caller's cmd_options
    # in place (the original leaked 'has_cover'/'header_html'/'footer_html'
    # back into the dict the caller passed in).
    cmd_options = dict(cmd_options) if cmd_options else {}
    if cover_filename:
        pages = [cover_filename, filename]
        cmd_options['has_cover'] = True
    else:
        pages = [filename]
    if header_filename is not None:
        cmd_options['header_html'] = header_filename
    if footer_filename is not None:
        cmd_options['footer_html'] = footer_filename
    return wkhtmltopdf(pages=pages, **cmd_options)
class RenderedFile(object):
    """
    Create a temporary file resource of the rendered template with context.
    The filename will be used for later conversion to PDF.

    The temporary file lives as long as this object does; it is closed
    (and, unless in debug mode, deleted) on garbage collection.
    """
    temporary_file = None
    filename = ''

    def __init__(self, template, context, request=None):
        # In debug mode keep the rendered HTML on disk for inspection
        # (delete=False); WKHTMLTOPDF_DEBUG falls back to settings.DEBUG.
        debug = getattr(settings, 'WKHTMLTOPDF_DEBUG', settings.DEBUG)
        self.temporary_file = render_to_temporary_file(
            template=template,
            context=context,
            request=request,
            prefix='wkhtmltopdf', suffix='.html',
            delete=(not debug)
        )
        self.filename = self.temporary_file.name

    def __del__(self):
        # Always close the temporary_file on object destruction.
        if self.temporary_file is not None:
            self.temporary_file.close()
def render_pdf_from_template(input_template, header_template, footer_template, context, request=None, cmd_options=None,
                             cover_template=None):
    """Render templates to temp HTML files and convert them to a single PDF.

    For basic usage. Performs all the actions necessary to create a single
    page PDF from a single template and context. The RenderedFile objects
    are kept in local variables so their temp files survive until
    convert_to_pdf has run.
    """
    cmd_options = cmd_options if cmd_options else {}
    header_filename = footer_filename = None
    # Main content.
    input_file = RenderedFile(
        template=input_template,
        context=context,
        request=request
    )
    # Optional. For header template argument.
    if header_template:
        header_file = RenderedFile(
            template=header_template,
            context=context,
            request=request
        )
        header_filename = header_file.filename
    # Optional. For footer template argument.
    if footer_template:
        footer_file = RenderedFile(
            template=footer_template,
            context=context,
            request=request
        )
        footer_filename = footer_file.filename
    cover = None
    if cover_template:
        cover = RenderedFile(
            template=cover_template,
            context=context,
            request=request
        )
    return convert_to_pdf(filename=input_file.filename,
                          header_filename=header_filename,
                          footer_filename=footer_filename,
                          cmd_options=cmd_options,
                          cover_filename=cover.filename if cover else None)
def content_disposition_filename(filename):
    """
    Sanitize a file name to be used in the Content-Disposition HTTP
    header.

    Even if the standard is quite permissive in terms of
    characters, there are a lot of edge cases that are not supported by
    different browsers.

    See http://greenbytes.de/tech/tc2231/#attmultinstances for more details.
    """
    # Strip the characters that break the header across browsers.
    cleaned = filename
    for forbidden in (';', '"'):
        cleaned = cleaned.replace(forbidden, '')
    return http_quote(cleaned)
def http_quote(string):
    """
    Given a unicode string, will do its dandiest to give you back a
    valid ascii charset string you can use in, say, http headers and the
    like. Returns the value wrapped in double quotes.
    """
    if isinstance(string, six.text_type):
        try:
            # Optional dependency: transliterate non-ASCII to similar ASCII.
            import unidecode
        except ImportError:
            pass
        else:
            string = unidecode.unidecode(string)
        # Whatever could not be transliterated becomes '?'.
        string = string.encode('ascii', 'replace')
    # Wrap in double-quotes for ; , and the like
    # (backslashes and embedded quotes are escaped first).
    string = string.replace(b'\\', b'\\\\').replace(b'"', b'\\"')
    return '"{0!s}"'.format(string.decode())
def pathname2fileurl(pathname):
    """Returns a file:// URL for pathname. Handles OS-specific conversions."""
    url_path = pathname2url(pathname)
    return urljoin('file:', url_path)
def make_absolute_paths(content):
    """Convert all MEDIA files into a file://URL paths in order to
    correctly get it displayed in PDFs.

    Rewrites quoted occurrences of MEDIA_URL/STATIC_URL prefixes in the
    rendered HTML to file:// URLs rooted at MEDIA_ROOT/STATIC_ROOT, so
    wkhtmltopdf can load them from disk without a web server.
    """
    overrides = [
        {
            'root': settings.MEDIA_ROOT,
            'url': settings.MEDIA_URL,
        },
        {
            'root': settings.STATIC_ROOT,
            'url': settings.STATIC_URL,
        }
    ]
    has_scheme = re.compile(r'^[^:/]+://')
    for x in overrides:
        # Skip empty URLs and absolute ones (e.g. a CDN) -- nothing on the
        # local filesystem to map them to.
        if not x['url'] or has_scheme.match(x['url']):
            continue
        root = str(x['root'])
        if not root.endswith('/'):
            root += '/'
        # Match quoted strings that start with the URL prefix.
        occur_pattern = '''(["|']{0}.*?["|'])'''
        occurences = re.findall(occur_pattern.format(x['url']), content)
        occurences = list(set(occurences))  # Remove dups
        for occur in occurences:
            # Strip the opening quote + URL prefix and the closing quote,
            # then rebuild as a file:// URL under the filesystem root.
            content = content.replace(occur, '"%s"' % (
                pathname2fileurl(root) +
                occur[1 + len(x['url']): -1]))
    return content
def render_to_temporary_file(template, context, request=None, mode='w+b',
                             bufsize=-1, suffix='.html', prefix='tmp',
                             dir=None, delete=True):
    """Render *template* with *context* into a NamedTemporaryFile.

    Accepts either a template object (anything with a .render method) or a
    template name resolvable by Django's loader. Returns the open temp
    file; the caller owns it (it is deleted on close when delete=True).
    """
    try:
        render = template.render
    except AttributeError:
        # A template name was passed; let the Django loader render it.
        content = loader.render_to_string(template, context)
    else:
        if django.VERSION < (1, 8):
            # If using a version of Django prior to 1.8, ensure ``context`` is an
            # instance of ``Context``
            if not isinstance(context, Context):
                if request:
                    context = RequestContext(request, context)
                else:
                    context = Context(context)
            # Handle error when ``request`` is None
            content = render(context)
        else:
            content = render(context, request)
    content = smart_str(content)
    # Rewrite media/static URLs to file:// so wkhtmltopdf can load assets.
    content = make_absolute_paths(content)
    try:
        # Python3 has 'buffering' arg instead of 'bufsize'
        tempfile = NamedTemporaryFile(mode=mode, buffering=bufsize,
                                      suffix=suffix, prefix=prefix,
                                      dir=dir, delete=delete)
    except TypeError:
        tempfile = NamedTemporaryFile(mode=mode, bufsize=bufsize,
                                      suffix=suffix, prefix=prefix,
                                      dir=dir, delete=delete)
    try:
        tempfile.write(content.encode('utf-8'))
        tempfile.flush()
        return tempfile
    except:
        # Clean-up tempfile if an Exception is raised.
        tempfile.close()
        raise
| |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mysqlctl.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()


# Generated FileDescriptor for mysqlctl.proto; the serialized_pb blob is the
# compiled proto definition and must not be edited by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='mysqlctl.proto',
  package='mysqlctl',
  syntax='proto3',
  serialized_pb=_b('\n\x0emysqlctl.proto\x12\x08mysqlctl\"#\n\x0cStartRequest\x12\x13\n\x0bmysqld_args\x18\x01 \x03(\t\"\x0f\n\rStartResponse\"*\n\x0fShutdownRequest\x12\x17\n\x0fwait_for_mysqld\x18\x01 \x01(\x08\"\x12\n\x10ShutdownResponse\"\x18\n\x16RunMysqlUpgradeRequest\"\x19\n\x17RunMysqlUpgradeResponse\"\x15\n\x13ReinitConfigRequest\"\x16\n\x14ReinitConfigResponse2\xb6\x02\n\x08MysqlCtl\x12:\n\x05Start\x12\x16.mysqlctl.StartRequest\x1a\x17.mysqlctl.StartResponse\"\x00\x12\x43\n\x08Shutdown\x12\x19.mysqlctl.ShutdownRequest\x1a\x1a.mysqlctl.ShutdownResponse\"\x00\x12X\n\x0fRunMysqlUpgrade\x12 .mysqlctl.RunMysqlUpgradeRequest\x1a!.mysqlctl.RunMysqlUpgradeResponse\"\x00\x12O\n\x0cReinitConfig\x12\x1d.mysqlctl.ReinitConfigRequest\x1a\x1e.mysqlctl.ReinitConfigResponse\"\x00\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Generated message Descriptors for the eight mysqlctl messages (protoc
# output; do not edit by hand). serialized_start/end index into the
# serialized_pb blob above.
_STARTREQUEST = _descriptor.Descriptor(
  name='StartRequest',
  full_name='mysqlctl.StartRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='mysqld_args', full_name='mysqlctl.StartRequest.mysqld_args', index=0,
      number=1, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=28,
  serialized_end=63,
)


_STARTRESPONSE = _descriptor.Descriptor(
  name='StartResponse',
  full_name='mysqlctl.StartResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=65,
  serialized_end=80,
)


_SHUTDOWNREQUEST = _descriptor.Descriptor(
  name='ShutdownRequest',
  full_name='mysqlctl.ShutdownRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='wait_for_mysqld', full_name='mysqlctl.ShutdownRequest.wait_for_mysqld', index=0,
      number=1, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=82,
  serialized_end=124,
)


_SHUTDOWNRESPONSE = _descriptor.Descriptor(
  name='ShutdownResponse',
  full_name='mysqlctl.ShutdownResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=126,
  serialized_end=144,
)


_RUNMYSQLUPGRADEREQUEST = _descriptor.Descriptor(
  name='RunMysqlUpgradeRequest',
  full_name='mysqlctl.RunMysqlUpgradeRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=146,
  serialized_end=170,
)


_RUNMYSQLUPGRADERESPONSE = _descriptor.Descriptor(
  name='RunMysqlUpgradeResponse',
  full_name='mysqlctl.RunMysqlUpgradeResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=172,
  serialized_end=197,
)


_REINITCONFIGREQUEST = _descriptor.Descriptor(
  name='ReinitConfigRequest',
  full_name='mysqlctl.ReinitConfigRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=199,
  serialized_end=220,
)


_REINITCONFIGRESPONSE = _descriptor.Descriptor(
  name='ReinitConfigResponse',
  full_name='mysqlctl.ReinitConfigResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=222,
  serialized_end=244,
)
# Register each message descriptor with the file descriptor (generated code).
DESCRIPTOR.message_types_by_name['StartRequest'] = _STARTREQUEST
DESCRIPTOR.message_types_by_name['StartResponse'] = _STARTRESPONSE
DESCRIPTOR.message_types_by_name['ShutdownRequest'] = _SHUTDOWNREQUEST
DESCRIPTOR.message_types_by_name['ShutdownResponse'] = _SHUTDOWNRESPONSE
DESCRIPTOR.message_types_by_name['RunMysqlUpgradeRequest'] = _RUNMYSQLUPGRADEREQUEST
DESCRIPTOR.message_types_by_name['RunMysqlUpgradeResponse'] = _RUNMYSQLUPGRADERESPONSE
DESCRIPTOR.message_types_by_name['ReinitConfigRequest'] = _REINITCONFIGREQUEST
DESCRIPTOR.message_types_by_name['ReinitConfigResponse'] = _REINITCONFIGRESPONSE
# Build the concrete message classes from their descriptors via reflection
# and register them with the symbol database (generated code).
StartRequest = _reflection.GeneratedProtocolMessageType('StartRequest', (_message.Message,), dict(
  DESCRIPTOR = _STARTREQUEST,
  __module__ = 'mysqlctl_pb2'
  # @@protoc_insertion_point(class_scope:mysqlctl.StartRequest)
  ))
_sym_db.RegisterMessage(StartRequest)

StartResponse = _reflection.GeneratedProtocolMessageType('StartResponse', (_message.Message,), dict(
  DESCRIPTOR = _STARTRESPONSE,
  __module__ = 'mysqlctl_pb2'
  # @@protoc_insertion_point(class_scope:mysqlctl.StartResponse)
  ))
_sym_db.RegisterMessage(StartResponse)

ShutdownRequest = _reflection.GeneratedProtocolMessageType('ShutdownRequest', (_message.Message,), dict(
  DESCRIPTOR = _SHUTDOWNREQUEST,
  __module__ = 'mysqlctl_pb2'
  # @@protoc_insertion_point(class_scope:mysqlctl.ShutdownRequest)
  ))
_sym_db.RegisterMessage(ShutdownRequest)

ShutdownResponse = _reflection.GeneratedProtocolMessageType('ShutdownResponse', (_message.Message,), dict(
  DESCRIPTOR = _SHUTDOWNRESPONSE,
  __module__ = 'mysqlctl_pb2'
  # @@protoc_insertion_point(class_scope:mysqlctl.ShutdownResponse)
  ))
_sym_db.RegisterMessage(ShutdownResponse)

RunMysqlUpgradeRequest = _reflection.GeneratedProtocolMessageType('RunMysqlUpgradeRequest', (_message.Message,), dict(
  DESCRIPTOR = _RUNMYSQLUPGRADEREQUEST,
  __module__ = 'mysqlctl_pb2'
  # @@protoc_insertion_point(class_scope:mysqlctl.RunMysqlUpgradeRequest)
  ))
_sym_db.RegisterMessage(RunMysqlUpgradeRequest)

RunMysqlUpgradeResponse = _reflection.GeneratedProtocolMessageType('RunMysqlUpgradeResponse', (_message.Message,), dict(
  DESCRIPTOR = _RUNMYSQLUPGRADERESPONSE,
  __module__ = 'mysqlctl_pb2'
  # @@protoc_insertion_point(class_scope:mysqlctl.RunMysqlUpgradeResponse)
  ))
_sym_db.RegisterMessage(RunMysqlUpgradeResponse)

ReinitConfigRequest = _reflection.GeneratedProtocolMessageType('ReinitConfigRequest', (_message.Message,), dict(
  DESCRIPTOR = _REINITCONFIGREQUEST,
  __module__ = 'mysqlctl_pb2'
  # @@protoc_insertion_point(class_scope:mysqlctl.ReinitConfigRequest)
  ))
_sym_db.RegisterMessage(ReinitConfigRequest)

ReinitConfigResponse = _reflection.GeneratedProtocolMessageType('ReinitConfigResponse', (_message.Message,), dict(
  DESCRIPTOR = _REINITCONFIGRESPONSE,
  __module__ = 'mysqlctl_pb2'
  # @@protoc_insertion_point(class_scope:mysqlctl.ReinitConfigResponse)
  ))
_sym_db.RegisterMessage(ReinitConfigResponse)
import abc
from grpc.beta import implementations as beta_implementations
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class BetaMysqlCtlServicer(object):
    """Abstract service-side API for the mysqlctl.MysqlCtl gRPC service.

    Subclass and implement each RPC; instances are wired into a server via
    ``beta_create_MysqlCtl_server`` below.
    """
    # NOTE: __metaclass__ only takes effect on Python 2; the generated code
    # targets both interpreter lines.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def Start(self, request, context):
        raise NotImplementedError()

    @abc.abstractmethod
    def Shutdown(self, request, context):
        raise NotImplementedError()

    @abc.abstractmethod
    def RunMysqlUpgrade(self, request, context):
        raise NotImplementedError()

    @abc.abstractmethod
    def ReinitConfig(self, request, context):
        raise NotImplementedError()
class BetaMysqlCtlStub(object):
    """The interface to which stubs will conform."""
    # NOTE: __metaclass__ only takes effect on Python 2.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def Start(self, request, timeout):
        raise NotImplementedError()
    # Placeholder: concrete stubs replace this with an async '.future' call.
    Start.future = None

    @abc.abstractmethod
    def Shutdown(self, request, timeout):
        raise NotImplementedError()
    Shutdown.future = None

    @abc.abstractmethod
    def RunMysqlUpgrade(self, request, timeout):
        raise NotImplementedError()
    RunMysqlUpgrade.future = None

    @abc.abstractmethod
    def ReinitConfig(self, request, timeout):
        raise NotImplementedError()
    ReinitConfig.future = None
def beta_create_MysqlCtl_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
  """Returns a Beta-API gRPC server bound to the mysqlctl.MysqlCtl service.

  Args:
    servicer: a BetaMysqlCtlServicer implementation supplying the handlers.
    pool: optional thread pool for the server.
    pool_size: optional thread pool size (used when pool is not given).
    default_timeout: optional default RPC timeout.
    maximum_timeout: optional maximum RPC timeout.
  """
  # Fix: the code generator emitted one identical 'import mysqlctl_pb2' per
  # RPC method (eight copies); a single import is sufficient since Python
  # imports are idempotent.
  import mysqlctl_pb2
  request_deserializers = {
    ('mysqlctl.MysqlCtl', 'ReinitConfig'): mysqlctl_pb2.ReinitConfigRequest.FromString,
    ('mysqlctl.MysqlCtl', 'RunMysqlUpgrade'): mysqlctl_pb2.RunMysqlUpgradeRequest.FromString,
    ('mysqlctl.MysqlCtl', 'Shutdown'): mysqlctl_pb2.ShutdownRequest.FromString,
    ('mysqlctl.MysqlCtl', 'Start'): mysqlctl_pb2.StartRequest.FromString,
  }
  response_serializers = {
    ('mysqlctl.MysqlCtl', 'ReinitConfig'): mysqlctl_pb2.ReinitConfigResponse.SerializeToString,
    ('mysqlctl.MysqlCtl', 'RunMysqlUpgrade'): mysqlctl_pb2.RunMysqlUpgradeResponse.SerializeToString,
    ('mysqlctl.MysqlCtl', 'Shutdown'): mysqlctl_pb2.ShutdownResponse.SerializeToString,
    ('mysqlctl.MysqlCtl', 'Start'): mysqlctl_pb2.StartResponse.SerializeToString,
  }
  # All four RPCs are simple request/response (unary-unary) calls.
  method_implementations = {
    ('mysqlctl.MysqlCtl', 'ReinitConfig'): face_utilities.unary_unary_inline(servicer.ReinitConfig),
    ('mysqlctl.MysqlCtl', 'RunMysqlUpgrade'): face_utilities.unary_unary_inline(servicer.RunMysqlUpgrade),
    ('mysqlctl.MysqlCtl', 'Shutdown'): face_utilities.unary_unary_inline(servicer.Shutdown),
    ('mysqlctl.MysqlCtl', 'Start'): face_utilities.unary_unary_inline(servicer.Start),
  }
  server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
  return beta_implementations.server(method_implementations, options=server_options)
def beta_create_MysqlCtl_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
  """Returns a Beta-API dynamic stub for the mysqlctl.MysqlCtl service.

  Args:
    channel: the gRPC channel to issue RPCs over.
    host: optional host name override.
    metadata_transformer: optional callable applied to outgoing metadata.
    pool: optional thread pool for the stub.
    pool_size: optional thread pool size (used when pool is not given).
  """
  # Fix: the code generator emitted one identical 'import mysqlctl_pb2' per
  # RPC method (eight copies); a single import is sufficient since Python
  # imports are idempotent.
  import mysqlctl_pb2
  request_serializers = {
    ('mysqlctl.MysqlCtl', 'ReinitConfig'): mysqlctl_pb2.ReinitConfigRequest.SerializeToString,
    ('mysqlctl.MysqlCtl', 'RunMysqlUpgrade'): mysqlctl_pb2.RunMysqlUpgradeRequest.SerializeToString,
    ('mysqlctl.MysqlCtl', 'Shutdown'): mysqlctl_pb2.ShutdownRequest.SerializeToString,
    ('mysqlctl.MysqlCtl', 'Start'): mysqlctl_pb2.StartRequest.SerializeToString,
  }
  response_deserializers = {
    ('mysqlctl.MysqlCtl', 'ReinitConfig'): mysqlctl_pb2.ReinitConfigResponse.FromString,
    ('mysqlctl.MysqlCtl', 'RunMysqlUpgrade'): mysqlctl_pb2.RunMysqlUpgradeResponse.FromString,
    ('mysqlctl.MysqlCtl', 'Shutdown'): mysqlctl_pb2.ShutdownResponse.FromString,
    ('mysqlctl.MysqlCtl', 'Start'): mysqlctl_pb2.StartResponse.FromString,
  }
  # All four RPCs are simple request/response (unary-unary) calls.
  cardinalities = {
    'ReinitConfig': cardinality.Cardinality.UNARY_UNARY,
    'RunMysqlUpgrade': cardinality.Cardinality.UNARY_UNARY,
    'Shutdown': cardinality.Cardinality.UNARY_UNARY,
    'Start': cardinality.Cardinality.UNARY_UNARY,
  }
  stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
  return beta_implementations.dynamic_stub(channel, 'mysqlctl.MysqlCtl', cardinalities, options=stub_options)
# @@protoc_insertion_point(module_scope)
| |
# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import queue as Queue
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from ovs.db import idl
from neutron.agent.ovsdb import api
from neutron.agent.ovsdb.native import commands as cmd
from neutron.agent.ovsdb.native import connection
from neutron.agent.ovsdb.native import helpers
from neutron.agent.ovsdb.native import idlutils
from neutron.i18n import _LE
# Configuration for the native OVSDB backend, registered under the [OVS]
# section; the default targets the local ovsdb-server's TCP manager socket.
OPTS = [
    cfg.StrOpt('ovsdb_connection',
               default='tcp:127.0.0.1:6640',
               help=_('The connection string for the native OVSDB backend')),
]
cfg.CONF.register_opts(OPTS, 'OVS')
# TODO(twilson) DEFAULT.ovs_vsctl_timeout should be OVS.vsctl_timeout
cfg.CONF.import_opt('ovs_vsctl_timeout', 'neutron.agent.common.ovs_lib')
LOG = logging.getLogger(__name__)
class Transaction(api.Transaction):
    """An OVSDB transaction executed over the native (python-ovs idl) backend.

    Commands are queued locally via add(); commit() hands the whole
    transaction to the shared connection thread and blocks on a size-1 queue
    until that thread publishes the outcome from do_commit().
    """
    def __init__(self, api, ovsdb_connection, timeout,
                 check_error=False, log_errors=False):
        self.api = api
        self.check_error = check_error
        self.log_errors = log_errors
        self.commands = []
        # Size-1 queue: commit() blocks on it until the connection thread
        # publishes the single transaction result.
        self.results = Queue.Queue(1)
        self.ovsdb_connection = ovsdb_connection
        self.timeout = timeout
    def add(self, command):
        """Add a command to the transaction
        returns The command passed as a convenience
        """
        self.commands.append(command)
        return command
    def commit(self):
        """Queue the transaction and block until its result is available.

        If check_error is set and the transaction failed, the original
        exception is re-raised (its traceback is logged first when
        log_errors is set); otherwise the raw result object is returned.
        """
        self.ovsdb_connection.queue_txn(self)
        result = self.results.get()
        if self.check_error:
            if isinstance(result, idlutils.ExceptionResult):
                if self.log_errors:
                    LOG.error(result.tb)
                raise result.ex
        return result
    def do_commit(self):
        """Run the queued commands in an idl transaction (connection thread),
        retrying on TRY_AGAIN until self.timeout seconds have elapsed."""
        start_time = time.time()
        attempts = 0
        while True:
            elapsed_time = time.time() - start_time
            # Always allow at least one attempt before enforcing the timeout.
            if attempts > 0 and elapsed_time > self.timeout:
                raise RuntimeError("OVS transaction timed out")
            attempts += 1
            # TODO(twilson) Make sure we don't loop longer than vsctl_timeout
            txn = idl.Transaction(self.api.idl)
            for i, command in enumerate(self.commands):
                LOG.debug("Running txn command(idx=%(idx)s): %(cmd)s",
                          {'idx': i, 'cmd': command})
                try:
                    command.run_idl(txn)
                except Exception:
                    with excutils.save_and_reraise_exception() as ctx:
                        txn.abort()
                        # Best-effort mode: swallow the failure unless the
                        # caller asked for strict error checking.
                        if not self.check_error:
                            ctx.reraise = False
            seqno = self.api.idl.change_seqno
            status = txn.commit_block()
            if status == txn.TRY_AGAIN:
                LOG.debug("OVSDB transaction returned TRY_AGAIN, retrying")
                # Wait (bounded by the remaining time budget) for the db to
                # change before retrying the commit.
                idlutils.wait_for_change(
                    self.api.idl, self.timeout - elapsed_time,
                    seqno)
                continue
            elif status == txn.ERROR:
                msg = _LE("OVSDB Error: %s") % txn.get_error()
                if self.log_errors:
                    LOG.error(msg)
                if self.check_error:
                    # For now, raise similar error to vsctl/utils.execute()
                    raise RuntimeError(msg)
                return
            elif status == txn.ABORTED:
                LOG.debug("Transaction aborted")
                return
            elif status == txn.UNCHANGED:
                LOG.debug("Transaction caused no change")
            return [cmd.result for cmd in self.commands]
class OvsdbIdl(api.API):
    """OVSDB API implementation backed by the native python-ovs idl.

    Every public method builds and returns a deferred command object from
    neutron.agent.ovsdb.native.commands; nothing touches the database until
    the command is committed inside a transaction().
    """
    # NOTE: class attribute -- a single idl connection to the Open_vSwitch
    # database is shared by every OvsdbIdl instance in the process.
    ovsdb_connection = connection.Connection(cfg.CONF.OVS.ovsdb_connection,
                                             cfg.CONF.ovs_vsctl_timeout,
                                             'Open_vSwitch')
    def __init__(self, context):
        super(OvsdbIdl, self).__init__(context)
        # it's a chicken and egg problem: by default, the manager that
        # corresponds to the connection URI is in most cases not enabled in
        # local ovsdb, so we still need ovs-vsctl to set it to allow
        # connections
        helpers.enable_connection_uri(self.ovsdb_connection.connection)
        OvsdbIdl.ovsdb_connection.start()
        self.idl = OvsdbIdl.ovsdb_connection.idl
    @property
    def _tables(self):
        # Schema tables of the connected database, keyed by table name.
        return self.idl.tables
    @property
    def _ovs(self):
        # The single row of the root Open_vSwitch table.
        return list(self._tables['Open_vSwitch'].rows.values())[0]
    def transaction(self, check_error=False, log_errors=True, **kwargs):
        """Create a new Transaction; commands added to it run on commit."""
        return Transaction(self, OvsdbIdl.ovsdb_connection,
                           self.context.vsctl_timeout,
                           check_error, log_errors)
    # Bridge-level commands.
    def add_br(self, name, may_exist=True, datapath_type=None):
        return cmd.AddBridgeCommand(self, name, may_exist, datapath_type)
    def del_br(self, name, if_exists=True):
        return cmd.DelBridgeCommand(self, name, if_exists)
    def br_exists(self, name):
        return cmd.BridgeExistsCommand(self, name)
    def port_to_br(self, name):
        return cmd.PortToBridgeCommand(self, name)
    def iface_to_br(self, name):
        return cmd.InterfaceToBridgeCommand(self, name)
    def list_br(self):
        return cmd.ListBridgesCommand(self)
    def br_get_external_id(self, name, field):
        return cmd.BrGetExternalIdCommand(self, name, field)
    def br_set_external_id(self, name, field, value):
        return cmd.BrSetExternalIdCommand(self, name, field, value)
    # Generic database record commands.
    def db_create(self, table, **col_values):
        return cmd.DbCreateCommand(self, table, **col_values)
    def db_destroy(self, table, record):
        return cmd.DbDestroyCommand(self, table, record)
    def db_set(self, table, record, *col_values):
        return cmd.DbSetCommand(self, table, record, *col_values)
    def db_clear(self, table, record, column):
        return cmd.DbClearCommand(self, table, record, column)
    def db_get(self, table, record, column):
        return cmd.DbGetCommand(self, table, record, column)
    def db_list(self, table, records=None, columns=None, if_exists=False):
        return cmd.DbListCommand(self, table, records, columns, if_exists)
    def db_find(self, table, *conditions, **kwargs):
        return cmd.DbFindCommand(self, table, *conditions, **kwargs)
    # Controller / fail-mode commands.
    def set_controller(self, bridge, controllers):
        return cmd.SetControllerCommand(self, bridge, controllers)
    def del_controller(self, bridge):
        return cmd.DelControllerCommand(self, bridge)
    def get_controller(self, bridge):
        return cmd.GetControllerCommand(self, bridge)
    def set_fail_mode(self, bridge, mode):
        return cmd.SetFailModeCommand(self, bridge, mode)
    # Port / interface commands.
    def add_port(self, bridge, port, may_exist=True):
        return cmd.AddPortCommand(self, bridge, port, may_exist)
    def del_port(self, port, bridge=None, if_exists=True):
        return cmd.DelPortCommand(self, port, bridge, if_exists)
    def list_ports(self, bridge):
        return cmd.ListPortsCommand(self, bridge)
    def list_ifaces(self, bridge):
        return cmd.ListIfacesCommand(self, bridge)
| |
## Module statistics.py
##
## Copyright (c) 2013 Steven D'Aprano <steve+python@pearwood.info>.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
"""
Basic statistics module.
This module provides functions for calculating statistics of data, including
averages, variance, and standard deviation.
Calculating averages
--------------------
================== =============================================
Function Description
================== =============================================
mean Arithmetic mean (average) of data.
median Median (middle value) of data.
median_low Low median of data.
median_high High median of data.
median_grouped Median, or 50th percentile, of grouped data.
mode Mode (most common value) of data.
================== =============================================
Calculate the arithmetic mean ("the average") of data:
>>> mean([-1.0, 2.5, 3.25, 5.75])
2.625
Calculate the standard median of discrete data:
>>> median([2, 3, 4, 5])
3.5
Calculate the median, or 50th percentile, of data grouped into class intervals
centred on the data values provided. E.g. if your data points are rounded to
the nearest whole number:
>>> median_grouped([2, 2, 3, 3, 3, 4]) #doctest: +ELLIPSIS
2.8333333333...
This should be interpreted in this way: you have two data points in the class
interval 1.5-2.5, three data points in the class interval 2.5-3.5, and one in
the class interval 3.5-4.5. The median of these data points is 2.8333...
Calculating variability or spread
---------------------------------
================== =============================================
Function Description
================== =============================================
pvariance Population variance of data.
variance Sample variance of data.
pstdev Population standard deviation of data.
stdev Sample standard deviation of data.
================== =============================================
Calculate the standard deviation of sample data:
>>> stdev([2.5, 3.25, 5.5, 11.25, 11.75]) #doctest: +ELLIPSIS
4.38961843444...
If you have previously calculated the mean, you can pass it as the optional
second argument to the four "spread" functions to avoid recalculating it:
>>> data = [1, 2, 2, 4, 4, 4, 5, 6]
>>> mu = mean(data)
>>> pvariance(data, mu)
2.5
Exceptions
----------
A single exception is defined: StatisticsError is a subclass of ValueError.
"""
__all__ = [ 'StatisticsError',
'pstdev', 'pvariance', 'stdev', 'variance',
'median', 'median_low', 'median_high', 'median_grouped',
'mean', 'mode',
]
import collections
import math
from fractions import Fraction
from decimal import Decimal
from itertools import groupby
# === Exceptions ===
class StatisticsError(ValueError):
    """Raised by the statistics functions when given data they cannot
    handle, e.g. empty data or too few data points."""
    pass
# === Private utilities ===
def _sum(data, start=0):
    """_sum(data [, start]) -> (type, sum, count)

    Return a high-precision sum of the given numeric data as a fraction,
    together with the type to be converted to and the count of items.

    If optional argument ``start`` is given, it is added to the total.
    If ``data`` is empty, ``start`` (defaulting to 0) is returned.

    Examples
    --------

    >>> _sum([3, 2.25, 4.5, -0.5, 1.0], 0.75)
    (<class 'float'>, Fraction(11, 1), 5)

    Some sources of round-off error will be avoided:

    >>> _sum([1e50, 1, -1e50] * 1000)  # Built-in sum returns zero.
    (<class 'float'>, Fraction(1000, 1), 3000)

    Fractions and Decimals are also supported:

    >>> from fractions import Fraction as F
    >>> _sum([F(2, 3), F(7, 5), F(1, 4), F(5, 6)])
    (<class 'fractions.Fraction'>, Fraction(63, 20), 4)

    >>> from decimal import Decimal as D
    >>> data = [D("0.1375"), D("0.2108"), D("0.3061"), D("0.0419")]
    >>> _sum(data)
    (<class 'decimal.Decimal'>, Fraction(6963, 10000), 4)

    Mixed types are currently treated as an error, except that int is
    allowed.
    """
    count = 0
    # ``partials`` maps each denominator to the exact sum of the numerators
    # seen for it; _exact_ratio maps a NAN/INF value to denominator None.
    n, d = _exact_ratio(start)
    partials = {d: n}
    partials_get = partials.get
    # Running coerced result type; grouping by type lets us coerce once per
    # run of same-typed values instead of once per item.
    T = _coerce(int, type(start))
    for typ, values in groupby(data, type):
        T = _coerce(T, typ)  # or raise TypeError
        for n,d in map(_exact_ratio, values):
            count += 1
            partials[d] = partials_get(d, 0) + n
    if None in partials:
        # The sum will be a NAN or INF. We can ignore all the finite
        # partials, and just look at this special one.
        total = partials[None]
        assert not _isfinite(total)
    else:
        # Sum all the partial sums using builtin sum.
        # FIXME is this faster if we sum them in order of the denominator?
        total = sum(Fraction(n, d) for d, n in sorted(partials.items()))
    return (T, total, count)
def _isfinite(x):
try:
return x.is_finite() # Likely a Decimal.
except AttributeError:
return math.isfinite(x) # Coerces to float first.
def _coerce(T, S):
"""Coerce types T and S to a common type, or raise TypeError.
Coercion rules are currently an implementation detail. See the CoerceTest
test class in test_statistics for details.
"""
# See http://bugs.python.org/issue24068.
assert T is not bool, "initial type T is bool"
# If the types are the same, no need to coerce anything. Put this
# first, so that the usual case (no coercion needed) happens as soon
# as possible.
if T is S: return T
# Mixed int & other coerce to the other type.
if S is int or S is bool: return T
if T is int: return S
# If one is a (strict) subclass of the other, coerce to the subclass.
if issubclass(S, T): return S
if issubclass(T, S): return T
# Ints coerce to the other type.
if issubclass(T, int): return S
if issubclass(S, int): return T
# Mixed fraction & float coerces to float (or float subclass).
if issubclass(T, Fraction) and issubclass(S, float):
return S
if issubclass(T, float) and issubclass(S, Fraction):
return T
# Any other combination is disallowed.
msg = "don't know how to coerce %s and %s"
raise TypeError(msg % (T.__name__, S.__name__))
def _exact_ratio(x):
    """Return Real number x to exact (numerator, denominator) pair.

    >>> _exact_ratio(0.25)
    (1, 4)

    x is expected to be an int, Fraction, Decimal or float.  A non-finite
    value (float NAN or INF) is returned as the pair (x, None).
    """
    try:
        # Optimise the common case of floats. We expect that the most often
        # used numeric type will be builtin floats, so try to make this as
        # fast as possible.
        if type(x) is float:
            return x.as_integer_ratio()
        try:
            # x may be an int, Fraction, or Integral ABC.
            return (x.numerator, x.denominator)
        except AttributeError:
            try:
                # x may be a float subclass.
                return x.as_integer_ratio()
            except AttributeError:
                try:
                    # x may be a Decimal.
                    return _decimal_to_ratio(x)
                except AttributeError:
                    # Just give up?
                    pass
    except (OverflowError, ValueError):
        # float NAN or INF.
        assert not math.isfinite(x)
        return (x, None)
    msg = "can't convert type '{}' to numerator/denominator"
    raise TypeError(msg.format(type(x).__name__))
# FIXME This is faster than Fraction.from_decimal, but still too slow.
def _decimal_to_ratio(d):
"""Convert Decimal d to exact integer ratio (numerator, denominator).
>>> from decimal import Decimal
>>> _decimal_to_ratio(Decimal("2.6"))
(26, 10)
"""
sign, digits, exp = d.as_tuple()
if exp in ('F', 'n', 'N'): # INF, NAN, sNAN
assert not d.is_finite()
return (d, None)
num = 0
for digit in digits:
num = num*10 + digit
if exp < 0:
den = 10**-exp
else:
num *= 10**exp
den = 1
if sign:
num = -num
return (num, den)
def _convert(value, T):
"""Convert value to given numeric type T."""
if type(value) is T:
# This covers the cases where T is Fraction, or where value is
# a NAN or INF (Decimal or float).
return value
if issubclass(T, int) and value.denominator != 1:
T = float
try:
# FIXME: what do we do if this overflows?
return T(value)
except TypeError:
if issubclass(T, Decimal):
return T(value.numerator)/T(value.denominator)
else:
raise
def _counts(data):
# Generate a table of sorted (value, frequency) pairs.
table = collections.Counter(iter(data)).most_common()
if not table:
return table
# Extract the values with the highest frequency.
maxfreq = table[0][1]
for i in range(1, len(table)):
if table[i][1] != maxfreq:
table = table[:i]
break
return table
# === Measures of central tendency (averages) ===
def mean(data):
    """Return the sample arithmetic mean of data.

    >>> mean([1, 2, 3, 4, 4])
    2.8

    >>> from fractions import Fraction as F
    >>> mean([F(3, 7), F(1, 21), F(5, 3), F(1, 3)])
    Fraction(13, 21)

    >>> from decimal import Decimal as D
    >>> mean([D("0.5"), D("0.75"), D("0.625"), D("0.375")])
    Decimal('0.5625')

    If ``data`` is empty, StatisticsError will be raised.
    """
    # A one-shot iterator must be materialised so it can be both counted
    # and summed.
    if iter(data) is data:
        data = list(data)
    size = len(data)
    if size < 1:
        raise StatisticsError('mean requires at least one data point')
    T, total, count = _sum(data)
    assert count == size
    # Divide exactly (as a Fraction), then convert back to the coerced type.
    return _convert(total/size, T)
# FIXME: investigate ways to calculate medians without sorting? Quickselect?
def median(data):
    """Return the median (middle value) of numeric data.

    When the number of data points is odd, return the middle data point.
    When the number of data points is even, the median is interpolated by
    taking the average of the two middle values:

    >>> median([1, 3, 5])
    3
    >>> median([1, 3, 5, 7])
    4.0
    """
    values = sorted(data)
    n = len(values)
    if n == 0:
        raise StatisticsError("no median for empty data")
    mid = n//2
    if n%2 == 1:
        return values[mid]
    # Even count: average the two central values.
    return (values[mid - 1] + values[mid])/2
def median_low(data):
    """Return the low median of numeric data.

    When the number of data points is odd, the middle value is returned.
    When it is even, the smaller of the two middle values is returned.

    >>> median_low([1, 3, 5])
    3
    >>> median_low([1, 3, 5, 7])
    3
    """
    values = sorted(data)
    n = len(values)
    if n == 0:
        raise StatisticsError("no median for empty data")
    # For odd n, (n-1)//2 is the middle index; for even n it is the lower
    # of the two middle indices -- one expression covers both cases.
    return values[(n - 1)//2]
def median_high(data):
    """Return the high median of data.

    When the number of data points is odd, the middle value is returned.
    When it is even, the larger of the two middle values is returned.

    >>> median_high([1, 3, 5])
    3
    >>> median_high([1, 3, 5, 7])
    5
    """
    values = sorted(data)
    if not values:
        raise StatisticsError("no median for empty data")
    # Index n//2 is the middle element for odd n, and the upper of the two
    # middle elements for even n.
    return values[len(values)//2]
def median_grouped(data, interval=1):
    """Return the 50th percentile (median) of grouped continuous data.

    >>> median_grouped([1, 2, 2, 3, 4, 4, 4, 4, 4, 5])
    3.7
    >>> median_grouped([52, 52, 53, 54])
    52.5

    This calculates the median as the 50th percentile, and should be
    used when your data is continuous and grouped: each data point is
    treated as the midpoint of a class of width ``interval``, and the
    median is interpolated within the class containing the midpoint.

    Optional argument ``interval`` represents the class interval, and
    defaults to 1. Changing the class interval naturally will change the
    interpolated 50th percentile value:

    >>> median_grouped([1, 3, 3, 5, 7], interval=1)
    3.25
    >>> median_grouped([1, 3, 3, 5, 7], interval=2)
    3.5

    This function does not check whether the data points are at least
    ``interval`` apart.
    """
    values = sorted(data)
    n = len(values)
    if n == 0:
        raise StatisticsError("no median for empty data")
    if n == 1:
        return values[0]
    # The data point at the midpoint identifies the median class interval.
    midpoint = values[n//2]
    for obj in (midpoint, interval):
        if isinstance(obj, (str, bytes)):
            raise TypeError('expected number but got %r' % obj)
    try:
        lower = midpoint - interval/2  # lower limit of the median interval
    except TypeError:
        # Mixed type. For now we just coerce to float.
        lower = float(midpoint) - float(interval)/2
    below = values.index(midpoint)   # number of values below the interval
    # FIXME The following line could be more efficient for big lists.
    within = values.count(midpoint)  # number of values inside the interval
    # Standard grouped-median interpolation formula.
    return lower + interval*(n/2 - below)/within
def mode(data):
    """Return the most common data point from discrete or nominal data.

    ``mode`` assumes discrete data, and returns a single value. This is the
    standard treatment of the mode as commonly taught in schools:

    >>> mode([1, 1, 2, 3, 3, 3, 3, 4])
    3

    This also works with nominal (non-numeric) data:

    >>> mode(["red", "blue", "blue", "red", "green", "red", "red"])
    'red'

    If there is not exactly one most common value, ``mode`` will raise
    StatisticsError.
    """
    # _counts() returns every value tied for the highest frequency.
    table = _counts(data)
    if not table:
        raise StatisticsError('no mode for empty data')
    if len(table) > 1:
        raise StatisticsError(
            'no unique mode; found %d equally common values' % len(table)
        )
    return table[0][0]
# === Measures of spread ===
# See http://mathworld.wolfram.com/Variance.html
# http://mathworld.wolfram.com/SampleVariance.html
# http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
#
# Under no circumstances use the so-called "computational formula for
# variance", as that is only suitable for hand calculations with a small
# amount of low-precision data. It has terrible numeric properties.
#
# See a comparison of three computational methods here:
# http://www.johndcook.com/blog/2008/09/26/comparing-three-methods-of-computing-standard-deviation/
def _ss(data, c=None):
    """Return sum of square deviations of sequence data.

    If ``c`` is None, the mean is calculated in one pass, and the deviations
    from the mean are calculated in a second pass. Otherwise, deviations are
    calculated from ``c`` as given. Use the second case with care, as it can
    lead to garbage results.

    Returns a (type, sum-of-squares) pair, following the convention of
    ``_sum``.
    """
    if c is None:
        c = mean(data)
    T, total, count = _sum((x-c)**2 for x in data)
    # The following sum should mathematically equal zero, but due to rounding
    # error may not.
    U, total2, count2 = _sum((x-c) for x in data)
    assert T == U and count == count2
    # Subtract the correction term (sum of deviations)^2 / n, which
    # compensates for rounding error in c; it is exactly zero when c is the
    # true mean.
    total -= total2**2/len(data)
    assert not total < 0, 'negative sum of square deviations: %f' % total
    return (T, total)
def variance(data, xbar=None):
    """Return the sample variance of data.

    data should be an iterable of Real-valued numbers, with at least two
    values. The optional argument xbar, if given, should be the mean of
    the data. If it is missing or None, the mean is automatically calculated.

    Use this function when your data is a sample from a population. To
    calculate the variance from the entire population, see ``pvariance``.

    Examples:

    >>> data = [2.75, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5]
    >>> variance(data)
    1.3720238095238095

    This function does not check that ``xbar`` is actually the mean of
    ``data``. Giving arbitrary values for ``xbar`` may lead to invalid or
    impossible results.

    Decimals and Fractions are supported:

    >>> from decimal import Decimal as D
    >>> variance([D("27.5"), D("30.25"), D("30.25"), D("34.5"), D("41.75")])
    Decimal('31.01875')

    >>> from fractions import Fraction as F
    >>> variance([F(1, 6), F(1, 2), F(5, 3)])
    Fraction(67, 108)
    """
    # Materialise a one-shot iterator so the data can be sized and re-read.
    if iter(data) is data:
        data = list(data)
    size = len(data)
    if size < 2:
        raise StatisticsError('variance requires at least two data points')
    # Sum of squared deviations divided by n-1 (Bessel's correction).
    T, ss = _ss(data, xbar)
    return _convert(ss/(size - 1), T)
def pvariance(data, mu=None):
    """Return the population variance of ``data``.

    data should be an iterable of Real-valued numbers, with at least one
    value. The optional argument mu, if given, should be the mean of
    the data. If it is missing or None, the mean is automatically calculated.

    Use this function to calculate the variance from the entire population.
    To estimate the variance from a sample, the ``variance`` function is
    usually a better choice.

    Examples:

    >>> data = [0.0, 0.25, 0.25, 1.25, 1.5, 1.75, 2.75, 3.25]
    >>> pvariance(data)
    1.25

    If you have already calculated the mean of the data, you can pass it as
    the optional second argument to avoid recalculating it:

    >>> mu = mean(data)
    >>> pvariance(data, mu)
    1.25

    This function does not check that ``mu`` is actually the mean of ``data``.
    Giving arbitrary values for ``mu`` may lead to invalid or impossible
    results.

    Decimals and Fractions are supported:

    >>> from decimal import Decimal as D
    >>> pvariance([D("27.5"), D("30.25"), D("30.25"), D("34.5"), D("41.75")])
    Decimal('24.815')

    >>> from fractions import Fraction as F
    >>> pvariance([F(1, 4), F(5, 4), F(1, 2)])
    Fraction(13, 72)
    """
    if iter(data) is data:
        data = list(data)
    n = len(data)
    if n < 1:
        raise StatisticsError('pvariance requires at least one data point')
    # Bug fix: _ss() was previously called twice here, with the first result
    # discarded -- running the full two-pass summation over the data twice
    # for no benefit. A single call suffices.
    T, ss = _ss(data, mu)
    return _convert(ss/n, T)
def stdev(data, xbar=None):
    """Return the square root of the sample variance.

    See ``variance`` for arguments and other details.

    >>> stdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75])
    1.0810874155219827
    """
    var = variance(data, xbar)
    # Decimal provides its own sqrt(); other types fall back to math.sqrt,
    # which coerces to float.
    own_sqrt = getattr(var, 'sqrt', None)
    if own_sqrt is not None:
        return own_sqrt()
    return math.sqrt(var)
def pstdev(data, mu=None):
    """Return the square root of the population variance.

    See ``pvariance`` for arguments and other details.

    >>> pstdev([1.5, 2.5, 2.5, 2.75, 3.25, 4.75])
    0.986893273527251
    """
    var = pvariance(data, mu)
    # Decimal provides its own sqrt(); other types fall back to math.sqrt,
    # which coerces to float.
    own_sqrt = getattr(var, 'sqrt', None)
    if own_sqrt is not None:
        return own_sqrt()
    return math.sqrt(var)
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Control flow statements: loops, conditionals, etc.
Note: most of these operators accept pairs of get_state/set_state functions, to
capture mutations that the corresponding code blocks might make. These
mutations only need to be captured when staging the control flow, and they just
work when reverting to Python behavior.
__Examples__
```
while cond:
self.x += i
```
When the functionalized version is executed as a Python loop, it just works:
```
def loop_body():
self.x += i # works as expected for Python loops
```
But it won't work for TF loops:
```
def loop_body():
self.x += i # self.x has the wrong value!
```
get_state/set_state allow piping the mutations through the loop variables as
well, in effect changing the loop body:
```
def loop_body(self_x):
self.x = self_x # self.x now has the proper value
self.x += i # the original block
self_x = self.x # write self.x back into the loop vars
return self_x
self_x = tf.while_loop(...)
self.x = self_x # the result is not properly captured
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import traceback
import numpy as np
from tensorflow.python.autograph.operators import py_builtins
from tensorflow.python.autograph.operators import variables
from tensorflow.python.autograph.utils import ag_logging
from tensorflow.python.autograph.utils import misc
from tensorflow.python.autograph.utils import tensors
from tensorflow.python.data.experimental.ops import take_while_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.types import distribute
from tensorflow.python.util import nest
PYTHON_MAX_ITERATIONS = 100000000 # Fails in about one minute for empty loops.
WARN_INEFFICIENT_UNROLL = True
INEFFICIENT_UNROLL_MIN_ITERATIONS = 50000
INEFFICIENT_UNROLL_MIN_OPS = 1
# TODO(mdan): Use the custom operator pattern instead of type dispatch.
# An example of this pattern is found in the implementation of distributed
# datasets. Before it can be used though, we need to standardize the interface.
def _is_none_or_undef(value):
  """Tests whether a value is None or undefined.

  AutoGraph represents undefined symbols using special objects of type
  Undefined or UndefinedReturnValue.

  Args:
    value: value to test

  Returns:
    Boolean
  """
  if value is None:
    return True
  undefined_types = (variables.UndefinedReturnValue, variables.Undefined)
  return isinstance(value, undefined_types)
def _verify_tf_condition(cond, tag):
  """Ensures that the condition can be used in a TF control flow.

  Args:
    cond: candidate condition value; converted to a tensor.
    tag: description of the control flow construct, used in error messages.

  Returns:
    A scalar `tf.bool` tensor.

  Raises:
    ValueError: if the condition is not boolean-typed or is statically known
      to contain more than one element.
  """
  extra_hint = 'to check for None, use `is not None`'
  cond = ops.convert_to_tensor_v2(cond)
  if cond.dtype != dtypes.bool:
    raise ValueError(
        'condition of {} expected to be `tf.bool` scalar, got {}'
        '; to use as boolean Tensor, use `tf.cast`'
        '; {}'.format(tag, cond, extra_hint))
  if cond.shape is None or cond.shape.ndims is None:
    # Unknown rank: force the condition into scalar shape at runtime.
    # TODO(mdan): Consider a explicit size check, if not too slow.
    cond = array_ops.reshape(cond, ())
  elif cond.shape.ndims > 0:
    # Known non-scalar rank: only allow it when every statically known
    # dimension admits a single element overall.
    known_dims = [d for d in cond.shape.as_list() if d is not None]
    if np.prod(known_dims) > 1:
      raise ValueError(
          'condition of {} expected to be `tf.bool` scalar, got {}'
          '; {}'.format(tag, cond, extra_hint))
    else:
      cond = array_ops.reshape(cond, ())
  return cond
def _verify_loop_init_vars(init_vars, symbol_names, first_iter_vars=None):
  """Ensures that all values in the state are valid to use in a TF loop.

  The init_vars may contain placeholder values derived from first_iter_vars.

  Args:
    init_vars: initial loop variables (as taken before entering the loop)
    symbol_names: corresponding names of the initial loop variables
    first_iter_vars: loop variables after one iteration of the loop

  Raises:
    ValueError: when an initial loop variable is None or undefined and could
      not be replaced by a legal placeholder value.
    NotImplementedError: when a return value's placement makes it undefined at
      loop entry.
  """
  if not symbol_names:
    return
  if first_iter_vars is None:
    first_iter_vars = (None,) * len(symbol_names)
  assert len(symbol_names) == len(init_vars)
  assert len(symbol_names) == len(first_iter_vars)
  for name, val, fi_val in zip(symbol_names, init_vars, first_iter_vars):
    if isinstance(val, variables.UndefinedReturnValue):
      if fi_val:
        # A value was produced by one staged iteration, but it is not a type
        # that can be carried as a loop variable.
        raise ValueError(
            'the return value from a TensorFlow loop may only be a {}; got {}'
            .format(LEGAL_LOOP_TYPES, type(fi_val)))
      else:
        # TODO(mdan): This can be handled by removing the return value.
        raise NotImplementedError(
            'a return statement cannot be placed inside this TensorFlow loop;'
            ' this may happen if a return statement depends on a'
            ' static Python condition such as a hyperparameter')
    error_msg = None
    if val is None:
      error_msg = "'{}' may not be None before the loop".format(name)
    elif isinstance(val, variables.Undefined):
      error_msg = "'{}' must be defined before the loop".format(name)
    # This only happens when we could not infer a placeholder for the
    # variable. The canonical case when that happens is when _placeholder_value
    # could not infer a placeholder for it. That means it's of an unknown type
    # or it's still undefined after staging one iteration.
    if error_msg is not None:
      if fi_val:
        error_msg += (", unless it's a {}; got {}".format(
            LEGAL_LOOP_TYPES, type(fi_val)))
      else:
        # TODO(mdan): This can be handled by removing the loop var.
        error_msg += '.'
      raise ValueError(error_msg)
def _is_subshape(left, right):
"""Returns True if left shape is at least as specific as right shape."""
# TODO(mdan): This code should be in TensorShape.
# Note: this is not the same as TensorShape.is_compatible_with, which is
# symmetric.
# This code also duplicates _ShapeLessThanOrEqual from control_flow_ops.py.
if right.dims is None:
return True
if left.ndims != right.ndims:
return False
for ldim, rdim in zip(left.dims, right.dims):
if rdim.value is not None and ldim.value != rdim.value:
return False
return True
# TODO(mdan): Remove these verifications once TF ops can properly report names.
def _verify_single_loop_var(
    name, check_shape, init, entry, exit_, shape_invariant):
  """Verifies whether the initial, entry and exit values are consistent.

  Args:
    name: name of the loop variable, used in error messages.
    check_shape: whether shape consistency across one iteration is checked.
    init: value of the variable before entering the loop.
    entry: value of the variable at the start of one iteration.
    exit_: value of the variable at the end of one iteration.
    shape_invariant: optional user-provided shape invariant; when set, both
      init and exit_ are checked against it instead of requiring exit_ to
      refine entry's shape.

  Raises:
    ValueError: when the variable is None after the iteration, or when shapes
      are inconsistent.
    TypeError: when the dtype changes across one iteration.
  """
  assert entry is not None, "no TF op should set '{}' to None?".format(name)
  if exit_ is None:
    raise ValueError("'{}' is None at the end of the iteration.".format(name))
  # Normalize Python scalars/ndarrays so the dtype/shape checks below apply
  # uniformly.
  if isinstance(init, (bool, int, float, str, np.ndarray)):
    init = ops.convert_to_tensor_v2(init)
  if isinstance(entry, (bool, int, float, str, np.ndarray)):
    entry = ops.convert_to_tensor_v2(entry)
  if isinstance(exit_, (bool, int, float, str, np.ndarray)):
    exit_ = ops.convert_to_tensor_v2(exit_)
  # Non-tensor values (e.g. plain Python objects) are not checked here.
  if (not tensor_util.is_tf_type(entry) or
      not tensor_util.is_tf_type(exit_)):
    return
  # TODO(mdan): Properly account for CompositeTensors.
  if (not hasattr(entry, 'dtype') or
      not hasattr(exit_, 'dtype')):
    return
  if (not hasattr(entry, 'shape') or
      not hasattr(exit_, 'shape')):
    return
  if entry.dtype != exit_.dtype:
    raise TypeError(
        "'{}' has dtype {} before the loop, but dtype {} after one"
        ' iteration'.format(
            name,
            entry.dtype.name,
            exit_.dtype.name,
        ))
  if check_shape:
    exit_shape = exit_.shape
    if shape_invariant is None:
      # Without an explicit invariant, the exit shape must be at least as
      # specific as the entry shape (see _is_subshape).
      entry_shape = entry.shape
      if not _is_subshape(exit_shape, entry_shape):
        raise ValueError(
            "'{}' has shape {} before the loop, but shape {} after one"
            ' iteration. Use tf.autograph.experimental.set_loop_options to set'
            ' shape invariants.'.format(name, entry_shape, exit_shape))
    else:
      # With an explicit invariant, both the initial and the exit shape must
      # conform to it.
      init_shape = init.shape
      if not _is_subshape(init_shape, shape_invariant):
        raise ValueError(
            "'{}' has shape {} before the loop, which does not conform with"
            ' the shape invariant {}.'.format(name, init_shape,
                                              shape_invariant))
      if not _is_subshape(exit_shape, shape_invariant):
        raise ValueError(
            "'{}' has shape {} after one iteration, which does not conform with"
            ' the shape invariant {}.'.format(
                name, exit_shape, shape_invariant))
def _verify_tf_loop_vars(init_vars,
                         iter_entry_vars,
                         iter_exit_vars,
                         symbol_names,
                         opts,
                         check_shapes=True):
  """Verifies loop variables for consistency.

  Args:
    init_vars: loop variables before entering the loop.
    iter_entry_vars: loop variables at the start of one iteration.
    iter_exit_vars: loop variables at the end of one iteration.
    symbol_names: names of the loop variables, used in error messages.
    opts: loop options dict; may contain 'shape_invariants'.
    check_shapes: whether shape consistency is verified.

  Raises:
    TypeError: when a variable's nested structure changes across one iteration
      or disagrees with its shape invariant.
  """
  if check_shapes and 'shape_invariants' in opts:
    shape_invariants = opts['shape_invariants']
  else:
    shape_invariants = nest.map_structure(lambda _: None, iter_entry_vars)

  assert len(symbol_names) == len(shape_invariants)
  assert len(symbol_names) == len(init_vars)
  assert len(symbol_names) == len(iter_entry_vars)
  assert len(symbol_names) == len(iter_exit_vars)

  # Iterate the parallel sequences in lockstep rather than by index.
  for name, init, entry, exit_, invariant in zip(
      symbol_names, init_vars, iter_entry_vars, iter_exit_vars,
      shape_invariants):
    try:
      nest.assert_same_structure(init, entry, expand_composites=True)
      nest.assert_same_structure(entry, exit_, expand_composites=True)
    except (ValueError, TypeError) as e:
      # Chain explicitly so the nest error remains visible as the cause.
      raise TypeError("'{}' does not have the same nested structure after one"
                      ' iteration.\n\n{}'.format(name, e)) from e
    if invariant is not None:
      try:
        nest.assert_same_structure(init, invariant, expand_composites=False)
      except (ValueError, TypeError) as e:
        raise TypeError("'{}' does not have the same nested structure as its"
                        ' corresponding shape invariant.\n\n{}'.format(
                            name, e)) from e

    nest.map_structure(
        functools.partial(_verify_single_loop_var, name, check_shapes), init,
        entry, exit_, invariant)
def verify_single_cond_var(name, body_var, orelse_var):
  """Verifies whether body_var and orelse_var are consistent."""
  if body_var is None:
    raise ValueError("'{}' is None at the end of the main branch.".format(name))
  if orelse_var is None:
    raise ValueError(
        "'{}' is None at the end of the else branch.".format(name))

  def _as_tensor(value):
    # Normalize Python scalars/ndarrays so the dtype check below applies.
    if isinstance(value, (bool, int, float, str, np.ndarray)):
      return ops.convert_to_tensor_v2(value)
    return value

  body_var = _as_tensor(body_var)
  orelse_var = _as_tensor(orelse_var)

  # Only pairs of tensors are checked further.
  if not (tensor_util.is_tf_type(body_var) and
          tensor_util.is_tf_type(orelse_var)):
    return

  # TODO(mdan): Properly account for CompositeTensors.
  if not (hasattr(body_var, 'dtype') and hasattr(orelse_var, 'dtype')):
    return

  if body_var.dtype != orelse_var.dtype:
    raise TypeError(
        "'{}' has dtype {} in the main branch, but dtype {} in the else"
        ' branch'.format(name, body_var.dtype.name,
                         orelse_var.dtype.name))
def _verify_tf_cond_branch_vars(vars_, symbol_names, branch_name):
  """Verifies variables output by a conditional branch for consistency.

  Raises ValueError when a branch leaves a symbol undefined, or fails to
  return where the other branch returned.
  """
  for name, value in zip(symbol_names, vars_):
    if isinstance(value, variables.Undefined):
      raise ValueError(
          "'{}' must also be initialized in the {} branch".format(
              name, branch_name))
    if isinstance(value, variables.UndefinedReturnValue):
      raise ValueError(
          'the {} branch must also have a return statement.'.format(
              branch_name))
def _verify_tf_cond_vars(body_vars, orelse_vars, symbol_names):
  """Verifies variables manipulated by a conditional for consistency.

  Checks that each symbol has the same nested structure in both branches, and
  delegates per-leaf dtype checks to verify_single_cond_var.
  """
  for name, body_var, orelse_var in zip(symbol_names, body_vars, orelse_vars):
    try:
      nest.assert_same_structure(body_var, orelse_var, expand_composites=True)
    except (ValueError, TypeError) as e:
      raise TypeError(
          "'{}' must have the same nested structure in the main and else"
          ' branches:\n\n{}'.format(name, str(e)))

    nest.map_structure(
        functools.partial(verify_single_cond_var, name), body_var, orelse_var)
def for_stmt(iter_, extra_test, body, get_state, set_state, symbol_names, opts):
  """Functional form of a for statement.

  The loop operates on a state, which includes all symbols that are
  variant across loop iterations, excluding the variables local to the loop.

  For example, given the loop below that calculates the geometric and
  arithmetic means or some numbers:

  ```
  geo_mean = 1
  arith_mean = 0
  for i in range(n):
    a = numbers[i]
    geo_mean *= a
    arith_mean += a
  ```

  The state is represented by the variables geo_mean and arith_mean. The
  `extra_test`, `body`, `get_state` and `set_state` functions must bind to the
  original `geo_mean` and `arith_mean` symbols, using `nonlocal`.

  The inputs and outputs of the callables representing the loop blocks are not
  explicit - instead, these functions must use nonlocal/global for side effects.
  The inputs and outputs are instead controlled by the set_state/get_state
  functions.

  Args:
    iter_: The entity being iterated over.
    extra_test: Callable with boolean return type.
      An additional loop condition.
    body: Callable representing the actual loop body.
    get_state: Additional callable which can capture additional state (such as
      the values of composite symbols). This is only useful when staging the
      loop.
    set_state: Additional callable which save values captured by get_state back
      into the Python environment. This is only useful when staging the loop.
    symbol_names: Tuple containing names of the loop variables returned by
      get_state.
    opts: Optional dict of extra loop parameters.
  """
  # Dispatch on the type of the iterated entity; each overload stages a
  # different TF construct (or falls back to a plain Python loop).
  if tensor_util.is_tf_type(iter_):
    if tensors.is_range_tensor(iter_):
      # A tf.range iterate: the range op itself is elided and the loop runs
      # directly on its (start, limit, delta) inputs.
      _tf_range_for_stmt(iter_, extra_test, body, get_state, set_state,
                         symbol_names, opts)
    elif isinstance(iter_, ragged_tensor.RaggedTensor):
      _tf_ragged_for_stmt(iter_, extra_test, body, get_state, set_state,
                          symbol_names, opts)
    else:
      # Any other dense tensor: iterate by index over its first dimension.
      _known_len_tf_for_stmt(
          iter_, extra_test, body, get_state, set_state, symbol_names, opts)
  elif isinstance(iter_, dataset_ops.DatasetV2):
    _tf_dataset_for_stmt(
        iter_, extra_test, body, get_state, set_state, symbol_names, opts)
  elif isinstance(iter_, iterator_ops.OwnedIterator):
    _tf_iterator_for_stmt(
        iter_, extra_test, body, get_state, set_state, symbol_names, opts)
  elif isinstance(iter_, ragged_tensor.RaggedTensor):
    # NOTE(review): ragged tensors appear both here and under the is_tf_type
    # branch above; presumably this arm covers cases where is_tf_type returns
    # False for a RaggedTensor — confirm.
    _tf_ragged_for_stmt(
        iter_, extra_test, body, get_state, set_state, symbol_names, opts)
  elif isinstance(iter_, distribute.Iterator):
    _tf_iterator_for_stmt(
        iter_, extra_test, body, get_state, set_state, symbol_names, opts)
  elif isinstance(iter_, distribute.Iterable):
    # TODO(b/162250181): Use _tf_iterator_for_stmt(iter(iter_)...
    _tf_distributed_iterable_for_stmt(
        iter_, extra_test, body, get_state, set_state, symbol_names, opts)
  else:
    # Fall back to an ordinary Python loop, unrolled at trace time.
    _py_for_stmt(iter_, extra_test, body, None, None)
def _py_for_stmt(iter_, extra_test, body, get_state, set_state):
  """Overload of for_stmt that executes a Python for loop."""
  del get_state, set_state

  if __debug__:
    # Wrap the body so each iteration is accounted for by the loop checker,
    # which warns on accidental large unrolls.
    checker = _PythonLoopChecker()
    before_iteration = checker.before_iteration
    after_iteration = checker.after_iteration
    before_iteration()

    unchecked_body = body

    def checked_body(current_iterate):
      unchecked_body(current_iterate)
      after_iteration()
      before_iteration()

    body = checked_body

  if extra_test is None:
    for target in iter_:
      body(target)
  else:
    # The extra test runs once before the loop and again after every body
    # call, matching the semantics of break/return conditions.
    if extra_test():
      for target in iter_:
        body(target)
        if not extra_test():
          break
def _add_max_iterations_hint(opts, n):
  """Records `n` as a maximum_iterations hint when inside an XLA context.

  XLA requires a statically known iteration bound; outside XLA the hint is
  deliberately not set.
  """
  # TODO(b/159186914): Remove the safeguard, and always set maximum_iterations.
  if control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph()):
    opts['maximum_iterations'] = n
def _known_len_tf_for_stmt(
    iter_, extra_test, body, get_state, set_state, symbol_names, opts):
  """Overload of for_stmt that iterates over TF entities that admit a length.

  The for loop is lowered to a while loop over a hidden integer index into a
  TensorArray built from the iterated tensor.
  """
  n = py_builtins.len_(iter_)
  # TODO(b/117628877): Revisit performance once XLA has the necessary support.
  # Note: using a TensorArray creates an extra copy, but can calculate
  # gradients more efficiently than StridedSlice.
  ta = tensor_array_ops.TensorArray(iter_.dtype, size=n)
  iter_ = ta.unstack(iter_)
  # Hidden loop variable: the current iteration index.
  iterate_index = 0
  def aug_get_state():
    return (iterate_index,) + get_state()
  def aug_set_state(aug_loop_vars):
    nonlocal iterate_index
    # TODO(b/171479293): Drop the lint override.
    iterate_index, *loop_vars = aug_loop_vars # pylint:disable=unused-variable
    # The iteration index is not "output" by the for loop. If the iterate
    # is used outside the loop, it will appear in the loop vars separately.
    set_state(loop_vars)
  def aug_body():
    nonlocal iterate_index
    body(iter_.read(iterate_index))
    iterate_index += 1
  def aug_test():
    main_test = iterate_index < n
    if extra_test is not None:
      # Only evaluate extra_test while the index is in bounds.
      return control_flow_ops.cond(main_test, extra_test, lambda: False)
    return main_test
  _add_max_iterations_hint(opts, n)
  _tf_while_stmt(
      aug_test,
      aug_body,
      aug_get_state,
      aug_set_state,
      ('<internal iterate>',) + symbol_names,
      opts,
  )
def _tf_ragged_for_stmt(
    iter_, extra_test, body, get_state, set_state, symbol_names, opts):
  """Overload of for_stmt that iterates over TF ragged tensors.

  The loop is lowered to a while loop over a hidden row index.
  """
  init_vars = get_state()
  _verify_loop_init_vars(init_vars, symbol_names)
  # TODO(mdan): Move this into len()? Requires eager support.
  if iter_.shape and iter_.shape[0] is not None:
    # Number of rows known statically.
    n = iter_.shape[0]
  else:
    # NOTE(review): row_lengths()[0] is the length of the first row, not the
    # number of rows — confirm this is the intended dynamic bound.
    n = iter_.row_lengths()[0]
  # Hidden loop variable: the current row index.
  iterate_index = 0
  def aug_get_state():
    return (iterate_index,) + get_state()
  def aug_set_state(aug_loop_vars):
    nonlocal iterate_index
    # TODO(b/171479293): Drop the lint override.
    iterate_index, *loop_vars = aug_loop_vars # pylint:disable=unused-variable
    # The iteration index is not "output" by the for loop. If the iterate
    # is used outside the loop, it will appear in the loop vars separately.
    set_state(loop_vars)
  def aug_body():
    nonlocal iterate_index
    body(iter_[iterate_index])
    iterate_index += 1
  def aug_test():
    main_test = iterate_index < n
    if extra_test is not None:
      # Only evaluate extra_test while the index is in bounds.
      return control_flow_ops.cond(main_test, extra_test, lambda: False)
    return main_test
  _add_max_iterations_hint(opts, n)
  _tf_while_stmt(
      aug_test,
      aug_body,
      aug_get_state,
      aug_set_state,
      ('<internal iterate>',) + symbol_names,
      opts)
def _tf_range_for_stmt(
    iter_, extra_test, body, get_state, set_state, symbol_names, opts):
  """Overload of for_stmt that iterates over a TF range (and elides it).

  The range op is not materialized; the loop runs directly on its
  (start, limit, delta) inputs, with the iterate carried as a hidden loop var.
  """
  start, limit, delta = iter_.op.inputs
  iterate = start
  def _value_or(name, var, default):
    # If the user's iterate variable is undefined before the loop (it is only
    # assigned by the loop itself), seed it with the range start.
    if (name == opts['iterate_names'] and isinstance(var, variables.Undefined)):
      return default
    return var
  def aug_get_state():
    state_vars = get_state()
    state_vars = tuple(
        _value_or(name, var, iterate)
        for name, var in zip(symbol_names, state_vars))
    return (iterate,) + state_vars
  def aug_set_state(aug_loop_vars):
    nonlocal iterate
    # TODO(b/171479293): Drop the lint override.
    iterate, *loop_vars = aug_loop_vars # pylint:disable=unused-variable
    # The iteration index is not "output" by the for loop. If the iterate
    # is used outside the loop, it will appear in the loop vars separately.
    set_state(loop_vars)
  def aug_body():
    nonlocal iterate
    body(iterate)
    iterate += delta
  def aug_test():
    # TODO(b/159713842): Remove once constant folding works.
    const_delta = tensor_util.constant_value(delta)
    if const_delta is not None:
      # Step sign known at trace time: a single comparison suffices.
      if const_delta >= 0:
        main_test = iterate < limit
      else:
        main_test = iterate > limit
    else:
      # Step sign only known at runtime: test both directions dynamically.
      main_test = math_ops.logical_or(
          math_ops.logical_and(delta >= 0, iterate < limit),
          math_ops.logical_and(delta < 0, iterate > limit))
    if extra_test is not None:
      main_test = control_flow_ops.cond(main_test, extra_test, lambda: False)
    return main_test
  _add_max_iterations_hint(
      opts,
      math_ops.cast(misc.get_range_len(start, limit, delta), dtypes.int32))
  _tf_while_stmt(
      aug_test,
      aug_body,
      aug_get_state,
      aug_set_state,
      ('<internal iterate>',) + symbol_names,
      opts)
def _tf_iterator_for_stmt(
    iter_, extra_test, body, get_state, set_state, symbol_names, opts):
  """Overload of for_stmt that iterates over TF Iterators. See for_loop.

  Termination is driven by a hidden boolean loop variable `has_next`, set
  from get_next_as_optional() on each iteration.
  """
  symbol_names = ('<internal has_next>',) + symbol_names
  has_next = True
  def aug_get_state():
    return (has_next,) + get_state()
  def aug_set_state(aug_loop_vars):
    nonlocal has_next
    # TODO(b/171479293): Drop the lint override.
    has_next, *loop_vars = aug_loop_vars # pylint:disable=unused-variable
    set_state(loop_vars)
  init_vars = aug_get_state()
  _verify_loop_init_vars(init_vars, symbol_names)
  def aug_body():
    """Main body passed to _tf_while_stmt."""
    nonlocal has_next
    opt_iterate = iter_.get_next_as_optional()
    has_next = opt_iterate.has_value()
    loop_vars = aug_get_state() # updated by set_state() in _tf_while_loop.
    def main_path():
      # The iterator yielded a value: run the user body on it.
      body(opt_iterate.get_value())
      new_loop_vars = aug_get_state()
      # Note: this verification duplicates the one performed in tf_while_stmt,
      # but needs to be done earlier to prevent the tf.cond from blowing up
      # first.
      _verify_tf_loop_vars(
          init_vars, loop_vars, new_loop_vars, symbol_names, opts)
      return new_loop_vars
    def noop_path():
      # Iterator exhausted: pass the state through unchanged.
      return loop_vars
    # TODO(mdan): If tf.while_loop supported Optional, this could be avoided.
    # Calling set_state so that get_state() _tf_while_loop sees the conditional
    # tensors.
    aug_set_state(
        control_flow_ops.cond(has_next, main_path, noop_path))
  def aug_test():
    # This value takes a complicated path to get here:
    #   prev_iteration_body -> get_state -> tf.while_loop (as loop var)
    #   -> current_iteration_body -> set_state -> has_next
    main_test = has_next
    if extra_test is not None:
      return control_flow_ops.cond(main_test, extra_test, lambda: False)
    return main_test
  _tf_while_stmt(
      aug_test,
      aug_body,
      aug_get_state,
      aug_set_state,
      symbol_names,
      opts)
def _general_purpose_scan(ds, init_state, body):
  """Variant of Dataset.scan with semantics of general-purpose computation.

  Args:
    ds: the input dataset.
    init_state: initial state for the scan.
    body: scan function of (state, element) -> (new_state, output).

  Returns:
    A dataset of the scan outputs.
  """
  # Datasets are typically intended for data preprocessing. However, in
  # autograph loops they usually appear as general-purpose computations (for
  # example, a custom training loop). These two use cases require significantly
  # different optimization policies, the most important of which is the device
  # placement. The flag override for use_default_device below instructs the
  # runtime to treat the computation as general-purpose, rather than data
  # preprocessing.
  # TODO(mdan): s/use_default_device/specialize_for_input_pipeline.
  # TODO(mdan): Don't use private symbols.
  # pylint:disable=protected-access
  return dataset_ops._ScanDataset(
      ds, init_state, body, use_default_device=False)
def _tf_dataset_for_stmt(
    ds, extra_test, body, get_state, set_state, symbol_names, opts):
  """Overload of _dataset_for_stmt with early stopping. See for_stmt.

  The loop is lowered to scan (carrying the loop state) -> take_while
  (honoring extra_test, i.e. break) -> reduce (extracting the final state).
  """
  # Note: This is easier to follow with the insight that the computations in
  # a dataset pipeline are transposed (aka fused).
  # For example, given a pipeline input -> scan -> take_while -> reduce,
  # and a dataset with input [1, 2, 3], the computations occur in the following
  # order:
  #  reduce(take_while(scan(1)))
  #  reduce(take_while(scan(2)))
  #  reduce(take_while(scan(3)))
  init_vars = get_state()
  _verify_loop_init_vars(init_vars, symbol_names)
  # Workaround for Dataset.reduce not allowing empty state tensors - create
  # a dummy state variable that remains unused.
  # TODO(mdan): reduce should allow and match empty structures.
  if not init_vars:
    init_vars = (constant_op.constant(0),)
    symbol_names = ('<internal dummy>',)
    def dummy_set_state(unused_dummy):
      pass
    def dummy_get_state():
      return (constant_op.constant(0),)
    get_state, set_state = dummy_get_state, dummy_set_state
  def scan_body(scan_state, scan_inputs):
    """Main body of the Dataset.scan."""
    loop_vars, iterate = scan_state, scan_inputs
    set_state(loop_vars)
    def main_path():
      body(iterate)
      new_loop_vars = get_state()
      _verify_tf_loop_vars(
          init_vars, loop_vars, new_loop_vars, symbol_names, opts,
          check_shapes=False)
      return new_loop_vars
    if extra_test is not None:
      extra_cond = extra_test()
      # Skip the body entirely once the extra test fails (i.e. break).
      new_loop_vars = control_flow_ops.cond(
          extra_cond, main_path, lambda: loop_vars)
    else:
      # TODO(mdan): the optimizer should be able to remove an invariant cond?
      extra_cond = (constant_op.constant(True),) # dummy value, unused
      new_loop_vars = main_path()
    # The extra_cond rides along in the scan outputs so take_while can see it.
    scan_outputs = new_loop_vars, extra_cond
    new_scan_state = new_loop_vars
    return new_scan_state, scan_outputs
  def take_while_predicate(unused_loop_vars, extra_cond):
    return extra_cond
  def reduce_body(unused_reduce_state, scan_outputs):
    # Keep only the most recent loop state; discard the stop signal.
    output_loop_vars, unused_extra_cond = scan_outputs
    new_reduce_state = output_loop_vars
    return new_reduce_state
  ds = _general_purpose_scan(ds, init_vars, scan_body)
  if extra_test is not None:
    ds = ds.apply(take_while_ops.take_while(take_while_predicate))
  final_loop_vars = ds.reduce(init_vars, reduce_body)
  set_state(final_loop_vars)
def _tf_distributed_iterable_for_stmt(
    iter_, extra_test, body, get_state, set_state, symbol_names, opts):
  """Overload of for_stmt that iterates over TF distributed datasets.

  Lowered to the distributed iterable's own reduce; early stopping
  (break/return, i.e. extra_test) is not supported here.
  """
  if extra_test is not None:
    raise NotImplementedError(
        'break and return statements are not yet supported in '
        'for ... in distributed input loops.')
  init_vars = get_state()
  _verify_loop_init_vars(init_vars, symbol_names)
  if 'shape_invariants' in opts:
    # Convert the user-facing (var, invariant) mapping to the positional form
    # expected downstream.
    opts['shape_invariants'] = _shape_invariants_mapping_to_positional_list(
        opts['shape_invariants'], init_vars)
  def reduce_body(loop_vars, iterate):
    set_state(loop_vars)
    body(iterate)
    new_loop_vars = get_state()
    _verify_tf_loop_vars(
        init_vars, loop_vars, new_loop_vars, symbol_names, opts)
    return new_loop_vars
  set_state(iter_.reduce(init_vars, reduce_body))
def while_stmt(test, body, get_state, set_state, symbol_names, opts):
  """Functional form of a while statement.

  The loop operates on a so-called state, which includes all symbols that are
  variant across loop iterations. In what follows we refer to state as either
  a tuple of entities that represent an actual state, or a list of arguments
  of the corresponding types.

  The inputs and outputs of the callables representing the loop blocks are not
  explicit - instead, these functions must use nonlocal/global for side effects.
  The inputs and outputs are instead controlled by the set_state/get_state
  functions.

  Args:
    test: Callable with boolean return type. The loop condition.
    body: Callable representing the actual loop body.
    get_state: Additional callable which can capture additional state (such as
      the values of composite symbols). This is only useful when staging the
      loop.
    set_state: Additional callable which save values captured by get_state back
      into the Python environment. This is only useful when staging the loop.
    symbol_names: Tuple containing the names of all loop variables.
    opts: Optional dict of extra loop parameters.

  Returns:
    None. The final state is communicated through set_state.
  """
  # Evaluate the initial test once in order to do the dispatch. The evaluation
  # is isolated to minimize unwanted side effects.
  # TODO(mdan): Do a full iteration - some state types might lower to Tensor.
  with func_graph.FuncGraph('tmp').as_default():
    init_test = test()
  # TensorFlow: Multiple evaluations are acceptable in this case, so we're fine
  # with the re-evaluation of `test` that `_tf_while_stmt` will make.
  if tensors.is_dense_tensor(init_test):
    _tf_while_stmt(test, body, get_state, set_state, symbol_names, opts)
    return
  # Normal Python: We already consumed one evaluation of `test`; consistently,
  # unroll one iteration before dispatching to a normal loop.
  # TODO(mdan): Push the "init_test" value via opts into _py_while_stmt?
  if not init_test:
    return
  body()
  _py_while_stmt(test, body, get_state, set_state, opts)
class _PythonLoopChecker(object):
  """Verifies Python loops for TF-specific limits.

  Enforces an absolute iteration cap and, optionally, samples the default
  graph's op count across one iteration to warn about loops that were likely
  meant to be TF loops (each unrolled iteration adding ops to the graph).
  """
  __slots__ = (
      'iterations',  # number of iterations observed so far
      'check_inefficient_unroll',  # whether op-count sampling is enabled
      'check_op_count_after_iteration',  # a sample is pending for this iter
      'ops_before_iteration',  # op snapshot taken before the sampled iter
  )
  def __init__(self):
    self.iterations = 1
    self.check_inefficient_unroll = WARN_INEFFICIENT_UNROLL
    # Triggered when we decided to test the op counts.
    self.check_op_count_after_iteration = False
  def _get_ops(self):
    # Snapshot of all ops currently present in the default graph.
    return ops.get_default_graph().get_operations()
  def _check_unroll_limits(self):
    if self.iterations > PYTHON_MAX_ITERATIONS:
      raise ValueError('iteration limit exceeded')
  def _stop_checking_inefficient_unroll(self):
    # Permanently disable op-count sampling (after warning once, or after a
    # few clean samples).
    self.check_inefficient_unroll = False
    self.check_op_count_after_iteration = False
    self.ops_before_iteration = None
  def _verify_inefficient_unroll(self):
    """Checks for possibly-inefficient creation of ops in a Python loop."""
    assert self.ops_before_iteration is not None
    ops_after_iteration = self._get_ops()
    new_ops = tuple(
        op for op in ops_after_iteration if op not in self.ops_before_iteration)
    if len(new_ops) < INEFFICIENT_UNROLL_MIN_OPS:
      return False
    ag_logging.warn(
        'Large unrolled loop detected. Did you mean to use a TF loop?'
        ' The following ops were created after iteration %s: %s'
        '\nSee'
        ' https://github.com/tensorflow/tensorflow/blob/master/'
        'tensorflow/python/autograph/g3doc/reference/common_errors.md'
        '#warning-large-unrolled-loop-detected'
        '\n'
        'Location:'
        '\n%s'
        '', self.iterations, new_ops, '\n'.join(traceback.format_stack()))
    return True
  def before_iteration(self):
    """Called before each iteration in a Python loop."""
    if (self.check_inefficient_unroll and
        self.iterations > INEFFICIENT_UNROLL_MIN_ITERATIONS):
      # Take an op snapshot so after_iteration can diff against it.
      self.ops_before_iteration = self._get_ops()
      self.check_op_count_after_iteration = True
  def after_iteration(self):
    """Called after each iteration in a Python loop."""
    self.iterations += 1
    self._check_unroll_limits()
    if self.check_op_count_after_iteration:
      did_warn = self._verify_inefficient_unroll()
      if did_warn:
        self._stop_checking_inefficient_unroll() # Only warn once.
      elif self.iterations > INEFFICIENT_UNROLL_MIN_ITERATIONS + 3:
        # Once deciding to check the op counts, only do it for a few iterations.
        self._stop_checking_inefficient_unroll()
def _py_while_stmt(test, body, get_state, set_state, opts):
  """Overload of while_stmt that executes a Python while loop."""
  del opts, get_state, set_state

  if __debug__:
    # Wrap the body so each iteration is accounted for by the loop checker,
    # which warns on accidental large unrolls.
    checker = _PythonLoopChecker()
    checker.before_iteration()

    unchecked_body = body

    def checked_body():
      unchecked_body()
      checker.after_iteration()
      checker.before_iteration()

    body = checked_body

  while test():
    body()
def _shape_invariants_mapping_to_positional_list(mapping, keys):
# The keys are not expected to be hashable.
mapping = {id(k): (k, v) for k, v in mapping}
result = []
for k in keys:
map_key, map_val = mapping.get(id(k), (None, None))
result.append(map_val if map_key is k else None)
return tuple(result)
# Textual description of what a legal TF loop variable is. This description
# summarizes types that _placeholder_value below can handle. Keep the two
# together and in sync.
# Used in error messages by _verify_loop_init_vars.
LEGAL_LOOP_TYPES = 'Tensor, int, float, bool or a list, tuple or dict thereof'
def _placeholder_value(like, original=None):
  """Constructs a placeholder (dummy zero) value matching the type of `like`.

  Falls back to `original` when no placeholder can be inferred. The set of
  handled types must stay in sync with LEGAL_LOOP_TYPES above.
  """
  if isinstance(like, (variables.Undefined, variables.UndefinedReturnValue)):
    placeholder = original
  elif isinstance(like, (int, float, bool)):
    placeholder = type(like)(0)
  elif tensor_util.is_tf_type(like):
    placeholder = array_ops.zeros(like.shape, like.dtype)
  elif isinstance(like, (list, tuple, dict)):
    placeholder = nest.map_structure(_placeholder_value, like)
  else:
    placeholder = original
  return placeholder
def _try_handling_undefineds(
    body, get_state, set_state, init_vars, nulls, symbol_names):
  """Makes a best-effort attempt to substitute undefineds with placeholders.

  Note: this substitution requires two things to happen:
   1. the types of loop variables could be inferred (usually by staging one
      iteration)
   2. these types could be replaced by placeholders (e.g. zero values, for
      tensors)

  Args:
    body: a function representing the loop body. See while_stmt.
    get_state: state getter for the loop statement. See while_stmt.
    set_state: state getter for the loop statement. See while_stmt.
    init_vars: loop variables before entering the loop. See while_stmt.
    nulls: list of boolean flags indicating whether the corresponding loop
      var is None or undefined.
    symbol_names: list of loop variable names. See while_stmt.

  Returns:
    A tuple (success, new_init_vars). success is a boolean flag indicating
    whether types could be successfully inferred (step 1 above). new_init_vars
    contains the loop vars, with None or undefined values replaced by
    placeholders, where possible (step 2 above).
  """
  state_modified = False
  try:
    # Stage an iteration of the loop body in a temporary graph.
    with func_graph.FuncGraph('tmp').as_default():
      # This call to set_state helps report nicer error messages when symbols
      # are inconsistently used.
      set_state(init_vars)
      state_modified = True
      body()
      first_iter_vars = get_state()
  except (UnboundLocalError, TypeError, ValueError, KeyError):
    ag_logging.log(1, 'Caught error while staging loop body', exc_info=True)
    # Fall back to the old functionality. It will likely result in an input
    # validation failure.
    first_iter_vars = None
  finally:
    if state_modified:
      # Restore the pre-staging state; the temporary graph above is discarded.
      set_state(init_vars)
  if first_iter_vars is not None:
    # Note: the actual placeholder value doesn't matter, because as the staging
    # proved, it will be replaced by an actual value before being read.
    init_vars = tuple(
        (_placeholder_value(iv, v) if n else v)
        for v, n, iv in zip(init_vars, nulls, first_iter_vars))
    success = True
  else:
    success = False
  # This check runs regardless, in case we captured non-Tensor inputs.
  _verify_loop_init_vars(init_vars, symbol_names, first_iter_vars)
  return success, init_vars
def _runtime_zero_iterations_errmsg(symbol_names, nulls, init_vars):
"""Creates an error message asking for the loop to iterate at least once."""
var_names = []
for sn, n, v in zip(symbol_names, nulls, init_vars):
if not n:
continue
if isinstance(v, variables.UndefinedReturnValue):
var_names.append('the function return value')
else:
var_names.append(sn)
var_names = ', '.join(var_names)
return 'loop must iterate at least once to initialize {}'.format(var_names)
def _tf_while_stmt(test, body, get_state, set_state, symbol_names, opts):
  """Overload of while_stmt that stages a TF while_stmt."""
  init_vars = get_state()
  orig_init_vars = init_vars
  nulls = tuple(_is_none_or_undef(v) for v in init_vars)
  if any(nulls):
    # Some loop vars are None/undefined before the loop; try to substitute
    # placeholders inferred by staging one iteration. If that succeeds, the
    # loop must run at least once at runtime to produce real values.
    require_one_iteration, init_vars = _try_handling_undefineds(
        body, get_state, set_state, init_vars, nulls, symbol_names)
  else:
    require_one_iteration = False
  def aug_test(*loop_vars):
    if require_one_iteration:
      # Drop the hidden leading "ran at least once" flag.
      loop_vars = loop_vars[1:]
    set_state(loop_vars)
    return _verify_tf_condition(test(), 'while loop')
  def aug_body(*loop_vars):
    if require_one_iteration:
      loop_vars = loop_vars[1:]
    set_state(loop_vars)
    body()
    new_loop_vars = get_state()
    _verify_tf_loop_vars(
        init_vars, loop_vars, new_loop_vars, symbol_names, opts)
    if require_one_iteration:
      # Mark that at least one iteration has executed.
      new_loop_vars = (True,) + new_loop_vars
    return new_loop_vars
  if 'shape_invariants' in opts:
    # Convert the user-facing (var, invariant) mapping to positional form.
    opts['shape_invariants'] = _shape_invariants_mapping_to_positional_list(
        opts['shape_invariants'], init_vars)
  while_loop_opts = dict(opts)
  while_loop_opts.pop('iterate_names', None)
  # Non-v2 while_loop unpacks the results when there is only one return value.
  # This enforces consistency across versions.
  while_loop_opts['return_same_structure'] = True
  if require_one_iteration:
    aug_init_vars = (False,) + init_vars
  else:
    aug_init_vars = init_vars
  final_loop_vars = control_flow_ops.while_loop(
      aug_test, aug_body, aug_init_vars, **while_loop_opts)
  if require_one_iteration:
    # Assert at runtime that the loop actually ran; otherwise the substituted
    # placeholder values would leak out as if they were real.
    with ops.control_dependencies([
        control_flow_ops.Assert(final_loop_vars[0], [
            _runtime_zero_iterations_errmsg(symbol_names, nulls, orig_init_vars)
        ])
    ]):
      final_loop_vars = nest.map_structure(
          lambda v: (array_ops.identity(v) if tensor_util.is_tf_type(v) else v),
          final_loop_vars[1:],
      )
  set_state(final_loop_vars)
def if_stmt(cond, body, orelse, get_state, set_state, symbol_names, nouts):
  """Functional form of an if statement.

  The conditional operates on a state, which includes all symbols whose values
  are a function of the branch taken.

  For example, given the code below that calculates the abs function:

  ```
  x = 1
  if x > 0:
    x = -x
  ```

  The state is represented by the variable `x`. The `body`, `orelse` and
  `set_state` functions must bind to the original `x` symbol, using `nonlocal`.

  The inputs and outputs of the callables representing the loop blocks are not
  explicit - instead, these functions must use nonlocal/global for side effects.
  The inputs and outputs are instead controlled by the set_state/get_state
  functions.

  Args:
    cond: Boolean.
    body: Callable representing the main block of the conditional.
    orelse: Callable representing the else block of the conditional.
    get_state: Function that returns a tuple containing the values of all
      composite symbols modified within the conditional. This allows access to
      state that branches may mutate through side effects. This function is not
      needed and should not be called when dispatching to code matching Python's
      default semantics. This is useful for checkpointing to avoid unintended
      side-effects when staging requires evaluating all code-paths.
    set_state: Function to set the values of all composite symbols modified
      within the conditional. This is the complement to get_state, used to
      restore checkpointed values. The single argument is a tuple containing
      values for each composite symbol that may be modified in a branch of the
      conditional. This is usually the result of a call to get_state.
    symbol_names: Tuple containing basic loop var names.
    nouts: Number of variables output by the statement. Vars which are
      not outputs will not be passed through staged control flow such as
      tf.cond. This includes variables that are defined before the conditional,
      but are not used after it.
  """
  # Note: tf.cond doesn't support SparseTensor.
  if tensors.is_dense_tensor(cond):
    _tf_if_stmt(cond, body, orelse, get_state, set_state, symbol_names, nouts)
  else:
    # Non-tensor condition: evaluate as a plain Python conditional.
    _py_if_stmt(cond, body, orelse)
def _tf_if_stmt(
        cond, body, orelse, get_state, set_state, symbol_names, nouts):
    """Overload of if_stmt that stages a TF cond.

    Args mirror if_stmt: `cond` is the boolean predicate, `body` and `orelse`
    are the branch callables, `get_state`/`set_state` checkpoint and restore
    the composite symbols the branches may mutate, `symbol_names` names those
    symbols for error messages, and `nouts` is the number of symbols that are
    outputs of the statement.
    """
    cond = _verify_tf_condition(cond, 'if statement')
    if not nouts:
        prev_get_state, prev_set_state = get_state, set_state
        # Control flow V1 wants at least one output.
        # Wrap the state accessors so a dummy constant is threaded through
        # the cond outputs and stripped again when state is restored.
        get_state = lambda: (0,) + prev_get_state()
        set_state = lambda v: prev_set_state(v[1:])
        symbol_names += ('<unused dummy>',)
        nouts = 1
    # Snapshot of the state before either branch runs; both branches are
    # traced starting from this checkpoint.
    init_vars = get_state()
    # TODO(mdan): Use nonlocal once we no longer need to support py2.
    # One-element lists emulate nonlocal so each branch closure can see the
    # variables produced by the other branch for consistency checks.
    new_body_vars_ = [None]
    new_orelse_vars_ = [None]

    def aug_body():
        # Restore the checkpoint so earlier tracing cannot leak state into
        # this branch.
        set_state(init_vars)
        body()
        new_body_vars = get_state()
        # Only the first `nouts` symbols are outputs of the cond.
        new_body_vars = new_body_vars[:nouts]
        new_body_vars_[0] = new_body_vars
        _verify_tf_cond_branch_vars(new_body_vars, symbol_names, 'main')
        # Whichever branch traces second performs the cross-branch check.
        if new_orelse_vars_[0] is not None:
            _verify_tf_cond_vars(new_body_vars, new_orelse_vars_[0], symbol_names)
        return new_body_vars

    def aug_orelse():
        # Mirror image of aug_body for the else branch.
        set_state(init_vars)
        orelse()
        new_orelse_vars = get_state()
        new_orelse_vars = new_orelse_vars[:nouts]
        new_orelse_vars_[0] = new_orelse_vars
        _verify_tf_cond_branch_vars(new_orelse_vars, symbol_names, 'else')
        if new_body_vars_[0] is not None:
            _verify_tf_cond_vars(new_body_vars_[0], new_orelse_vars, symbol_names)
        return new_orelse_vars

    final_cond_vars = control_flow_ops.cond(
        cond, aug_body, aug_orelse, strict=True)
    # Re-attach the non-output symbols (unchanged by the conditional) and
    # publish the merged state back to the caller's scope.
    final_cond_vars = final_cond_vars + init_vars[nouts:]
    set_state(final_cond_vars)
def _py_if_stmt(cond, body, orelse):
"""Overload of if_stmt that executes a Python if statement."""
return body() if cond else orelse()
| |
# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from collections import namedtuple
import logging
import math
import os
import sys
from awscli.customizations.s3.utils import find_chunksize, \
find_bucket_key, relative_path, PrintTask, create_warning
from awscli.customizations.s3.executor import Executor
from awscli.customizations.s3 import tasks
from awscli.customizations.s3.transferconfig import RuntimeConfig
from awscli.compat import six
from awscli.compat import queue
LOGGER = logging.getLogger(__name__)
# Maximum object size allowed in S3.
# See: http://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html
MAX_UPLOAD_SIZE = 5 * (1024 ** 4)
# Aggregate outcome of a transfer run: counts of failed and warned tasks,
# as reported by the executor after S3Handler.call() finishes.
CommandResult = namedtuple('CommandResult',
                           ['num_tasks_failed', 'num_tasks_warned'])
class S3Handler(object):
    """
    This class sets up the process to perform the tasks sent to it. It
    sources the ``self.executor`` from which threads inside the
    class pull tasks from to complete.
    """
    # Bound on queued IO writes so producers cannot run arbitrarily far
    # ahead of the thread draining ``write_queue``.  Scoped to this class
    # (as opposed to constants.py) so the value is easy to change later.
    MAX_IO_QUEUE_SIZE = 20

    def __init__(self, session, params, result_queue=None,
                 runtime_config=None):
        """
        :param session: session object passed through to the individual
            tasks to create service clients.
        :param params: dict of CLI parameters; must contain ``'region'``.
            Only keys present in the defaults below are copied over.
        :param result_queue: optional queue for task results/warnings; a
            fresh ``queue.Queue`` is created when not provided.
        :param runtime_config: optional transfer tuning values; defaults
            to ``RuntimeConfig.defaults()``.
        """
        self.session = session
        if runtime_config is None:
            runtime_config = RuntimeConfig.defaults()
        self._runtime_config = runtime_config
        # The write_queue has potential for optimizations, so the constant
        # for maxsize is scoped to this class (as opposed to constants.py)
        # so we have the ability to change this value later.
        self.write_queue = queue.Queue(maxsize=self.MAX_IO_QUEUE_SIZE)
        self.result_queue = result_queue
        if not self.result_queue:
            self.result_queue = queue.Queue()
        # Defaults for every parameter this handler understands; values
        # supplied by the caller override them below.
        self.params = {
            'dryrun': False, 'quiet': False, 'acl': None,
            'guess_mime_type': True, 'sse_c_copy_source': None,
            'sse_c_copy_source_key': None, 'sse': None,
            'sse_c': None, 'sse_c_key': None, 'sse_kms_key_id': None,
            'storage_class': None, 'website_redirect': None,
            'content_type': None, 'cache_control': None,
            'content_disposition': None, 'content_encoding': None,
            'content_language': None, 'expires': None, 'grants': None,
            'only_show_errors': False, 'is_stream': False,
            'paths_type': None, 'expected_size': None, 'metadata': None,
            'metadata_directive': None, 'ignore_glacier_warnings': False
        }
        self.params['region'] = params['region']
        for key in self.params.keys():
            if key in params:
                self.params[key] = params[key]
        self.multi_threshold = self._runtime_config['multipart_threshold']
        self.chunksize = self._runtime_config['multipart_chunksize']
        LOGGER.debug("Using a multipart threshold of %s and a part size of %s",
                     self.multi_threshold, self.chunksize)
        self.executor = Executor(
            num_threads=self._runtime_config['max_concurrent_requests'],
            result_queue=self.result_queue,
            quiet=self.params['quiet'],
            only_show_errors=self.params['only_show_errors'],
            max_queue_size=self._runtime_config['max_queue_size'],
            write_queue=self.write_queue
        )
        # (context, filename) pairs of in-flight multipart transfers,
        # tracked so they can be aborted / cleaned up on shutdown.
        self._multipart_uploads = []
        self._multipart_downloads = []

    def call(self, files):
        """
        This function pulls a ``FileInfo`` or ``TaskInfo`` object from
        a list ``files``. Each object is then deemed if it will be a
        multipart operation and add the necessary attributes if so. Each
        object is then wrapped with a ``BasicTask`` object which is
        essentially a thread of execution for a thread to follow. These
        tasks are then submitted to the main executor.

        :return: ``CommandResult`` with the number of failed and warned
            tasks observed by the executor.
        """
        try:
            self.executor.start()
            total_files, total_parts = self._enqueue_tasks(files)
            self.executor.print_thread.set_total_files(total_files)
            self.executor.print_thread.set_total_parts(total_parts)
            # Normal path: ask the executor to drain its queue, wait for
            # the worker threads, then clean up leftover multipart state.
            self.executor.initiate_shutdown()
            self.executor.wait_until_shutdown()
            self._shutdown()
        except Exception as e:
            LOGGER.debug('Exception caught during task execution: %s',
                         str(e), exc_info=True)
            self.result_queue.put(PrintTask(message=str(e), error=True))
            # Abort as quickly as possible rather than draining the queue.
            self.executor.initiate_shutdown(
                priority=self.executor.IMMEDIATE_PRIORITY)
            self._shutdown()
            self.executor.wait_until_shutdown()
        except KeyboardInterrupt:
            self.result_queue.put(PrintTask(message=("Cleaning up. "
                                                     "Please wait..."),
                                            error=True))
            self.executor.initiate_shutdown(
                priority=self.executor.IMMEDIATE_PRIORITY)
            self._shutdown()
            self.executor.wait_until_shutdown()
        return CommandResult(self.executor.num_tasks_failed,
                             self.executor.num_tasks_warned)

    def _shutdown(self):
        """Clean up any transfer state left over after the executor stops."""
        # And finally we need to make a pass through all the existing
        # multipart uploads and abort any pending multipart uploads.
        self._abort_pending_multipart_uploads()
        self._remove_pending_downloads()

    def _abort_pending_multipart_uploads(self):
        # For the purpose of aborting uploads, we consider any
        # upload context with an upload id.
        for upload, filename in self._multipart_uploads:
            if upload.is_cancelled():
                try:
                    upload.wait_for_upload_id()
                except tasks.UploadCancelledError:
                    pass
                else:
                    # This means that the upload went from STARTED -> CANCELLED.
                    # This could happen if a part thread decided to cancel the
                    # upload. We need to explicitly abort the upload here.
                    self._cancel_upload(upload.wait_for_upload_id(), filename)
            upload.cancel_upload(self._cancel_upload, args=(filename,))

    def _remove_pending_downloads(self):
        # The downloads case is easier than the uploads case because we don't
        # need to make any service calls. To properly cleanup we just need
        # to go through the multipart downloads that were in progress but
        # cancelled and remove the local file.
        for context, local_filename in self._multipart_downloads:
            if (context.is_cancelled() or context.is_started()) and \
                    os.path.exists(local_filename):
                # The file is in an inconsistent state (not all the parts
                # were written to the file) so we should remove the
                # local file rather than leave it in a bad state. We don't
                # want to remove the files if the download has *not* been
                # started because we haven't touched the file yet, so it's
                # better to leave the old version of the file rather than
                # deleting the file entirely.
                os.remove(local_filename)
            context.cancel()

    def _cancel_upload(self, upload_id, filename):
        """Abort the multipart upload identified by ``upload_id`` on S3."""
        bucket, key = find_bucket_key(filename.dest)
        params = {
            'Bucket': bucket,
            'Key': key,
            'UploadId': upload_id,
        }
        LOGGER.debug("Aborting multipart upload for: %s", key)
        filename.client.abort_multipart_upload(**params)

    def _enqueue_tasks(self, files):
        """Submit one task (or one set of part tasks) per file.

        :return: tuple ``(total_files, total_parts)`` used by the print
            thread to report progress.
        """
        total_files = 0
        total_parts = 0
        for filename in files:
            num_uploads = 1
            is_multipart_task = self._is_multipart_task(filename)
            too_large = False
            if hasattr(filename, 'size'):
                too_large = filename.size > MAX_UPLOAD_SIZE
            if too_large and filename.operation_name == 'upload':
                warning_message = "File exceeds s3 upload limit of 5 TB."
                warning = create_warning(relative_path(filename.src),
                                         message=warning_message)
                self.result_queue.put(warning)
            # Warn and skip over glacier incompatible tasks.
            elif not filename.is_glacier_compatible():
                LOGGER.debug(
                    'Encountered glacier object s3://%s. Not performing '
                    '%s on object.' % (filename.src, filename.operation_name))
                if not self.params['ignore_glacier_warnings']:
                    warning = create_warning(
                        's3://'+filename.src,
                        'Object is of storage class GLACIER. Unable to '
                        'perform %s operations on GLACIER objects. You must '
                        'restore the object to be able to the perform '
                        'operation.' %
                        filename.operation_name
                    )
                    self.result_queue.put(warning)
                # Skipped entirely: does not count toward totals.
                continue
            elif is_multipart_task and not self.params['dryrun']:
                # If we're in dryrun mode, then we don't need the
                # real multipart tasks. We can just use a BasicTask
                # in the else clause below, which will print out the
                # fact that it's transferring a file rather than
                # the specific part tasks required to perform the
                # transfer.
                num_uploads = self._enqueue_multipart_tasks(filename)
            else:
                task = tasks.BasicTask(
                    session=self.session, filename=filename,
                    parameters=self.params,
                    result_queue=self.result_queue)
                self.executor.submit(task)
            total_files += 1
            total_parts += num_uploads
        return total_files, total_parts

    def _is_multipart_task(self, filename):
        """Return True when the file qualifies for a multipart transfer."""
        # First we need to determine if it's an operation that even
        # qualifies for multipart upload.
        if hasattr(filename, 'size'):
            above_multipart_threshold = filename.size > self.multi_threshold
            if above_multipart_threshold:
                if filename.operation_name in ('upload', 'download',
                                               'move', 'copy'):
                    return True
                else:
                    return False
            else:
                return False
        else:
            return False

    def _enqueue_multipart_tasks(self, filename):
        """Dispatch to the upload/download/copy multipart enqueue helper
        appropriate for the file's operation; returns the part count."""
        num_uploads = 1
        if filename.operation_name == 'upload':
            num_uploads = self._enqueue_multipart_upload_tasks(filename)
        elif filename.operation_name == 'move':
            if filename.src_type == 'local' and filename.dest_type == 's3':
                num_uploads = self._enqueue_multipart_upload_tasks(
                    filename, remove_local_file=True)
            elif filename.src_type == 's3' and filename.dest_type == 'local':
                num_uploads = self._enqueue_range_download_tasks(
                    filename, remove_remote_file=True)
            elif filename.src_type == 's3' and filename.dest_type == 's3':
                num_uploads = self._enqueue_multipart_copy_tasks(
                    filename, remove_remote_file=True)
            else:
                raise ValueError("Unknown transfer type of %s -> %s" %
                                 (filename.src_type, filename.dest_type))
        elif filename.operation_name == 'copy':
            num_uploads = self._enqueue_multipart_copy_tasks(
                filename, remove_remote_file=False)
        elif filename.operation_name == 'download':
            num_uploads = self._enqueue_range_download_tasks(filename)
        return num_uploads

    def _enqueue_range_download_tasks(self, filename, remove_remote_file=False):
        """Enqueue create-file, ranged part downloads, and completion tasks
        for a multipart download; returns the number of parts."""
        chunksize = find_chunksize(filename.size, self.chunksize)
        num_downloads = int(filename.size / chunksize)
        context = tasks.MultipartDownloadContext(num_downloads)
        create_file_task = tasks.CreateLocalFileTask(
            context=context, filename=filename,
            result_queue=self.result_queue)
        self.executor.submit(create_file_task)
        self._do_enqueue_range_download_tasks(
            filename=filename, chunksize=chunksize,
            num_downloads=num_downloads, context=context,
            remove_remote_file=remove_remote_file
        )
        complete_file_task = tasks.CompleteDownloadTask(
            context=context, filename=filename, result_queue=self.result_queue,
            params=self.params, io_queue=self.write_queue)
        self.executor.submit(complete_file_task)
        # Track for cleanup in _remove_pending_downloads().
        self._multipart_downloads.append((context, filename.dest))
        if remove_remote_file:
            remove_task = tasks.RemoveRemoteObjectTask(
                filename=filename, context=context)
            self.executor.submit(remove_task)
        return num_downloads

    def _do_enqueue_range_download_tasks(self, filename, chunksize,
                                         num_downloads, context,
                                         remove_remote_file=False):
        """Submit one DownloadPartTask per chunk of the object."""
        for i in range(num_downloads):
            task = tasks.DownloadPartTask(
                part_number=i, chunk_size=chunksize,
                result_queue=self.result_queue, filename=filename,
                context=context, io_queue=self.write_queue,
                params=self.params)
            self.executor.submit(task)

    def _enqueue_multipart_upload_tasks(self, filename,
                                        remove_local_file=False):
        """Enqueue the full create/part/complete task chain for a multipart
        upload; returns the number of parts."""
        # First we need to create a CreateMultipartUpload task,
        # then create UploadTask objects for each of the parts.
        # And finally enqueue a CompleteMultipartUploadTask.
        chunksize = find_chunksize(filename.size, self.chunksize)
        num_uploads = int(math.ceil(filename.size /
                                    float(chunksize)))
        upload_context = self._enqueue_upload_start_task(
            chunksize, num_uploads, filename)
        self._enqueue_upload_tasks(
            num_uploads, chunksize, upload_context, filename, tasks.UploadPartTask)
        self._enqueue_upload_end_task(filename, upload_context)
        if remove_local_file:
            remove_task = tasks.RemoveFileTask(local_filename=filename.src,
                                               upload_context=upload_context)
            self.executor.submit(remove_task)
        return num_uploads

    def _enqueue_multipart_copy_tasks(self, filename,
                                      remove_remote_file=False):
        """Same chain as a multipart upload but with CopyPartTask parts;
        returns the number of parts."""
        chunksize = find_chunksize(filename.size, self.chunksize)
        num_uploads = int(math.ceil(filename.size / float(chunksize)))
        upload_context = self._enqueue_upload_start_task(
            chunksize, num_uploads, filename)
        self._enqueue_upload_tasks(
            num_uploads, chunksize, upload_context, filename, tasks.CopyPartTask)
        self._enqueue_upload_end_task(filename, upload_context)
        if remove_remote_file:
            remove_task = tasks.RemoveRemoteObjectTask(
                filename=filename, context=upload_context)
            self.executor.submit(remove_task)
        return num_uploads

    def _enqueue_upload_start_task(self, chunksize, num_uploads, filename):
        """Submit the CreateMultipartUpload task and return its context."""
        upload_context = tasks.MultipartUploadContext(
            expected_parts=num_uploads)
        create_multipart_upload_task = tasks.CreateMultipartUploadTask(
            session=self.session, filename=filename,
            parameters=self.params,
            result_queue=self.result_queue, upload_context=upload_context)
        self.executor.submit(create_multipart_upload_task)
        return upload_context

    def _enqueue_upload_tasks(self, num_uploads, chunksize, upload_context,
                              filename, task_class):
        """Submit one part task per part; part numbers are 1-based."""
        for i in range(1, (num_uploads + 1)):
            self._enqueue_upload_single_part_task(
                part_number=i,
                chunk_size=chunksize,
                upload_context=upload_context,
                filename=filename,
                task_class=task_class
            )

    def _enqueue_upload_single_part_task(self, part_number, chunk_size,
                                         upload_context, filename, task_class,
                                         payload=None):
        """Build and submit a single part task of ``task_class``."""
        kwargs = {'part_number': part_number, 'chunk_size': chunk_size,
                  'result_queue': self.result_queue,
                  'upload_context': upload_context, 'filename': filename,
                  'params': self.params}
        if payload:
            kwargs['payload'] = payload
        task = task_class(**kwargs)
        self.executor.submit(task)

    def _enqueue_upload_end_task(self, filename, upload_context):
        """Submit the CompleteMultipartUpload task and record the upload
        for cleanup on shutdown."""
        complete_multipart_upload_task = tasks.CompleteMultipartUploadTask(
            session=self.session, filename=filename, parameters=self.params,
            result_queue=self.result_queue, upload_context=upload_context)
        self.executor.submit(complete_multipart_upload_task)
        self._multipart_uploads.append((upload_context, filename))
class S3StreamHandler(S3Handler):
    """
    This class is an alternative ``S3Handler`` to be used when the operation
    involves a stream since the logic is different when uploading and
    downloading streams.
    """
    # This ensures that the number of multipart chunks waiting in the
    # executor queue and in the threads is limited.
    MAX_EXECUTOR_QUEUE_SIZE = 2
    EXECUTOR_NUM_THREADS = 6

    def __init__(self, session, params, result_queue=None,
                 runtime_config=None):
        if runtime_config is None:
            # Rather than using the .defaults(), streaming
            # has different default values so that it does not
            # consume large amounts of memory.
            runtime_config = RuntimeConfig().build_config(
                max_queue_size=self.MAX_EXECUTOR_QUEUE_SIZE,
                max_concurrent_requests=self.EXECUTOR_NUM_THREADS)
        super(S3StreamHandler, self).__init__(session, params, result_queue,
                                              runtime_config)

    def _enqueue_tasks(self, files):
        """Enqueue stream transfer tasks.

        :return: tuple ``(total_files, total_parts)``.
        """
        total_files = 0
        total_parts = 0
        for filename in files:
            num_uploads = 1
            # If uploading stream, it is required to read from the stream
            # to determine if the stream needs to be multipart uploaded.
            payload = None
            if filename.operation_name == 'upload':
                payload, is_multipart_task = \
                    self._pull_from_stream(self.multi_threshold)
            else:
                # Set the file size for the ``FileInfo`` object since
                # streams do not use a ``FileGenerator`` that usually
                # determines the size.
                filename.set_size_from_s3()
                is_multipart_task = self._is_multipart_task(filename)
            if is_multipart_task and not self.params['dryrun']:
                # If we're in dryrun mode, then we don't need the
                # real multipart tasks. We can just use a BasicTask
                # in the else clause below, which will print out the
                # fact that it's transferring a file rather than
                # the specific part tasks required to perform the
                # transfer.
                num_uploads = self._enqueue_multipart_tasks(filename, payload)
            else:
                task = tasks.BasicTask(
                    session=self.session, filename=filename,
                    parameters=self.params,
                    result_queue=self.result_queue,
                    payload=payload)
                self.executor.submit(task)
            total_files += 1
            total_parts += num_uploads
        return total_files, total_parts

    def _pull_from_stream(self, amount_requested):
        """
        This function pulls data from stdin until it hits the amount
        requested or there is no more left to pull in from stdin. The
        function wraps the data into a ``BytesIO`` object that is returned
        along with a boolean telling whether the amount requested is
        the amount returned.
        """
        stream_filein = sys.stdin
        if six.PY3:
            # On py3 the raw byte stream lives on sys.stdin.buffer.
            stream_filein = sys.stdin.buffer
        payload = stream_filein.read(amount_requested)
        payload_file = six.BytesIO(payload)
        return payload_file, len(payload) == amount_requested

    def _enqueue_multipart_tasks(self, filename, payload=None):
        """Dispatch a stream to a multipart upload or a ranged download;
        returns the part count."""
        num_uploads = 1
        if filename.operation_name == 'upload':
            num_uploads = self._enqueue_multipart_upload_tasks(filename,
                                                               payload=payload)
        elif filename.operation_name == 'download':
            num_uploads = self._enqueue_range_download_tasks(filename)
        return num_uploads

    def _enqueue_range_download_tasks(self, filename, remove_remote_file=False):
        """Enqueue ranged download parts for a stream destination (no local
        file is created); returns the part count."""
        # Create the context for the multipart download.
        chunksize = find_chunksize(filename.size, self.chunksize)
        num_downloads = int(filename.size / chunksize)
        context = tasks.MultipartDownloadContext(num_downloads)
        # No file is needed for downloading a stream. So just announce
        # that it has been made since it is required for the context to
        # begin downloading.
        context.announce_file_created()
        # Submit download part tasks to the executor.
        self._do_enqueue_range_download_tasks(
            filename=filename, chunksize=chunksize,
            num_downloads=num_downloads, context=context,
            remove_remote_file=remove_remote_file
        )
        return num_downloads

    def _enqueue_multipart_upload_tasks(self, filename, payload=None):
        """Enqueue a multipart upload whose parts are read from stdin;
        returns the final part count."""
        # First we need to create a CreateMultipartUpload task,
        # then create UploadTask objects for each of the parts.
        # And finally enqueue a CompleteMultipartUploadTask.
        chunksize = self.chunksize
        # Determine an appropriate chunksize if given an expected size.
        if self.params['expected_size']:
            chunksize = find_chunksize(int(self.params['expected_size']),
                                       self.chunksize)
        # Total part count is unknown for a stream, so a placeholder is
        # used here; the real count is announced to the context at the end
        # of _enqueue_upload_tasks below.
        num_uploads = '...'
        # Submit a task to begin the multipart upload.
        upload_context = self._enqueue_upload_start_task(
            chunksize, num_uploads, filename)
        # Now submit a task to upload the initial chunk of data pulled
        # from the stream that was used to determine if a multipart upload
        # was needed.
        self._enqueue_upload_single_part_task(
            part_number=1, chunk_size=chunksize,
            upload_context=upload_context, filename=filename,
            task_class=tasks.UploadPartTask, payload=payload
        )
        # Submit tasks to upload the rest of the chunks of the data coming in
        # from standard input.
        num_uploads = self._enqueue_upload_tasks(
            num_uploads, chunksize, upload_context,
            filename, tasks.UploadPartTask
        )
        # Submit a task to notify the multipart upload is complete.
        self._enqueue_upload_end_task(filename, upload_context)
        return num_uploads

    def _enqueue_upload_tasks(self, num_uploads, chunksize, upload_context,
                              filename, task_class):
        """Keep pulling chunks from stdin and enqueueing part tasks until
        the stream is exhausted; returns the final part count."""
        # The previous upload occured right after the multipart
        # upload started for a stream.
        num_uploads = 1
        while True:
            # Pull more data from standard input.
            payload, is_remaining = self._pull_from_stream(chunksize)
            # Submit an upload part task for the recently pulled data.
            self._enqueue_upload_single_part_task(
                part_number=num_uploads + 1,
                chunk_size=chunksize,
                upload_context=upload_context,
                filename=filename,
                task_class=task_class,
                payload=payload
            )
            num_uploads += 1
            if not is_remaining:
                break
        # Once there is no more data left, announce to the context how
        # many parts are being uploaded so it knows when it can quit.
        upload_context.announce_total_parts(num_uploads)
        return num_uploads
| |
# ref ID: 7
# Plugin manifest read by the omnitool plugin loader.
config = {
    "name": "Dungeon Arena",  #plugin name
    "type": "generator",  #plugin type
    "description": ["Dungeon Arena"]  #description
}
import sys
from collections import defaultdict
if __name__ == "__main__":
sys.path.extend(["."])
import os
os.chdir("..")
del (os)
from pgu import gui
from math import sqrt, cos, sin, pi
from random import *
import pygame
from os.path import join as osjoin
from omnitool.database import version, tiles, names
from omnitool.tlib import *
from omnitool.tinterface import *
from omnitool.binarysplit import join, cleanup
from .arena_lib.arenaitems import items as arenaitems
class Generator():
    """World generator plugin that builds a Terraria "Dungeon Arena":
    a grid of dungeon rooms connected by corridors, with optional torch
    lighting and loot chests. ``run()`` shows a configuration GUI, then
    generates the world data onto ``self`` for omnitool to write out.
    """

    def __init__(self):
        pass

    def run(self):
        """Show the configuration GUI, generate the world, and store the
        results as ``self.header/tiles/chests/signs/npcs/names``."""
        from omnitool.shared import lang, theme, exit_prog, __version__
        from omnitool.pgu_override import Quitbutton
        # One slider per torch color option.
        torch_chances = [lang.at_full, lang.at_blue, lang.at_red, lang.at_green,
                         lang.at_pink, lang.at_white, lang.at_yellow, lang.at_purple,
                         lang.at_lime]
        name = 'Dungeon Arena'
        if hasattr(sys, "frozen"):
            # Frozen (exe) builds need the working directory fixed up.
            import os
            os.chdir(os.path.dirname(sys.executable))

        # GUI callbacks that keep the value labels in sync with sliders.
        def update(slider, label):
            label.set_text(str(slider.value))

        def update_per(slider, label):
            label.set_text(str(slider.value) + "%")

        def update_2(slider, label):
            # The room slider displays its square (the grid is N x N).
            label.set_text(str(slider.value * slider.value))

        pygame.display.init()
        pygame.display.set_caption(name)

        def weighted(liste):
            # Pick an item from (item, weight) pairs; the weights are
            # normalized to sum to 1 before this is used below.
            n = uniform(0, 1)
            for item, weight in liste:
                if n < weight:
                    break
                n = n - weight
            return item

        app = gui.Desktop(theme=theme)
        app.connect(gui.QUIT, exit_prog, None)
        main = gui.Table()
        main.td(gui.Label(lang.a_name), align=-1)
        nameinput = gui.Input("Dungeon Arena OT-V" + str(__version__), width=200)
        main.td(nameinput, colspan=2)
        main.tr()
        main.td(gui.Spacer(1, 12))
        main.tr()
        rooms = gui.HSlider(value=15, min=3, max=31, size=20, height=16, width=150)
        roomstext = gui.Label(str(15 * 15))
        rooms.connect(gui.CHANGE, update_2, rooms, roomstext)
        main.td(gui.Label(lang.a_rooms), align=-1)
        main.td(rooms, align=-1)
        main.td(roomstext)
        main.tr()
        roomsize = gui.HSlider(value=12, min=9, max=36, size=20, height=16, width=150)
        roomsizetext = gui.Label("12")
        roomsize.connect(gui.CHANGE, update, roomsize, roomsizetext)
        main.td(gui.Label(lang.a_sidelen), align=-1)
        main.td(roomsize, align=-1)
        main.td(roomsizetext)
        main.tr()
        corridor = gui.HSlider(value=6, min=3, max=9, size=20, height=16, width=150)
        corridortext = gui.Label("6")
        corridor.connect(gui.CHANGE, update, corridor, corridortext)
        main.td(gui.Label(lang.a_corlen), align=-1)
        main.td(corridor, align=-1)
        main.td(corridortext)
        main.tr()
        main.td(gui.Spacer(1, 12))
        main.tr()
        chestcount = gui.HSlider(value=100, min=0, max=100, size=20, height=16, width=150)
        chesttext = gui.Label("100%")
        chestcount.connect(gui.CHANGE, update_per, chestcount, chesttext)
        main.td(gui.Label(lang.a_chest), align=-1)
        main.td(chestcount, align=-1)
        main.td(chesttext)
        main.tr()
        itemcount = gui.HSlider(value=1, min=0, max=20, size=20, height=16, width=150)
        itemtext = gui.Label("1")
        itemcount.connect(gui.CHANGE, update, itemcount, itemtext)
        main.td(gui.Label(lang.a_itemchest), align=-1)
        main.td(itemcount, align=-1)
        main.td(itemtext)
        main.tr()
        main.td(gui.Spacer(1, 12))
        main.tr()
        torchcount = gui.HSlider(value=100, min=0, max=100, size=20, height=16, width=150)
        torchtext = gui.Label("100%")
        torchcount.connect(gui.CHANGE, update_per, torchcount, torchtext)
        main.td(gui.Label(lang.a_light), align=-1)
        main.td(torchcount, align=-1)
        main.td(torchtext)
        main.tr()
        main.td(gui.Spacer(1, 12))
        main.tr()
        torch_sel = []
        main.td(gui.Label(lang.at_chances), align=-1)
        main.tr()
        for t in torch_chances:
            torchsel = gui.HSlider(value=1, min=0, max=10, size=20, height=16, width=150)
            torcht = gui.Label("1")
            torchsel.connect(gui.CHANGE, update, torchsel, torcht)
            main.td(gui.Label(t), align=-1)
            main.td(torchsel, align=-1)
            main.td(torcht)
            torch_sel.append(torchsel)
            main.tr()
        main.tr()
        main.td(gui.Spacer(1, 12))
        main.tr()
        main.td(gui.Label(lang.a_chances), colspan=2, align=-1)
        main.td(gui.Spacer(50, 1))
        main.tr()
        standardcount = gui.HSlider(value=1, min=0, max=10, size=20, height=16, width=150)
        standardtext = gui.Label("1")
        standardcount.connect(gui.CHANGE, update, standardcount, standardtext)
        main.td(gui.Label(lang.a_standard), align=-1)
        main.td(standardcount, align=-1)
        main.td(standardtext)
        main.tr()
        crosscount = gui.HSlider(value=1, min=0, max=10, size=20, height=16, width=150)
        crosstext = gui.Label("1")
        crosscount.connect(gui.CHANGE, update, crosscount, crosstext)
        main.td(gui.Label(lang.a_cross), align=-1)
        main.td(crosscount, align=-1)
        main.td(crosstext)
        main.tr()
        main.td(gui.Spacer(1, 12))
        main.tr()
        main.td(Quitbutton(app, lang.pt_start), colspan=3)
        # Blocks until the user presses start (Quitbutton) or quits.
        app.run(main)
        pygame.display.quit()
        # Room-type and torch-color weights chosen in the GUI.
        selection = [("cross", crosscount.value),
                     ("standard", standardcount.value)]
        torch_selection = []
        x = 0
        for v in torch_sel:
            torch_selection.append((x, v.value))
            x += 1
        weight = 0
        for item, w in selection:
            weight += w
        if not weight:
            # NOTE(review): execution continues after the sleep, and the
            # normalization below then divides by zero -- confirm intent.
            print("UserError: No rooms to place.")
            import time
            time.sleep(10)
        t_weight = 0
        for item, w in torch_selection:
            t_weight += w
        if not t_weight:
            print("UserError: No torches to place. To have no lighting ONLY set the lighting slider to zero.")
            import time
            time.sleep(10)
        # Normalize both weight lists so they each sum to 1 for weighted().
        for x in range(len(torch_selection)):
            torch_selection[x] = (torch_selection[x][0], torch_selection[x][1] / float(t_weight))
        for x in range(len(selection)):
            selection[x] = (selection[x][0], selection[x][1] / float(weight))
        name = nameinput.value
        chest_mode = itemcount.value
        s = roomsize.value
        chestchance = chestcount.value
        roomwidth = s
        roomheight = s
        rooms = rooms.value
        border = 250
        corridor = corridor.value
        torches = torchcount.value
        # World size in tiles: room grid plus corridors plus outer border.
        size = (rooms * roomwidth + (rooms - 1) * corridor + border * 2,
                rooms * roomheight + (rooms - 1) * corridor + border * 2)
        # Random tile/wall variants for the dungeon look.
        dtile = choice([41, 43, 44])
        dwall = choice([7, 8, 9])
        if not rooms % 2:
            spawn = (roomwidth // 2 + size[0] // 2, roomheight // 2 + size[1] // 2)
        else:
            spawn = (size[0] // 2, size[1] // 2)
        print("Starting Generation")
        header = {'spawn': spawn, 'groundlevel': -10.0, 'is_bloodmoon': 0,
                  'dungeon_xy': spawn, 'worldrect': (0, size[0] * 16, 0, size[1] * 16),
                  'is_meteor_spawned': 0, 'gob_inv_time': 0, 'rocklevel': size[1] // 2 + 0.4,
                  'gob_inv_x': 0.0, 'is_day': 1, 'shadow_orbs_broken': 0,
                  'width': size[0], 'version': version, 'gob_inv_type': 0,
                  'bosses_slain': (0, 0, 1), "npcs_saved": (0, 0, 0), "special_slain": (0, 0, 0), 'gob_inv_size': 0,
                  'height': size[1],
                  'ID': 1394008880, 'moonphase': 0, "hardmode": 0,
                  'name': name, "altars_broken": 0,
                  'is_a_shadow_orb_broken': 0, 'time': 13500}
        is_exe = hasattr(sys, "frozen")
        # The world is drawn into a pygame surface; pixel channel values
        # encode tile/wall ids which write_tiles() decodes later.
        surface = pygame.surface.Surface(size)
        surface.fill((254, 1, 255))
        pygame.draw.rect(surface, (dtile, dtile, dtile),
                         ((border - corridor, border - corridor),
                          (size[0] - border * 2 + corridor * 2, size[1] - border * 2 + corridor * 2)))
        plat = (19, 0, 0)
        chests = []
        # contents of the spawn chest
        multis = get_multis()
        chestsurflist = (multis["woodchest"],
                         multis["goldchest"],
                         multis["shadowchest"],
                         multis["barrelchest"],
                         multis["canchest"])
        for x in range(rooms):  #horizontal
            pygame.draw.rect(surface, (252, dwall, 0),
                             ((border + corridor, border + roomheight // 2 - 2 + x * (roomheight + corridor)),
                              ((rooms - 1) * (roomwidth + corridor), 4)))
        for x in range(rooms):  #vertical
            pygame.draw.rect(surface, (252, dwall, 0),
                             ((border + roomwidth // 2 - 2 + x * (roomheight + corridor), border + corridor),
                              (4, (rooms - 1) * (roomheight + corridor))))
        for x in range(rooms):
            for y in range(rooms):
                rtype = weighted(selection)
                ltype = weighted(torch_selection)
                #print(ltype)
                if rtype == "standard":
                    pos = (border + x * (roomwidth + corridor), border + y * (roomwidth + corridor))
                    pygame.draw.rect(surface, (252, dwall, 0),
                                     (pos, (roomwidth, roomheight)))
                    if torches > randint(0, 100):
                        # One torch in each corner of the room.
                        surface.set_at(pos, (4, 0, ltype))
                        surface.set_at((pos[0] + roomwidth - 1, pos[1]), (4, 0, ltype))
                        surface.set_at((pos[0], pos[1] + roomheight - 1), (4, 0, ltype))
                        surface.set_at((pos[0] + roomwidth - 1, pos[1] + roomheight - 1), (4, 0, ltype))
                    #platforms on ground with corridor
                    pygame.draw.line(surface, plat, (pos[0], pos[1] + roomheight // 2 + 2),
                                     (pos[0] + roomwidth - 1, pos[1] + roomheight // 2 + 2))
                    #over corridor
                    pygame.draw.line(surface, plat, (pos[0], pos[1] + roomheight // 2 - 3),
                                     (pos[0] + roomwidth - 1, pos[1] + roomheight // 2 - 3))
                    if y > 0:  #lowest platform
                        pygame.draw.line(surface, plat, (pos[0] + roomwidth // 2 - 2, pos[1] - 1),
                                         (pos[0] + roomwidth // 2 + 1, pos[1] - 1))
                    if y < rooms:  #high platform
                        pygame.draw.line(surface, plat, (pos[0] + roomwidth // 2 - 2, pos[1] + roomheight),
                                         (pos[0] + roomwidth // 2 + 1, pos[1] + roomheight))
                elif rtype == "cross":
                    pos = (border + x * (roomwidth + corridor), border + y * (roomwidth + corridor))
                    if torches > randint(0, 100):
                        surface.set_at(pos, (4, 0, ltype))
                        surface.set_at((pos[0] + roomwidth - 1, pos[1]), (4, 0, ltype))
                        surface.set_at((pos[0], pos[1] + roomheight - 1), (4, 0, ltype))
                        surface.set_at((pos[0] + roomwidth - 1, pos[1] + roomheight - 1), (4, 0, ltype))
                    #platforms on ground with corridor
                    pygame.draw.line(surface, plat,
                                     (pos[0] + roomwidth // 2 - 2, pos[1] + roomheight // 2 + 2),
                                     (pos[0] + roomwidth // 2 + 1, pos[1] + roomheight // 2 + 2))
                    #over corridor
                    pygame.draw.line(surface, plat,
                                     (pos[0] + roomwidth // 2 - 2, pos[1] + roomheight // 2 - 3),
                                     (pos[0] + roomwidth // 2 + 1, pos[1] + roomheight // 2 - 3))
                    if y > 0:  #lowest platform
                        pygame.draw.line(surface, plat,
                                         (pos[0] + roomwidth // 2 - 2, pos[1] - 1),
                                         (pos[0] + roomwidth // 2 + 1, pos[1] - 1))
                    if y < rooms:  #high platform
                        pygame.draw.line(surface, plat,
                                         (pos[0] + roomwidth // 2 - 2, pos[1] + roomheight),
                                         (pos[0] + roomwidth // 2 + 1, pos[1] + roomheight))
                else:
                    print(rtype)
                    raise AssertionError("")
                if chest_mode and chestchance > randint(0, 100):
                    # Fill a chest with chest_mode random arena items.
                    content = []
                    for spam in range(chest_mode):
                        item = choice(list(arenaitems.keys()))
                        content.append((arenaitems[item], item, 0))
                    for i in range(20 - len(content)):  #chests always have 20 slots
                        content.append((0, None))
                    chests.append(((pos[0] + roomwidth // 2 - 1, pos[1] + roomheight // 2), content))
        for chest in chests:
            #draw the chests into the world texture
            surface.blit(choice(chestsurflist), chest[0])
            #surface.blit(multis["shadoworb"], chest[0])
            # below is to make sure every chest stands on something, so they dont glitch
            d = surface.get_at((chest[0][0], chest[0][1] + 2))[0]
            if d > 250 or d == 51:
                surface.set_at((chest[0][0], chest[0][1] + 2), (0, 0, 0))
            d = surface.get_at((chest[0][0] + 1, chest[0][1] + 2))[0]
            if d > 250 or d == 51:
                surface.set_at((chest[0][0] + 1, chest[0][1] + 2), (0, 0, 0))
            assert chest[0][0] < header["width"] and chest[0][0] > 0
            assert chest[0][1] < header["height"] and chest[0][1] > 0
        for x in range(1000 - len(chests)):  #fill in nonechests, as terraria always has 1000 chests
            chests.append(None)
        z = header["width"] * header["height"]  #tileamount
        walls = defaultdict(lambda: None, {21: dwall,
                                           31: dwall,
                                           dtile: dwall,
                                           4: dwall,
                                           19: dwall})

        def count(checks):
            # Debug helper: tally how often each tile id in ``checks``
            # occurs in the generated surface and print the counts.
            c = {}
            for t_id in checks:
                c[t_id] = 0
            for x in range(size[0]):
                for y in range(size[1]):
                    tid = surface.get_at((x, y))[0]
                    if tid in c:
                        c[tid] += 1
            for tid in c:
                amount = c[tid]
                print("%-10s : %d" % (tiles[tid], amount))

        self.header = header
        #wooden platforms used to not be multitiles, so overwrite that
        self.tiles = write_tiles(surface, header, walls, True, overwrite_no_mt={19})
        self.chests = chests
        self.signs = [None] * 1000
        self.npcs = [('Guide', (header["spawn"][0] * 16, (header["spawn"][1] - 3) * 16), 1,
                      (header["spawn"][0], header["spawn"][1] - 3))]
        self.names = names
if __name__ == "__main__":
    # Build the world generator and run it when invoked as a script.
    Generator().run()
| |
from collections import namedtuple
# Toggle verbose tracing of intermediate tables in the solver below.
debug = True
# A directed, weighted edge: src -> dst with weight `cost`.
edge = namedtuple('edge',['src','dst','cost'])
class minimum_mean_cycle_states:
    """Shared mutable state threaded through the minimum-mean-cycle search."""
    def __init__(self, number_of_nodes, edges):
        # DFS clock used to assign discovery times.
        self.time = 0
        # Discovery time per node; None means the node is not visited yet.
        self.start_time = [None for _ in range(number_of_nodes)]
        # Stack membership per node:
        #   0 - never pushed on the stack
        #   1 - currently on the stack
        #   2 - being popped, but its SCC has not been reported yet
        #   3 - left the stack for good
        self.instack = [0 for _ in range(number_of_nodes)]
        # Tarjan low-link: among nodes not yet finalized into an SCC, the
        # smallest discovery time reachable through this node's subtree only.
        self.low = [None for _ in range(number_of_nodes)]
        self.node_stack = []
        self.edge_stack = []
        self.edges = edges
        # Best cycle discovered so far (list of edge indexes) and its mean.
        self.best_cycle = None
        self.best_cycle_mean = None
def minimum_mean_cycle(number_of_nodes, adjacency_list, edges):
    """Return the cycle (list of edge indexes) with the smallest mean cost.

    Runs a DFS from every still-unvisited node so disconnected graphs are
    fully covered; the per-SCC work happens in the helper.
    """
    states = minimum_mean_cycle_states(number_of_nodes, edges)
    for root in range(number_of_nodes):
        if states.start_time[root] is None:
            minimum_mean_cycle_helper(root, adjacency_list, states)
    return states.best_cycle
def minimum_mean_cycle_helper(node, adjacency_list, states):
    """Tarjan-style DFS from `node` that reports each strongly connected
    component (nodes and the edges fully inside it) to
    minimum_mean_cycle_within_connected_component.

    In addition to Tarjan's node stack, an edge stack collects every edge
    that might belong to the SCC currently being built; edges known to point
    into an already-finalized SCC are skipped.
    """
    states.start_time[node] = states.time
    states.time = states.time + 1
    states.low[node] = states.start_time[node]
    states.node_stack.append(node)
    states.instack[node] = 1
    for edge_index in adjacency_list[node]:
        edge = states.edges[edge_index]
        if states.start_time[edge.dst] is None:
            # All the tree edges enter the edge stack
            states.edge_stack.append(edge_index)
            minimum_mean_cycle_helper(edge.dst, adjacency_list, states)
            # Propagate the child's low-link back to the parent.
            if states.low[edge.src] > states.low[edge.dst]:
                states.low[edge.src] = states.low[edge.dst]
        elif states.instack[edge.dst] == 1:
            # So are all the edges that may end up in the strongly connected component
            # We might encounter a back edge in this branch, and back edges always end up in a
            # strongly connected component.
            # But we might also encounter a cross edge or a forward edge in this branch, in that case
            # the edge might cross two strongly connected components, and there is no good way
            # to tell at this point.
            states.edge_stack.append(edge_index)
            if states.low[edge.src] > states.start_time[edge.dst]:
                states.low[edge.src] = states.start_time[edge.dst]
        else:
            # Otherwise the edge is known to point to some other strongly connected component
            # This is an edge we can safely skip from entering the edge stack
            if not states.instack[edge.dst] == 3:
                raise ValueError()
    if states.start_time[node] == states.low[node]:
        # `node` is the root of an SCC: pop it off both stacks.
        connected_component_nodes = []
        connected_component_edge_indexes = []
        while True:
            stack_node = states.node_stack.pop()
            # The node is marked as "being popped", not quite done with popping
            states.instack[stack_node] = 2
            connected_component_nodes.append(stack_node)
            if node == stack_node:
                break
        # The popping should stop after we have emptied the stack (in case the node is the root of the depth first search)
        while len(states.edge_stack) > 0:
            # I claim that all edges within the strongly connected component must be currently at the
            # top of the stack
            edge_index = states.edge_stack.pop()
            edge = states.edges[edge_index]
            # If we find an edge that is going downwards to node, it has to be a tree edge.
            # # This is because it is the first time when the node returns, it is impossible to be a forward edge.
            if edge.dst == node and states.start_time[edge.src] < states.start_time[edge.dst]:
                # Once we have found the tree edge to node, we can stop.
                break
            # I claim that states.instack[edge.src] == 2
            if not states.instack[edge.src] == 2:
                raise ValueError()
            # The edge could be forward edge or cross edge to other strongly connected component
            # Therefore we check if the destination of the edge is within the current strongly connected component
            if states.instack[edge.dst] == 2:
                connected_component_edge_indexes.append(edge_index)
        # Finalize: these nodes permanently leave the stack.
        for connected_component_node in connected_component_nodes:
            states.instack[connected_component_node] = 3
        if debug:
            print("A connected component is found")
            print("connected component nodes: %s" % connected_component_nodes)
            print("connected component edges: %s" % [states.edges[connected_component_edge_index] for connected_component_edge_index in connected_component_edge_indexes])
        minimum_mean_cycle_within_connected_component(connected_component_nodes, connected_component_edge_indexes, states)
def minimum_mean_cycle_within_connected_component(connected_component_nodes, connected_component_edge_indexes, states):
    """Run Karp's minimum-mean-cycle algorithm on one strongly connected component.

    `connected_component_nodes` holds the original (global) node ids of the
    component; `connected_component_edge_indexes` indexes into states.edges and
    contains only edges with both endpoints inside the component.  If the
    component contains a cycle whose mean edge cost beats the best found so
    far, states.best_cycle / states.best_cycle_mean are updated.
    """
    number_of_nodes = len(connected_component_nodes)
    # Map global node ids onto dense local indexes 0..number_of_nodes - 1.
    # BUGFIX: this must be a dict.  The previous `[0] * number_of_nodes` list
    # was indexed by *global* node id, which raises IndexError whenever the
    # component contains a node whose global id >= the component size
    # (e.g. component {5, 6} inside a larger graph).
    mapping = {}
    for i in range(0, number_of_nodes):
        mapping[connected_component_nodes[i]] = i
    # costs[v][k] represents the minimal cost to reach node v from 0 through exactly k edges
    # None represents node v is unreachable from node 0
    costs = [[None for i in range(0, number_of_nodes + 1)] for j in range(0, number_of_nodes)]
    # parents[v][k] represents the edge index we chose to reach the parent
    parents = [[None for i in range(0, number_of_nodes + 1)] for j in range(0, number_of_nodes)]
    # In 0 steps, the only reachable node from 0 is 0, and its cost is 0
    costs[0][0] = 0
    # The best cost to reach node v through exactly k edges is found by relaxing all the edges
    for step in range(1, number_of_nodes + 1):
        for edge_index in connected_component_edge_indexes:
            edge = states.edges[edge_index]
            src = mapping[edge.src]
            dst = mapping[edge.dst]
            if costs[src][step - 1] is not None:
                new_cost = costs[src][step - 1] + edge.cost
                if costs[dst][step] is None or new_cost < costs[dst][step]:
                    costs[dst][step] = new_cost
                    parents[dst][step] = edge_index
    # Karp's formula: for each node, the max over k of (D_n(v) - D_k(v)) / (n - k)
    ratios = [None] * number_of_nodes
    for node in range(0, number_of_nodes):
        for step in range(0, number_of_nodes):
            if costs[node][number_of_nodes] is not None and costs[node][step] is not None:
                new_ratio = (costs[node][number_of_nodes] - costs[node][step])/(number_of_nodes - step)
                if ratios[node] is None or new_ratio > ratios[node]:
                    ratios[node] = new_ratio
    # Minimizing the max ratios over all nodes gives the minimum cycle mean
    best_ratio = None
    best_node = None
    for node in range(0, number_of_nodes):
        if ratios[node] is not None and (best_ratio is None or ratios[node] < best_ratio):
            best_ratio = ratios[node]
            best_node = node
    if debug:
        print("Cost table, each row is a step")
        for step in range(0, number_of_nodes + 1):
            for node in range(0, number_of_nodes):
                print(costs[node][step], end="\t")
            print()
        print()
        print("Parent table, each row is a step")
        for step in range(0, number_of_nodes + 1):
            for node in range(0, number_of_nodes):
                if parents[node][step] is None:
                    print(None, end="\t")
                else:
                    print(mapping[states.edges[parents[node][step]].src], end="\t")
            print()
        print()
        print("Ratio table")
        for i in range(0, number_of_nodes):
            print(ratios[i], end="\t")
        print()
        print()
        print("best_ratio = %s" % best_ratio)
        if best_node is not None:
            print("best_node = %s" % connected_component_nodes[best_node])
    if best_node is not None:
        # A walk of n edges over n nodes must repeat a node - find that cycle
        steps = [None] * number_of_nodes
        node_cursor = best_node
        step_cursor = number_of_nodes
        cycle = []
        while True:
            if steps[node_cursor] is None:
                # On each newly discovered node, mark it with the current step number
                steps[node_cursor] = step_cursor
                # And walk through the parent chain
                node_cursor = mapping[states.edges[parents[node_cursor][step_cursor]].src]
                step_cursor = step_cursor - 1
            else:
                # We have found the cycle, recover it by walking the cycle again starting
                # from the original step number
                stop_cursor = step_cursor
                step_cursor = steps[node_cursor]
                while step_cursor > stop_cursor:
                    cycle.append(parents[node_cursor][step_cursor])
                    node_cursor = mapping[states.edges[parents[node_cursor][step_cursor]].src]
                    step_cursor = step_cursor - 1
                break
        # The discovery order was in parent chain, so reverse it.
        cycle.reverse()
        if debug:
            cycle_nodes = []
            for edge_index in cycle:
                cycle_nodes.append(states.edges[edge_index].dst)
            print()
            print("The best cycle found within the strongly connected component is %s" % cycle_nodes)
            print()
        if states.best_cycle_mean is None or states.best_cycle_mean > best_ratio:
            states.best_cycle_mean = best_ratio
            states.best_cycle = cycle
def main():
    """Exercise the solver on a small hand-built 8-node graph."""
    edges = [
        edge(0, 1, 1),
        edge(1, 3, 2),
        edge(3, 7, 1),
        edge(7, 4, 2),
        edge(3, 4, 1),
        edge(2, 0, -1),
        edge(1, 2, 3),
        edge(6, 1, 1),
        edge(5, 6, 2),
        edge(4, 5, -1),
    ]
    # Adjacency list maps each node to the indexes of its outgoing edges.
    adjacency_list = [[] for _ in range(0, 8)]
    for index, e in enumerate(edges):
        adjacency_list[e.src].append(index)
    print(minimum_mean_cycle(8, adjacency_list, edges))
    return 0
if __name__ == "__main__":
    main()
| |
import gc
from clang.cindex import CursorKind
from clang.cindex import TranslationUnit
from clang.cindex import TypeKind
from nose.tools import raises
from .util import get_cursor
from .util import get_tu
kInput = """\
typedef int I;
struct teststruct {
int a;
I b;
long c;
unsigned long d;
signed long e;
const int f;
int *g;
int ***h;
};
"""
def test_a_struct():
    """Check spelling, qualifiers and (canonical) kinds of every field."""
    tu = get_tu(kInput)
    teststruct = get_cursor(tu, 'teststruct')
    assert teststruct is not None, "Could not find teststruct."
    fields = list(teststruct.get_children())
    assert all(x.kind == CursorKind.FIELD_DECL for x in fields)
    assert all(x.translation_unit is not None for x in fields)
    # (spelling, const-qualified?, declared kind, canonical kind)
    scalar_expectations = [
        ('a', False, TypeKind.INT, TypeKind.INT),
        ('b', False, TypeKind.TYPEDEF, TypeKind.INT),
        ('c', False, TypeKind.LONG, TypeKind.LONG),
        ('d', False, TypeKind.ULONG, TypeKind.ULONG),
        ('e', False, TypeKind.LONG, TypeKind.LONG),
        ('f', True, TypeKind.INT, TypeKind.INT),
    ]
    for field, (spelling, is_const, kind, canonical) in zip(
            fields, scalar_expectations):
        assert field.spelling == spelling
        if is_const:
            assert field.type.is_const_qualified()
        else:
            assert not field.type.is_const_qualified()
        assert field.type.kind == kind
        assert field.type.get_canonical().kind == canonical
    # The typedef'd field resolves back to its declaration.
    assert fields[1].type.get_declaration().spelling == 'I'
    # 'g' is int *.
    assert fields[6].spelling == 'g'
    assert not fields[6].type.is_const_qualified()
    assert fields[6].type.kind == TypeKind.POINTER
    assert fields[6].type.get_pointee().kind == TypeKind.INT
    # 'h' is int *** -- walk all three levels of indirection.
    assert fields[7].spelling == 'h'
    assert not fields[7].type.is_const_qualified()
    assert fields[7].type.kind == TypeKind.POINTER
    pointee = fields[7].type.get_pointee()
    assert pointee.kind == TypeKind.POINTER
    assert pointee.get_pointee().kind == TypeKind.POINTER
    assert pointee.get_pointee().get_pointee().kind == TypeKind.INT
def test_references():
    """Ensure that a Type maintains a reference to a TranslationUnit."""
    tu = get_tu('int x;')
    cursors = list(tu.cursor.get_children())
    assert len(cursors) > 0
    cursor = cursors[0]
    t = cursor.type
    assert isinstance(t.translation_unit, TranslationUnit)
    # Drop our only direct TU reference and force a collection; the Type
    # alone must keep the TU alive.
    del tu
    gc.collect()
    assert isinstance(t.translation_unit, TranslationUnit)
    # If the TU was destroyed, this should cause a segfault.
    decl = t.get_declaration()
constarrayInput="""
struct teststruct {
void *A[2];
};
"""
def testConstantArray():
    """Verify CONSTANTARRAY kind, element type and array size."""
    tu = get_tu(constarrayInput)
    teststruct = get_cursor(tu, 'teststruct')
    assert teststruct is not None, "Didn't find teststruct??"
    field = list(teststruct.get_children())[0]
    assert field.spelling == 'A'
    array_type = field.type
    assert array_type.kind == TypeKind.CONSTANTARRAY
    element_type = array_type.get_array_element_type()
    assert element_type is not None
    assert element_type.kind == TypeKind.POINTER
    assert array_type.get_array_size() == 2
def test_equal():
    """Ensure equivalence operators work on Type."""
    tu = get_tu('int a; int b; void *v;')
    a = get_cursor(tu, 'a')
    b = get_cursor(tu, 'b')
    v = get_cursor(tu, 'v')
    assert a is not None and b is not None and v is not None
    # Same underlying type compares equal; distinct types do not.
    assert a.type == b.type
    assert a.type != v.type
    # Comparisons against non-Type objects are well-defined too.
    assert a.type != None
    assert a.type != 'foo'
def test_typekind_spelling():
    """Ensure TypeKind.spelling works."""
    cursor = get_cursor(get_tu('int a;'), 'a')
    assert cursor is not None
    assert cursor.type.kind.spelling == 'Int'
def test_function_argument_types():
    """Ensure that Type.argument_types() works as expected."""
    f = get_cursor(get_tu('void f(int, int);'), 'f')
    assert f is not None
    args = f.type.argument_types()
    assert args is not None
    assert len(args) == 2
    first = args[0]
    second = args[1]
    for arg in (first, second):
        assert arg is not None
        assert arg.kind == TypeKind.INT
    # Iterating the collection yields the same Types as indexing.
    listed = list(args)
    assert len(listed) == 2
    assert first == listed[0]
    assert second == listed[1]
@raises(TypeError)
def test_argument_types_string_key():
    """Ensure that non-int keys raise a TypeError."""
    f = get_cursor(get_tu('void f(int, int);'), 'f')
    assert f is not None
    args = f.type.argument_types()
    assert len(args) == 2
    # Indexing with a string must raise TypeError (caught by @raises).
    args['foo']
@raises(IndexError)
def test_argument_types_negative_index():
    """Ensure that negative indexes on argument_types Raises an IndexError."""
    f = get_cursor(get_tu('void f(int, int);'), 'f')
    # Negative indexing is unsupported and must raise (caught by @raises).
    f.type.argument_types()[-1]
@raises(IndexError)
def test_argument_types_overflow_index():
    """Ensure that indexes beyond the length of Type.argument_types() raise."""
    f = get_cursor(get_tu('void f(int, int);'), 'f')
    # Only indexes 0 and 1 exist; 2 must raise (caught by @raises).
    f.type.argument_types()[2]
@raises(Exception)
def test_argument_types_invalid_type():
    """Ensure that obtaining argument_types on a Type without them raises."""
    i = get_cursor(get_tu('int i;'), 'i')
    assert i is not None
    # An int has no argument types; this must raise (caught by @raises).
    i.type.argument_types()
def test_is_pod():
    """Ensure Type.is_pod() works."""
    tu = get_tu('int i; void f();')
    i = get_cursor(tu, 'i')
    f = get_cursor(tu, 'f')
    assert i is not None
    assert f is not None
    # A plain int is plain-old-data; a function type is not.
    assert i.type.is_pod()
    assert not f.type.is_pod()
def test_function_variadic():
    """Ensure Type.is_function_variadic works."""
    source ="""
#include <stdarg.h>
void foo(int a, ...);
void bar(int a, int b);
"""
    tu = get_tu(source)
    foo = get_cursor(tu, 'foo')
    bar = get_cursor(tu, 'bar')
    assert foo is not None
    assert bar is not None
    # The result is a real bool, True only for the ... declaration.
    variadic = foo.type.is_function_variadic()
    assert isinstance(variadic, bool)
    assert variadic
    assert not bar.type.is_function_variadic()
def test_element_type():
    """Ensure Type.element_type works."""
    i = get_cursor(get_tu('int i[5];'), 'i')
    assert i is not None
    array_type = i.type
    assert array_type.kind == TypeKind.CONSTANTARRAY
    assert array_type.element_type.kind == TypeKind.INT
@raises(Exception)
def test_invalid_element_type():
    """Ensure Type.element_type raises if type doesn't have elements.

    The attribute must be read off the Type, not the Cursor: Cursor has no
    `element_type`, so the previous `i.element_type` raised AttributeError
    and passed vacuously without ever exercising Type.element_type.
    """
    tu = get_tu('int i;')
    i = get_cursor(tu, 'i')
    assert i is not None
    i.type.element_type
def test_element_count():
    """Ensure Type.element_count works."""
    tu = get_tu('int i[5]; int j;')
    i = get_cursor(tu, 'i')
    j = get_cursor(tu, 'j')
    assert i is not None
    assert j is not None
    assert i.type.element_count == 5
    # A non-array type must not report an element count.  The previous bare
    # `except:` also swallowed the AssertionError raised by `assert False`,
    # so this negative check could never fail; use try/except/else instead.
    try:
        j.type.element_count
    except Exception:
        pass
    else:
        assert False, 'element_count on a non-array type should raise'
def test_is_volatile_qualified():
    """Ensure Type.is_volatile_qualified works."""
    tu = get_tu('volatile int i = 4; int j = 2;')
    i = get_cursor(tu, 'i')
    j = get_cursor(tu, 'j')
    assert i is not None
    assert j is not None
    # The result is a genuine bool, True only for the volatile declaration.
    result = i.type.is_volatile_qualified()
    assert isinstance(result, bool)
    assert result
    assert not j.type.is_volatile_qualified()
def test_is_restrict_qualified():
    """Ensure Type.is_restrict_qualified works."""
    tu = get_tu('struct s { void * restrict i; void * j; };')
    i = get_cursor(tu, 'i')
    j = get_cursor(tu, 'j')
    assert i is not None
    assert j is not None
    # The result is a genuine bool, True only for the restrict declaration.
    result = i.type.is_restrict_qualified()
    assert isinstance(result, bool)
    assert result
    assert not j.type.is_restrict_qualified()
| |
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from jacket.db import compute
from jacket.compute import exception
from jacket.objects.compute import flavor as flavor_obj
from jacket.tests.compute.unit.objects import test_objects
# Canned db-layer flavor row returned by the mocked compute.* calls below.
fake_flavor = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': 0,
    'id': 1,
    'name': 'm1.foo',
    'memory_mb': 1024,
    'vcpus': 4,
    'root_gb': 20,
    'ephemeral_gb': 0,
    'flavorid': 'm1.foo',
    'swap': 0,
    'rxtx_factor': 1.0,
    'vcpu_weight': 1,
    'disabled': False,
    'is_public': True,
    'extra_specs': {'foo': 'bar'},
    }
class _TestFlavor(object):
    """Flavor object tests shared by the local and remote backends.

    Mixed into TestFlavor / TestFlavorRemote below; each method mocks the
    db layer (`compute.*`) and checks both the resulting object fields and
    the exact db calls made.
    """
    @staticmethod
    def _compare(test, compute, obj):
        # Every field of the db dict must appear identically on the object.
        for field, value in compute.items():
            test.assertEqual(compute[field], obj[field])
    def test_get_by_id(self):
        with mock.patch.object(compute, 'flavor_get') as get:
            get.return_value = fake_flavor
            flavor = flavor_obj.Flavor.get_by_id(self.context, 1)
            self._compare(self, fake_flavor, flavor)
    def test_get_by_name(self):
        with mock.patch.object(compute, 'flavor_get_by_name') as get_by_name:
            get_by_name.return_value = fake_flavor
            flavor = flavor_obj.Flavor.get_by_name(self.context, 'm1.foo')
            self._compare(self, fake_flavor, flavor)
    def test_get_by_flavor_id(self):
        with mock.patch.object(compute, 'flavor_get_by_flavor_id') as get_by_id:
            get_by_id.return_value = fake_flavor
            flavor = flavor_obj.Flavor.get_by_flavor_id(self.context,
                                                        'm1.foo')
            self._compare(self, fake_flavor, flavor)
    def test_add_access(self):
        # Access changes require an elevated (admin) context.
        elevated = self.context.elevated()
        flavor = flavor_obj.Flavor(context=elevated, flavorid='123')
        with mock.patch.object(compute, 'flavor_access_add') as add:
            flavor.add_access('456')
            add.assert_called_once_with(elevated, '123', '456')
    def test_add_access_with_dirty_projects(self):
        # Unsaved changes to `projects` must block further access changes.
        flavor = flavor_obj.Flavor(context=self.context, projects=['1'])
        self.assertRaises(exception.ObjectActionError,
                          flavor.add_access, '2')
    def test_remove_access(self):
        elevated = self.context.elevated()
        flavor = flavor_obj.Flavor(context=elevated, flavorid='123')
        with mock.patch.object(compute, 'flavor_access_remove') as remove:
            flavor.remove_access('456')
            remove.assert_called_once_with(elevated, '123', '456')
    def test_create(self):
        flavor = flavor_obj.Flavor(context=self.context)
        flavor.name = 'm1.foo'
        flavor.extra_specs = fake_flavor['extra_specs']
        with mock.patch.object(compute, 'flavor_create') as create:
            create.return_value = fake_flavor
            flavor.create()
        self.assertEqual(self.context, flavor._context)
        # NOTE(danms): Orphan this to avoid lazy-loads
        flavor._context = None
        self._compare(self, fake_flavor, flavor)
    def test_create_with_projects(self):
        context = self.context.elevated()
        flavor = flavor_obj.Flavor(context=context)
        flavor.name = 'm1.foo'
        flavor.extra_specs = fake_flavor['extra_specs']
        flavor.projects = ['project-1', 'project-2']
        db_flavor = dict(fake_flavor, projects=list(flavor.projects))
        with mock.patch.multiple(compute, flavor_create=mock.DEFAULT,
                                 flavor_access_get_by_flavor_id=mock.DEFAULT
                                 ) as methods:
            methods['flavor_create'].return_value = db_flavor
            methods['flavor_access_get_by_flavor_id'].return_value = [
                {'project_id': 'project-1'},
                {'project_id': 'project-2'}]
            flavor.create()
            # create() must forward the projects list to the db layer.
            methods['flavor_create'].assert_called_once_with(
                context,
                {'name': 'm1.foo',
                 'extra_specs': fake_flavor['extra_specs']},
                projects=['project-1', 'project-2'])
        self.assertEqual(context, flavor._context)
        # NOTE(danms): Orphan this to avoid lazy-loads
        flavor._context = None
        self._compare(self, fake_flavor, flavor)
        self.assertEqual(['project-1', 'project-2'], flavor.projects)
    def test_create_with_id(self):
        # An already-persisted object (has an id) cannot be created again.
        flavor = flavor_obj.Flavor(context=self.context, id=123)
        self.assertRaises(exception.ObjectActionError, flavor.create)
    # NOTE(review): these patch targets ('compute.compute.*') look
    # inconsistent with the `from jacket.db import compute` import used by
    # the mock.patch.object() calls above -- confirm they actually resolve.
    @mock.patch('compute.compute.flavor_access_add')
    @mock.patch('compute.compute.flavor_access_remove')
    @mock.patch('compute.compute.flavor_extra_specs_delete')
    @mock.patch('compute.compute.flavor_extra_specs_update_or_create')
    def test_save(self, mock_update, mock_delete, mock_remove, mock_add):
        ctxt = self.context.elevated()
        extra_specs = {'key1': 'value1', 'key2': 'value2'}
        projects = ['project-1', 'project-2']
        flavor = flavor_obj.Flavor(context=ctxt, flavorid='foo',
                                   extra_specs=extra_specs, projects=projects)
        flavor.obj_reset_changes()
        # Test deleting an extra_specs key and project
        del flavor.extra_specs['key1']
        del flavor.projects[-1]
        self.assertEqual(set(['extra_specs', 'projects']),
                         flavor.obj_what_changed())
        flavor.save()
        self.assertEqual({'key2': 'value2'}, flavor.extra_specs)
        mock_delete.assert_called_once_with(ctxt, 'foo', 'key1')
        self.assertEqual(['project-1'], flavor.projects)
        mock_remove.assert_called_once_with(ctxt, 'foo', 'project-2')
        # Test updating an extra_specs key value
        flavor.extra_specs['key2'] = 'foobar'
        self.assertEqual(set(['extra_specs']), flavor.obj_what_changed())
        flavor.save()
        self.assertEqual({'key2': 'foobar'}, flavor.extra_specs)
        mock_update.assert_called_with(ctxt, 'foo', {'key2': 'foobar'})
        # Test adding an extra_specs and project
        flavor.extra_specs['key3'] = 'value3'
        flavor.projects.append('project-3')
        self.assertEqual(set(['extra_specs', 'projects']),
                         flavor.obj_what_changed())
        flavor.save()
        self.assertEqual({'key2': 'foobar', 'key3': 'value3'},
                         flavor.extra_specs)
        mock_update.assert_called_with(ctxt, 'foo', {'key2': 'foobar',
                                                     'key3': 'value3'})
        self.assertEqual(['project-1', 'project-3'], flavor.projects)
        mock_add.assert_called_once_with(ctxt, 'foo', 'project-3')
    @mock.patch('compute.compute.flavor_create')
    @mock.patch('compute.compute.flavor_extra_specs_delete')
    @mock.patch('compute.compute.flavor_extra_specs_update_or_create')
    def test_save_deleted_extra_specs(self, mock_update, mock_delete,
                                      mock_create):
        mock_create.return_value = dict(fake_flavor,
                                        extra_specs={'key1': 'value1'})
        ctxt = self.context.elevated()
        flavor = flavor_obj.Flavor(context=ctxt)
        flavor.flavorid = 'test'
        flavor.extra_specs = {'key1': 'value1'}
        flavor.create()
        flavor.extra_specs = {}
        flavor.save()
        # Emptying extra_specs must delete the old key, never update it.
        mock_delete.assert_called_once_with(ctxt, flavor.flavorid,
                                            'key1')
        self.assertFalse(mock_update.called)
    def test_save_invalid_fields(self):
        # `id` is not a savable field, so save() must refuse.
        flavor = flavor_obj.Flavor(id=123)
        self.assertRaises(exception.ObjectActionError, flavor.save)
    def test_destroy(self):
        flavor = flavor_obj.Flavor(context=self.context, id=123, name='foo')
        with mock.patch.object(compute, 'flavor_destroy') as destroy:
            flavor.destroy()
            destroy.assert_called_once_with(self.context, flavor.name)
    def test_load_projects(self):
        # Accessing .projects lazy-loads from the db without dirtying the
        # object.
        flavor = flavor_obj.Flavor(context=self.context, flavorid='foo')
        with mock.patch.object(compute, 'flavor_access_get_by_flavor_id') as get:
            get.return_value = [{'project_id': 'project-1'}]
            projects = flavor.projects
        self.assertEqual(['project-1'], projects)
        self.assertNotIn('projects', flavor.obj_what_changed())
    def test_load_anything_else(self):
        # Only `projects` supports lazy-loading; other unset fields raise.
        flavor = flavor_obj.Flavor()
        self.assertRaises(exception.ObjectActionError,
                          getattr, flavor, 'name')
class TestFlavor(test_objects._LocalTest, _TestFlavor):
    """Run the shared Flavor tests against the local (in-process) backend."""
class TestFlavorRemote(test_objects._RemoteTest, _TestFlavor):
    """Run the shared Flavor tests through the remote (RPC) backend."""
class _TestFlavorList(object):
    """Shared FlavorList tests, mixed into both backend variants below."""
    def test_get_all(self):
        filters = {'min_memory_mb': 4096}
        with mock.patch.object(compute, 'flavor_get_all') as get_all:
            get_all.return_value = [fake_flavor]
            flavors = flavor_obj.FlavorList.get_all(self.context,
                                                    inactive=False,
                                                    filters=filters,
                                                    sort_key='id',
                                                    sort_dir='asc')
            self.assertEqual(1, len(flavors))
            _TestFlavor._compare(self, fake_flavor, flavors[0])
            # All query parameters, including the implicit paging defaults,
            # must be forwarded to the db layer.
            get_all.assert_called_once_with(self.context, inactive=False,
                                            filters=filters, sort_key='id',
                                            sort_dir='asc', limit=None,
                                            marker=None)
class TestFlavorList(test_objects._LocalTest, _TestFlavorList):
    """Run the shared FlavorList tests against the local backend."""
class TestFlavorListRemote(test_objects._RemoteTest, _TestFlavorList):
    """Run the shared FlavorList tests through the remote (RPC) backend."""
| |
import logging
import warnings
import sys
import os
from os import path as op
import inspect
from functools import wraps
import subprocess
import numpy as np
import nibabel as nib
from scipy import sparse
from scipy.spatial.distance import cdist
import matplotlib as mpl
from matplotlib import cm
logger = logging.getLogger('surfer')
# Py3k compat: on Python 2, str literals are bytes, so accept basestring.
# Use the structured sys.version_info rather than lexically inspecting the
# human-readable sys.version string, which is fragile by design.
if sys.version_info[0] == 2:
    string_types = basestring  # noqa
else:
    string_types = str
class Surface(object):
    """Container for surface object

    Attributes
    ----------
    subject_id : string
        Name of subject
    hemi : {'lh', 'rh'}
        Which hemisphere to load
    surf : string
        Name of the surface to load (eg. inflated, orig ...)
    data_path : string
        Path where to look for data
    x: 1d array
        x coordinates of vertices
    y: 1d array
        y coordinates of vertices
    z: 1d array
        z coordinates of vertices
    coords : 2d array of shape [n_vertices, 3]
        The vertices coordinates
    faces : 2d array
        The faces ie. the triangles
    nn : 2d array
        Normalized surface normals for vertices.
    subjects_dir : str | None
        If not None, this directory will be used as the subjects directory
        instead of the value set using the SUBJECTS_DIR environment variable.
    """
    def __init__(self, subject_id, hemi, surf, subjects_dir=None,
                 offset=None):
        """Surface

        Parameters
        ----------
        subject_id : string
            Name of subject
        hemi : {'lh', 'rh'}
            Which hemisphere to load
        surf : string
            Name of the surface to load (eg. inflated, orig ...)
        offset : float | None
            If 0.0, the surface will be offset such that the medial
            wall is aligned with the origin. If None, no offset will
            be applied. If != 0.0, an additional offset will be used.
        """
        if hemi not in ['lh', 'rh']:
            # Previously the message read 'hemi must be "lh" or "rh' with an
            # unbalanced quote; fixed.
            raise ValueError('hemi must be "lh" or "rh"')
        self.subject_id = subject_id
        self.hemi = hemi
        self.surf = surf
        self.offset = offset
        subjects_dir = _get_subjects_dir(subjects_dir)
        self.data_path = op.join(subjects_dir, subject_id)
    def load_geometry(self):
        """Read vertices/faces from disk and compute vertex normals."""
        surf_path = op.join(self.data_path, "surf",
                            "%s.%s" % (self.hemi, self.surf))
        self.coords, self.faces = nib.freesurfer.read_geometry(surf_path)
        if self.offset is not None:
            # Shift along x so the medial wall sits at (or offset from) x=0.
            if self.hemi == 'lh':
                self.coords[:, 0] -= (np.max(self.coords[:, 0]) + self.offset)
            else:
                self.coords[:, 0] -= (np.min(self.coords[:, 0]) + self.offset)
        self.nn = _compute_normals(self.coords, self.faces)
    def save_geometry(self):
        """Write the current coords/faces back to the FreeSurfer file."""
        surf_path = op.join(self.data_path, "surf",
                            "%s.%s" % (self.hemi, self.surf))
        nib.freesurfer.write_geometry(surf_path, self.coords, self.faces)
    @property
    def x(self):
        return self.coords[:, 0]
    @property
    def y(self):
        return self.coords[:, 1]
    @property
    def z(self):
        return self.coords[:, 2]
    def load_curvature(self):
        """Load in curvature values from the ?h.curv file."""
        curv_path = op.join(self.data_path, "surf", "%s.curv" % self.hemi)
        self.curv = nib.freesurfer.read_morph_data(curv_path)
        # np.int was removed in NumPy 1.24; the builtin int is the documented
        # equivalent.
        self.bin_curv = np.array(self.curv > 0, int)
    def load_label(self, name):
        """Load in a Freesurfer .label file.

        Label files are just text files indicating the vertices included
        in the label. Each Surface instance has a dictionary of labels, keyed
        by the name (which is taken from the file name if not given as an
        argument.
        """
        label = nib.freesurfer.read_label(op.join(self.data_path, 'label',
                                          '%s.%s.label' % (self.hemi, name)))
        # np.int was removed in NumPy 1.24; use the builtin int dtype.
        label_array = np.zeros(len(self.x), int)
        label_array[label] = 1
        try:
            self.labels[name] = label_array
        except AttributeError:
            self.labels = {name: label_array}
    def apply_xfm(self, mtx):
        """Apply an affine transformation matrix to the x,y,z vectors."""
        self.coords = np.dot(np.c_[self.coords, np.ones(len(self.coords))],
                             mtx.T)[:, :3]
def _fast_cross_3d(x, y):
"""Compute cross product between list of 3D vectors
Much faster than np.cross() when the number of cross products
becomes large (>500). This is because np.cross() methods become
less memory efficient at this stage.
Parameters
----------
x : array
Input array 1.
y : array
Input array 2.
Returns
-------
z : array
Cross product of x and y.
Notes
-----
x and y must both be 2D row vectors. One must have length 1, or both
lengths must match.
"""
assert x.ndim == 2
assert y.ndim == 2
assert x.shape[1] == 3
assert y.shape[1] == 3
assert (x.shape[0] == 1 or y.shape[0] == 1) or x.shape[0] == y.shape[0]
if max([x.shape[0], y.shape[0]]) >= 500:
return np.c_[x[:, 1] * y[:, 2] - x[:, 2] * y[:, 1],
x[:, 2] * y[:, 0] - x[:, 0] * y[:, 2],
x[:, 0] * y[:, 1] - x[:, 1] * y[:, 0]]
else:
return np.cross(x, y)
def _compute_normals(rr, tris):
    """Efficiently compute vertex normals for a triangulated surface.

    Each vertex normal is the (normalized) sum of the unit normals of the
    triangles touching that vertex.
    """
    # Per-triangle normals from two edge vectors.
    p1 = rr[tris[:, 0], :]
    p2 = rr[tris[:, 1], :]
    p3 = rr[tris[:, 2], :]
    tri_nn = _fast_cross_3d((p2 - p1), (p3 - p1))
    # Normalize triangle normals, guarding degenerate (zero-area) triangles
    # against divide-by-zero.
    tri_size = np.sqrt(np.sum(tri_nn * tri_nn, axis=1))
    tri_size[np.where(tri_size == 0)[0]] = 1.0
    tri_nn /= tri_size[:, np.newaxis]
    npts = len(rr)
    # Accumulate each triangle's normal onto its three vertices.  The
    # bincount calls are a vectorized (faster) equivalent of:
    #
    #   for p, verts in enumerate(tris):
    #       nn[verts, :] += tri_nn[p, :]
    #
    nn = np.zeros((npts, 3))
    for column in tris.T:  # only loops 3x (number of verts per tri)
        for axis in range(3):  # x, y, z
            nn[:, axis] += np.bincount(column, tri_nn[:, axis],
                                       minlength=npts)
    # Normalize vertex normals with the same zero guard.
    vert_size = np.sqrt(np.sum(nn * nn, axis=1))
    vert_size[vert_size == 0] = 1.0
    nn /= vert_size[:, np.newaxis]
    return nn
###############################################################################
# LOGGING (courtesy of mne-python)
def set_log_level(verbose=None, return_old_level=False):
    """Convenience function for setting the logging level

    Parameters
    ----------
    verbose : bool, str, int, or None
        The verbosity of messages to print. If a str, it can be either DEBUG,
        INFO, WARNING, ERROR, or CRITICAL. Note that these are for
        convenience and are equivalent to passing in logging.DEBUG, etc.
        For bool, True is the same as 'INFO', False is the same as 'WARNING'.
        If None, the environment variable MNE_LOG_LEVEL is read, and if
        it doesn't exist, defaults to INFO.
    return_old_level : bool
        If True, return the old verbosity level.
    """
    # Normalize None/bool inputs to a level name first.
    if verbose is None:
        verbose = "INFO"
    elif isinstance(verbose, bool):
        verbose = 'INFO' if verbose is True else 'WARNING'
    # Then map a level name onto the logging module's integer constants.
    if isinstance(verbose, string_types):
        level_map = dict(DEBUG=logging.DEBUG, INFO=logging.INFO,
                         WARNING=logging.WARNING, ERROR=logging.ERROR,
                         CRITICAL=logging.CRITICAL)
        verbose = verbose.upper()
        if verbose not in level_map:
            raise ValueError('verbose must be of a valid type')
        verbose = level_map[verbose]
    old_verbose = logger.level
    logger.setLevel(verbose)
    if return_old_level:
        return old_verbose
    return None
class WrapStdOut(object):
    """Proxy that forwards every attribute to the *current* sys.stdout.

    Ridiculous-looking, but doctest swaps sys.stdout out at run time, so the
    lookup must happen lazily on each access (it must go through sys.stdout,
    not a stdout reference saved earlier; tested on OSX and Linux).
    """
    def __getattr__(self, attribute):
        return getattr(sys.stdout, attribute)
def set_log_file(fname=None, output_format='%(message)s', overwrite=None):
    """Direct the package logger to a file (or back to stdout).

    Parameters
    ----------
    fname : str, or None
        Filename of the log to print to. If None, stdout is used.
        To suppress log outputs, use set_log_level('WARN').
    output_format : str
        Format of the output messages, e.g.
        "%(asctime)s - %(levelname)s - %(message)s". See
        http://docs.python.org/dev/howto/logging.html for examples.
    overwrite : bool, or None
        Overwrite the log file (if it exists). Otherwise, statements
        will be appended to the log (default). None is the same as False,
        but additionally raises a warning to notify the user that log
        entries will be appended.
    """
    # Close and detach any file handlers left from a previous call.
    for handler in logger.handlers:
        if isinstance(handler, logging.FileHandler):
            handler.close()
            logger.removeHandler(handler)
    if fname is None:
        # WrapStdOut (rather than sys.stdout directly) keeps doctest's
        # stdout capturing working.
        new_handler = logging.StreamHandler(WrapStdOut())
    else:
        if overwrite is None and op.isfile(fname):
            warnings.warn('Log entries will be appended to the file. Use '
                          'overwrite=False to avoid this message in the '
                          'future.')
        mode = 'w' if overwrite is True else 'a'
        new_handler = logging.FileHandler(fname, mode=mode)
    new_handler.setFormatter(logging.Formatter(output_format))
    # actually add the stream handler
    logger.addHandler(new_handler)
def verbose(function):
    """Decorator allowing a per-call ``verbose`` keyword to override the log level.

    Do not call this function directly to set the global verbosity level,
    instead use set_log_level().

    Parameters (to decorated function)
    ----------------------------------
    verbose : bool, str, int, or None
        The level of messages to print (DEBUG, INFO, WARNING, ERROR,
        CRITICAL, a bool, or a logging constant). None keeps the current
        global log level.
    """
    arg_names = inspect.getargspec(function).args
    # @wraps keeps the decorated function picklable (e.g. for parallel use).
    @wraps(function)
    def dec(*args, **kwargs):
        # Methods may carry a default level on self.verbose.
        if arg_names and arg_names[0] == 'self':
            default_level = getattr(args[0], 'verbose', None)
        else:
            default_level = None
        requested = kwargs.get('verbose', default_level)
        if requested is None:
            return function(*args, **kwargs)
        old_level = set_log_level(requested, True)
        try:
            return function(*args, **kwargs)
        finally:
            # Restore the previous level on success *and* on exception.
            set_log_level(old_level)
    # set __wrapped__ attribute so ?? in IPython gets the right source
    dec.__wrapped__ = function
    return dec
###############################################################################
# USEFUL FUNCTIONS
def find_closest_vertices(surface_coords, point_coords):
    """Map each query coordinate to the nearest vertex of a surface mesh.

    The distance metric used is Euclidian distance.

    Parameters
    ----------
    surface_coords : numpy array
        Array of coordinates on a surface mesh
    point_coords : numpy array
        Array of coordinates to map to vertices

    Returns
    -------
    closest_vertices : numpy array
        Array of mesh vertex ids
    """
    targets = np.atleast_2d(point_coords)
    # cdist gives an (n_vertices, n_points) distance matrix; the argmin over
    # axis 0 picks the closest vertex id for each query point.
    distances = cdist(surface_coords, targets)
    return np.argmin(distances, axis=0)
def tal_to_mni(coords):
    """Convert Talairach coords to MNI using the Lancaster transform.

    Parameters
    ----------
    coords : n x 3 numpy array
        Array of Talairach coordinates

    Returns
    -------
    mni_coords : n x 3 numpy array
        Array of coordinates converted to MNI space
    """
    coords = np.atleast_2d(coords)
    # Lancaster affine transform (homogeneous 4x4 matrix).
    lancaster_xfm = np.array([[1.06860, -0.00396, 0.00826, 1.07816],
                              [0.00640, 1.05741, 0.08566, 1.16824],
                              [-0.01281, -0.08863, 1.10792, -4.17805],
                              [0.00000, 0.00000, 0.00000, 1.00000]])
    # Append a column of ones, apply the transform, drop the homogeneous part.
    homogeneous = np.c_[coords, np.ones(coords.shape[0])]
    return np.dot(homogeneous, lancaster_xfm.T)[:, :3]
def mesh_edges(faces):
    """Build the symmetric sparse adjacency matrix of a triangle mesh.

    Parameters
    ----------
    faces : array of shape [n_triangles x 3]
        The mesh faces

    Returns
    -------
    edges : sparse matrix
        The adjacency matrix
    """
    n_points = np.max(faces) + 1
    n_faces = len(faces)
    v0, v1, v2 = faces.T
    ones = np.ones(n_faces)
    mat_shape = (n_points, n_points)
    # One entry per directed edge of every triangle...
    adj = sparse.coo_matrix((ones, (v0, v1)), shape=mat_shape)
    adj = adj + sparse.coo_matrix((ones, (v1, v2)), shape=mat_shape)
    adj = adj + sparse.coo_matrix((ones, (v2, v0)), shape=mat_shape)
    # ...then symmetrize so both directions of each edge are present.
    adj = adj + adj.T
    return adj.tocoo()
def create_color_lut(cmap, n_colors=256):
    """Return a colormap suitable for setting as a Mayavi LUT.

    Parameters
    ----------
    cmap : string, list of colors, n x 3 or n x 4 array
        Input colormap definition. This can be the name of a matplotlib
        colormap, a list of valid matplotlib colors, or a suitable
        mayavi LUT (possibly missing the alpha channel).
    n_colors : int, optional
        Number of colors in the resulting LUT. This is ignored if cmap
        is a 2d array.

    Returns
    -------
    lut : n_colors x 4 integer array
        Color LUT suitable for passing to mayavi
    """
    if isinstance(cmap, np.ndarray):
        if np.ndim(cmap) == 2:
            # NOTE: np.int was removed in NumPy >= 1.24; it was an alias for
            # the builtin int, so using int directly is behavior-preserving.
            if cmap.shape[1] == 4:
                # This looks likes a LUT that's ready to go
                lut = cmap.astype(int)
            elif cmap.shape[1] == 3:
                # This looks like a LUT, but it's missing the alpha channel
                alpha = np.ones(len(cmap), int) * 255
                lut = np.c_[cmap, alpha]
            else:
                # Previously this fell through to `return lut` with `lut`
                # unbound (NameError); fail with a meaningful message instead.
                raise ValueError("Input array must have 3 or 4 columns to "
                                 "be used as a LUT")
            return lut
    # Otherwise, we're going to try and use matplotlib to create it
    if cmap in dir(cm):
        # This is probably a matplotlib colormap, so build from that
        # The matplotlib colormaps are a superset of the mayavi colormaps
        # except for one or two cases (i.e. blue-red, which is a crappy
        # rainbow colormap and shouldn't be used for anything, although in
        # its defense it's better than "Jet")
        cmap = getattr(cm, cmap)
    elif np.iterable(cmap):
        # This looks like a list of colors? Let's try that.
        colors = list(map(mpl.colors.colorConverter.to_rgb, cmap))
        cmap = mpl.colors.LinearSegmentedColormap.from_list("_", colors)
    else:
        # If we get here, it's a bad input
        raise ValueError("Input %s was not valid for making a lut" % cmap)
    # Convert from a matplotlib colormap to a lut array
    lut = (cmap(np.linspace(0, 1, n_colors)) * 255).astype(int)
    return lut
@verbose
def smoothing_matrix(vertices, adj_mat, smoothing_steps=20, verbose=None):
    """Create a smoothing matrix which can be used to interpolate data defined
    for a subset of vertices onto mesh with an adjancency matrix given by
    adj_mat.

    If smoothing_steps is None, as many smoothing steps are applied until
    the whole mesh is filled with non-zeros. Only use this option if
    the vertices correspond to a subsampled version of the mesh.

    Parameters
    ----------
    vertices : 1d array
        vertex indices
    adj_mat : sparse matrix
        N x N adjacency matrix of the full mesh
    smoothing_steps : int or None
        number of smoothing steps (Default: 20)
    verbose : bool, str, int, or None
        If not None, override default verbose level (see surfer.verbose).

    Returns
    -------
    smooth_mat : sparse matrix
        smoothing matrix with size N x len(vertices)
    """
    from scipy import sparse
    logger.info("Updating smoothing matrix, be patient..")
    # Binarize the adjacency matrix (entries of 2 can arise from the
    # symmetrized construction, e.g. in mesh_edges).
    e = adj_mat.copy()
    e.data[e.data == 2] = 1
    n_vertices = e.shape[0]
    # Add the identity so each vertex counts as its own neighbor.
    e = e + sparse.eye(n_vertices, n_vertices)
    idx_use = vertices
    smooth_mat = 1.0
    # When smoothing_steps is None, iterate until the mesh is covered,
    # capped at 1000 steps.
    n_iter = smoothing_steps if smoothing_steps is not None else 1000
    for k in range(n_iter):
        e_use = e[:, idx_use]
        # data1[i] = number of currently-active vertices adjacent to vertex i;
        # vertices reached in this step become active for the next one.
        data1 = e_use * np.ones(len(idx_use))
        idx_use = np.where(data1)[0]
        # Row-normalize by the neighbor counts.
        scale_mat = sparse.dia_matrix((1 / data1[idx_use], 0),
                                      shape=(len(idx_use), len(idx_use)))
        smooth_mat = scale_mat * e_use[idx_use, :] * smooth_mat
        logger.info("Smoothing matrix creation, step %d" % (k + 1))
        if smoothing_steps is None and len(idx_use) >= n_vertices:
            break
    # Make sure the smoothing matrix has the right number of rows
    # and is in COO format
    smooth_mat = smooth_mat.tocoo()
    smooth_mat = sparse.coo_matrix((smooth_mat.data,
                                    (idx_use[smooth_mat.row],
                                     smooth_mat.col)),
                                   shape=(n_vertices,
                                          len(vertices)))
    return smooth_mat
@verbose
def coord_to_label(subject_id, coord, label, hemi='lh', n_steps=30,
                   map_surface='white', coord_as_vert=False, verbose=None):
    """Create label from MNI coordinate

    Parameters
    ----------
    subject_id : string
        Use if file is in register with subject's orig.mgz
    coord : numpy array of size 3 | int
        One coordinate in MNI space or the vertex index.
    label : str
        Label name
    hemi : [lh, rh]
        Hemisphere target
    n_steps : int
        Number of dilation iterations
    map_surface : str
        The surface name used to find the closest point
    coord_as_vert : bool
        whether the coords parameter should be interpreted as vertex ids
    verbose : bool, str, int, or None
        If not None, override default verbose level (see surfer.verbose).
    """
    geo = Surface(subject_id, hemi, map_surface)
    geo.load_geometry()
    if coord_as_vert:
        coord = geo.coords[coord]
    n_vertices = len(geo.coords)
    adj_mat = mesh_edges(geo.faces)
    foci_vtxs = find_closest_vertices(geo.coords, [coord])
    # Seed the closest vertex and dilate it n_steps times by smoothing.
    data = np.zeros(n_vertices)
    data[foci_vtxs] = 1.
    smooth_mat = smoothing_matrix(np.arange(n_vertices), adj_mat, 1)
    for _ in range(n_steps):
        data = smooth_mat * data
    idx = np.where(data.ravel() > 0)[0]
    # Write label
    label_fname = label + '-' + hemi + '.label'
    logger.info("Saving label : %s" % label_fname)
    # Use a context manager so the handle is closed and flushed even on
    # error; the original left the file open.
    with open(label_fname, 'w') as f:
        f.write('#label at %s from subject %s\n' % (coord, subject_id))
        f.write('%d\n' % len(idx))
        for i in idx:
            x, y, z = geo.coords[i]
            f.write('%d %f %f %f 0.000000\n' % (i, x, y, z))
def _get_subjects_dir(subjects_dir=None, raise_error=True):
"""Get the subjects directory from parameter or environment variable
Parameters
----------
subjects_dir : str | None
The subjects directory.
raise_error : bool
If True, raise a ValueError if no value for SUBJECTS_DIR can be found
or the corresponding directory does not exist.
Returns
-------
subjects_dir : str
The subjects directory. If the subjects_dir input parameter is not
None, its value will be returned, otherwise it will be obtained from
the SUBJECTS_DIR environment variable.
"""
if subjects_dir is None:
subjects_dir = os.environ.get("SUBJECTS_DIR", "")
if not subjects_dir and raise_error:
raise ValueError('The subjects directory has to be specified '
'using the subjects_dir parameter or the '
'SUBJECTS_DIR environment variable.')
if raise_error and not os.path.exists(subjects_dir):
raise ValueError('The subjects directory %s does not exist.'
% subjects_dir)
return subjects_dir
def has_fsaverage(subjects_dir=None):
    """Determine whether the user has a usable fsaverage"""
    # A usable fsaverage needs both the subject folder and its surf/
    # subdirectory to exist.
    base = _get_subjects_dir(subjects_dir, False)
    fs_dir = op.join(base, 'fsaverage')
    return op.isdir(fs_dir) and op.isdir(op.join(fs_dir, 'surf'))
# Test decorator that skips a test when no usable fsaverage subject exists.
# NOTE(review): np.testing.dec was removed in recent NumPy releases --
# confirm the pinned NumPy version still provides it.
requires_fsaverage = np.testing.dec.skipif(not has_fsaverage(),
                                           'Requires fsaverage subject data')
def has_ffmpeg():
    """Test whether the FFmpeg is available in a subprocess

    Returns
    -------
    ffmpeg_exists : bool
        True if FFmpeg can be successfully called, False otherwise.
    """
    # Launching "ffmpeg" with no arguments is enough to probe availability;
    # OSError means the executable was not found on the PATH.
    try:
        subprocess.call(["ffmpeg"], stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE)
    except OSError:
        return False
    return True
def assert_ffmpeg_is_available():
    """Raise a RuntimeError if FFmpeg is not in the PATH."""
    if not has_ffmpeg():
        # Fixed typo in the user-facing message: "downlaoded" -> "downloaded".
        err = ("FFmpeg is not in the path and is needed for saving "
               "movies. Install FFmpeg and try again. It can be "
               "downloaded from http://ffmpeg.org/download.html.")
        raise RuntimeError(err)
requires_ffmpeg = np.testing.dec.skipif(not has_ffmpeg(), 'Requires FFmpeg')
def ffmpeg(dst, frame_path, framerate=24, codec='mpeg4', bitrate='1M'):
    """Run FFmpeg in a subprocess to convert an image sequence into a movie

    Parameters
    ----------
    dst : str
        Destination path. If the extension is not ".mov" or ".avi", ".mov" is
        added. If the file already exists it is overwritten.
    frame_path : str
        Path to the source frames (with a frame number field like '%04d').
    framerate : float
        Framerate of the movie (frames per second, default 24).
    codec : str | None
        Codec to use (default 'mpeg4'). If None, the codec argument is not
        forwarded to ffmpeg, which preserves compatibility with very old
        versions of ffmpeg
    bitrate : str | float
        Bitrate to use to encode movie. Can be specified as number (e.g.
        64000) or string (e.g. '64k'). Default value is 1M

    Notes
    -----
    Requires FFmpeg to be in the path. FFmpeg can be downloaded from `here
    <http://ffmpeg.org/download.html>`_. Stdout and stderr are written to the
    logger. If the movie file is not created, a RuntimeError is raised.
    """
    assert_ffmpeg_is_available()
    # find target path
    dst = os.path.expanduser(dst)
    dst = os.path.abspath(dst)
    root, ext = os.path.splitext(dst)
    dirname = os.path.dirname(dst)
    if ext not in ['.mov', '.avi']:
        dst += '.mov'
    # Overwrite an existing target; otherwise make sure its directory exists.
    if os.path.exists(dst):
        os.remove(dst)
    elif not os.path.exists(dirname):
        os.mkdir(dirname)
    frame_dir, frame_fmt = os.path.split(frame_path)
    # make the movie
    cmd = ['ffmpeg', '-i', frame_fmt, '-r', str(framerate), '-b', str(bitrate)]
    if codec is not None:
        cmd += ['-c', codec]
    cmd += [dst]
    logger.info("Running FFmpeg with command: %s", ' '.join(cmd))
    # Run from the frame directory so the relative frame pattern resolves.
    sp = subprocess.Popen(cmd, cwd=frame_dir, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
    # log stdout and stderr
    stdout, stderr = sp.communicate()
    # NOTE(review): on Python 3 communicate() returns bytes, which would make
    # these os.linesep.join calls fail -- confirm this module targets
    # Python 2 or decode the streams first.
    std_info = os.linesep.join(("FFmpeg stdout", '=' * 25, stdout))
    logger.info(std_info)
    if stderr.strip():
        err_info = os.linesep.join(("FFmpeg stderr", '=' * 27, stderr))
        logger.error(err_info)
    # check that movie file is created
    if not os.path.exists(dst):
        err = ("FFmpeg failed, no file created; see log for more more "
               "information.")
        raise RuntimeError(err)
| |
## Automatically adapted for numpy.oldnumeric Jun 27, 2008 by -c
# $Id$
#
# Copyright (C) 2007 by Greg Landrum
# All rights reserved
#
from rdkit import Geometry
from rdkit.Chem.Subshape import SubshapeObjects
import math
import numpy
#-----------------------------------------------------------------------------
def ComputeGridIndices(shapeGrid, winRad):
  """ Return the linear grid-index offsets of all grid cells whose integer
  (truncated) distance from the center lies within winRad.

  The result is cached on the grid object as _indicesInSphere so repeated
  calls with the same grid are free.
  """
  cached = getattr(shapeGrid, '_indicesInSphere', None)
  if cached:
    return cached
  gridSpacing = shapeGrid.GetSpacing()
  nX = shapeGrid.GetNumX()
  nY = shapeGrid.GetNumY()
  nZ = shapeGrid.GetNumZ()  # fetched as in the original; only nX/nY enter the offset math
  radInGrid = int(winRad / gridSpacing)
  offsets = []
  span = range(-radInGrid, radInGrid + 1)
  for i in span:
    for j in span:
      for k in span:
        # truncated Euclidean distance in grid units
        if int(math.sqrt(i * i + j * j + k * k)) <= radInGrid:
          offsets.append((i * nY + j) * nX + k)
  shapeGrid._indicesInSphere = offsets
  return offsets
#-----------------------------------------------------------------------------
def ComputeShapeGridCentroid(pt, shapeGrid, winRad):
  """ Compute the occupancy-weighted centroid of the grid within winRad of pt.

  Returns a (totalWeight, centroid) tuple; raises ValueError when no
  occupancy at all is found inside the sphere.
  """
  count = 0
  centroid = Geometry.Point3D(0, 0, 0)
  idxI = shapeGrid.GetGridPointIndex(pt)
  shapeGridVect = shapeGrid.GetOccupancyVect()
  indicesInSphere = ComputeGridIndices(shapeGrid, winRad)
  nGridPts = len(shapeGridVect)
  for idxJ in indicesInSphere:
    idx = idxI + idxJ
    # skip offsets that fall outside the grid
    if idx >= 0 and idx < nGridPts:
      wt = shapeGridVect[idx]
      tPt = shapeGrid.GetGridPointLoc(idx)
      centroid += tPt * wt
      count += wt
  if not count:
    # replaced the Python-2-only "raise E, msg" form with call syntax,
    # which is valid on both Python 2 and Python 3
    raise ValueError('found no weight in sphere')
  centroid /= count
  #print 'csgc:','(%2f,%2f,%2f)'%tuple(pt),'(%2f,%2f,%2f)'%tuple(centroid),count
  return count, centroid
#-----------------------------------------------------------------------------
def FindTerminalPtsFromShape(shape, winRad, fraction, maxGridVal=3):
  """ Wrap the grid terminal points of a shape as SkeletonPoint objects.

  (maxGridVal is accepted for interface compatibility but not used here.)
  """
  gridPts = Geometry.FindGridTerminalPoints(shape.grid, winRad, fraction)
  termPts = []
  for gridPt in gridPts:
    termPts.append(SubshapeObjects.SkeletonPoint(location=gridPt))
  return termPts
#-----------------------------------------------------------------------------
def FindTerminalPtsFromConformer(conf,winRad,nbrCount):
  # Derive terminal skeleton points from a conformer: atoms with fewer than
  # nbrCount heavy-atom neighbors within winRad contribute a weighted-average
  # position. If nothing qualifies, the threshold is relaxed and retried.
  mol = conf.GetOwningMol()
  nAts = conf.GetNumAtoms()
  nbrLists=[[] for x in range(nAts)]
  for i in range(nAts):
    # skip hydrogens and dummy atoms (atomic number <= 1)
    if(mol.GetAtomWithIdx(i).GetAtomicNum()<=1): continue
    pi = conf.GetAtomPosition(i)
    # each heavy atom counts as its own neighbor
    nbrLists[i].append((i,pi))
    for j in range(i+1,nAts):
      if(mol.GetAtomWithIdx(j).GetAtomicNum()<=1): continue
      pj = conf.GetAtomPosition(j)
      dist = pi.Distance(conf.GetAtomPosition(j))
      if dist<winRad:
        nbrLists[i].append((j,pj))
        nbrLists[j].append((i,pi))
  termPts=[]
  #for i in range(nAts):
  # if not len(nbrLists[i]): continue
  # if len(nbrLists[i])>10:
  #   print i+1,len(nbrLists[i])
  # else:
  #   print i+1,len(nbrLists[i]),[x[0]+1 for x in nbrLists[i]]
  # Relax the neighbor-count threshold until at least one terminal point
  # is found.
  while 1:
    for i in range(nAts):
      if not nbrLists[i]: continue
      pos = Geometry.Point3D(0,0,0)
      totWt=0.0
      if len(nbrLists[i])<nbrCount:
        nbrList = nbrLists[i]
        for j in range(0,len(nbrList)):
          nbrJ,posJ=nbrList[j]
          # weight each neighbor by how sparse atom i's environment is
          # relative to that neighbor's
          weight = 1.*len(nbrLists[i])/len(nbrLists[nbrJ])
          pos += posJ*weight
          totWt+=weight
        pos /= totWt
        termPts.append(SubshapeObjects.SkeletonPoint(location=pos))
    if not len(termPts):
      nbrCount += 1
    else:
      break
  return termPts
#-----------------------------------------------------------------------------
def FindGridPointBetweenPoints(pt1, pt2, shapeGrid, winRad):
  """ Starting from the midpoint of pt1/pt2, repeatedly move to the local
  occupancy centroid until the step size drops below the grid spacing.
  """
  probe = pt1 + pt2
  probe /= 2.0
  stepSize = 1e8
  while stepSize > shapeGrid.GetSpacing():
    count, centroid = Geometry.ComputeGridCentroid(shapeGrid, probe, winRad)
    stepSize = probe.Distance(centroid)
    probe = centroid
  return probe
#-----------------------------------------------------------------------------
def ClusterTerminalPts(pts, winRad, scale):
  """ Greedily cluster terminal points that lie within scale*winRad of each
  other and replace every cluster by the average of its members' locations.
  """
  res = []
  remaining = [(p, idx) for idx, p in enumerate(pts)]
  while remaining:
    seed, seedIdx = remaining.pop(0)
    members = [seed]
    # sweep the remaining points, absorbing everything close to the seed
    pos = 0
    while pos < len(remaining):
      cand, candIdx = remaining[pos]
      if seed.location.Distance(cand.location) < scale * winRad:
        members.append(cand)
        del remaining[pos]
      else:
        pos += 1
    # replace the cluster by the mean location of its members
    avg = Geometry.Point3D(0, 0, 0)
    for member in members:
      avg += member.location
    avg /= len(members)
    res.append(SubshapeObjects.SkeletonPoint(location=avg))
  return res
def GetMoreTerminalPoints(shape,pts,winRad,maxGridVal,targetNumber=5):
  """ adds a set of new terminal points using a max-min algorithm
  """
  shapeGrid=shape.grid
  shapeVect = shapeGrid.GetOccupancyVect()
  nGridPts = len(shapeVect)
  # loop, taking the grid point with the maximum minimum distance, until
  # we have enough points
  while len(pts)<targetNumber:
    maxMin=-1
    for i in range(nGridPts):
      # only well-occupied grid points are candidates
      if shapeVect[i]<maxGridVal:
        continue
      minVal=1e8
      posI = shapeGrid.GetGridPointLoc(i)
      # distance to the nearest already-chosen point
      for currPt in pts:
        dst = posI.Distance(currPt.location)
        if dst<minVal:
          minVal=dst
      if minVal>maxMin:
        maxMin=minVal
        bestPt=posI
    # NOTE(review): if no grid point reaches maxGridVal, bestPt is unbound
    # here and the next line raises NameError -- confirm callers guarantee
    # some occupancy.
    count,centroid=Geometry.ComputeGridCentroid(shapeGrid,bestPt,winRad)
    pts.append(SubshapeObjects.SkeletonPoint(location=centroid))
def FindFarthestGridPoint(shape,loc,winRad,maxGridVal):
  """ find the grid point with max occupancy that is furthest from a
  given location
  """
  shapeGrid=shape.grid
  shapeVect = shapeGrid.GetOccupancyVect()
  nGridPts = len(shapeVect)
  dMax=-1;
  for i in range(nGridPts):
    # only well-occupied grid points are candidates
    if shapeVect[i]<maxGridVal:
      continue
    posI = shapeGrid.GetGridPointLoc(i)
    dst = posI.Distance(loc)
    if dst>dMax:
      dMax=dst
      res=posI
  # Snap the winning grid point onto the local occupancy centroid.
  # NOTE(review): if no grid point reaches maxGridVal, res is unbound here
  # and this raises NameError -- confirm callers guarantee some occupancy.
  count,centroid=Geometry.ComputeGridCentroid(shapeGrid,res,winRad)
  res=centroid
  return res
def ExpandTerminalPts(shape, pts, winRad, maxGridVal=3.0, targetNumPts=5):
  """ Grow the terminal-point list in place until it holds targetNumPts points.

  A single seed point is paired with the farthest well-occupied grid point;
  a pair gets a midpoint; anything still short of the target is filled via
  the max-min selection in GetMoreTerminalPoints.
  """
  if len(pts) == 1:
    # if there's only one point, find the grid point with max value that is
    # *farthest* from this one and use it:
    farPt = FindFarthestGridPoint(shape, pts[0].location, winRad, maxGridVal)
    pts.append(SubshapeObjects.SkeletonPoint(location=farPt))
  if len(pts) == 2:
    # add a point roughly in the middle:
    midPt = FindGridPointBetweenPoints(pts[0].location, pts[1].location,
                                       shape.grid, winRad)
    pts.append(SubshapeObjects.SkeletonPoint(location=midPt))
  if len(pts) < targetNumPts:
    GetMoreTerminalPoints(shape, pts, winRad, maxGridVal, targetNumPts)
#-----------------------------------------------------------------------------
def AppendSkeletonPoints(shapeGrid,termPts,winRad,stepDist,maxGridVal=3,
                         maxDistC=15.0,distTol=1.5,symFactor=1.5):
  # Generate candidate skeleton points from well-occupied grid cells, snap
  # them to local occupancy centroids, then thin them so surviving points
  # stay roughly symFactor*stepDist apart. Terminal points are always kept.
  # (distTol is currently unused.)
  nTermPts = len(termPts)
  skelPts=[]
  shapeVect = shapeGrid.GetOccupancyVect()
  nGridPts = len(shapeVect)
  # find all possible skeleton points
  print 'generate all possible'
  for i in range(nGridPts):
    if shapeVect[i]<maxGridVal:
      continue
    posI = shapeGrid.GetGridPointLoc(i)
    ok=True
    # candidates must not sit within stepDist of a terminal point
    for pt in termPts:
      dst = posI.Distance(pt.location)
      if dst<stepDist:
        ok=False
        break
    if ok:
      skelPts.append(SubshapeObjects.SkeletonPoint(location=posI))
  # now start removing them
  print 'Compute centroids:',len(skelPts)
  gridBoxVolume=shapeGrid.GetSpacing()**3
  # occupancy of a fully-occupied sphere of radius winRad, used to
  # normalize each point's fractional volume
  maxVol = 4.0*math.pi/3.0 * winRad**3 * maxGridVal / gridBoxVolume
  i=0
  while i<len(skelPts):
    pt = skelPts[i]
    count,centroid=Geometry.ComputeGridCentroid(shapeGrid,pt.location,winRad)
    #count,centroid=ComputeShapeGridCentroid(pt.location,shapeGrid,winRad)
    centroidPtDist=centroid.Distance(pt.location)
    if centroidPtDist>maxDistC:
      # too far from the local center of mass: discard
      del skelPts[i]
    else:
      pt.fracVol = float(count)/maxVol
      # snap the point onto the centroid
      pt.location.x = centroid.x
      pt.location.y = centroid.y
      pt.location.z = centroid.z
      i+=1
  print 'remove points:',len(skelPts)
  res = termPts+skelPts
  i=0
  while i<len(res):
    p=-1
    mFrac=0.0
    ptI = res[i]
    # never remove terminal points (they occupy indices < nTermPts)
    startJ = max(i+1,nTermPts)
    for j in range(startJ,len(res)):
      ptJ=res[j]
      distC = ptI.location.Distance(ptJ.location)
      if distC<symFactor*stepDist:
        # remember the nearby point with the largest fractional volume
        if ptJ.fracVol>mFrac:
          p=j
          mFrac=ptJ.fracVol
    #print i,len(res),p,mFrac
    if p>-1:
      # keep the best nearby point, drop the other close neighbors, and
      # move the survivor to the end of the list for later reconsideration
      ptP = res.pop(p)
      j = startJ
      while j < len(res):
        ptJ=res[j]
        distC = ptI.location.Distance(ptJ.location)
        if distC<symFactor*stepDist:
          del res[j]
        else:
          j+=1
      res.append(ptP)
      #print '% 3d'%i,'% 5.2f % 5.2f % 5.2f'%tuple(list(ptI.location)),' - ','% 5.2f % 5.2f % 5.2f'%tuple(list(ptJ.location))
    i+=1
  return res
#-----------------------------------------------------------------------------
def CalculateDirectionsAtPoint(pt,shapeGrid,winRad):
  # Compute the principal shape directions at pt: eigenvectors of the
  # occupancy-weighted covariance matrix of grid points within winRad.
  # Results are stored on pt as shapeMoments (eigenvalues, descending)
  # and shapeDirs (matching eigenvectors as Point3D objects).
  shapeGridVect = shapeGrid.GetOccupancyVect()
  nGridPts = len(shapeGridVect)
  tmp = winRad/shapeGrid.GetSpacing()
  radInGrid=int(tmp)
  radInGrid2=int(tmp*tmp)
  covMat = numpy.zeros((3,3),numpy.float64)
  dX = shapeGrid.GetNumX()
  dY = shapeGrid.GetNumY()
  dZ = shapeGrid.GetNumZ()
  # decompose the linear grid index into x/y/z grid coordinates
  idx = shapeGrid.GetGridPointIndex(pt.location)
  idxZ = idx//(dX*dY)
  rem = idx%(dX*dY)
  idxY = rem//dX
  idxX = rem%dX
  totWt=0.0
  for i in range(-radInGrid,radInGrid):
    xi = idxX+i
    for j in range(-radInGrid,radInGrid):
      xj = idxY+j
      for k in range(-radInGrid,radInGrid):
        xk = idxZ+k
        d2 = i*i+j*j+k*k
        # reject offsets outside the sphere
        if d2>radInGrid2 and int(math.sqrt(d2))>radInGrid:
          continue
        gridIdx = (xk*dY+xj)*dX+xi
        if gridIdx>=0 and gridIdx<nGridPts:
          wtHere = shapeGridVect[gridIdx]
          totWt += wtHere
          ptInSphere = shapeGrid.GetGridPointLoc(gridIdx)
          ptInSphere -= pt.location
          # accumulate the upper triangle of the covariance matrix
          covMat[0][0]+= wtHere*(ptInSphere.x*ptInSphere.x)
          covMat[0][1]+= wtHere*(ptInSphere.x*ptInSphere.y)
          covMat[0][2]+= wtHere*(ptInSphere.x*ptInSphere.z)
          covMat[1][1]+= wtHere*(ptInSphere.y*ptInSphere.y)
          covMat[1][2]+= wtHere*(ptInSphere.y*ptInSphere.z)
          covMat[2][2]+= wtHere*(ptInSphere.z*ptInSphere.z)
  covMat /= totWt
  # mirror the upper triangle to make the matrix symmetric
  covMat[1][0] = covMat[0][1]
  covMat[2][0] = covMat[0][2]
  covMat[2][1] = covMat[1][2]
  eVals,eVects = numpy.linalg.eigh(covMat)
  # sort eigenpairs by decreasing eigenvalue
  # NOTE(review): zip() returns an iterator on Python 3, so sv.sort() would
  # raise there -- this assumes Python 2.
  sv = zip(eVals,numpy.transpose(eVects))
  sv.sort(reverse=True)
  eVals,eVects=zip(*sv)
  pt.shapeMoments=tuple(eVals)
  pt.shapeDirs = tuple([Geometry.Point3D(p[0],p[1],p[2]) for p in eVects])
  #print '-------------'
  #print pt.location.x,pt.location.y,pt.location.z
  #for v in covMat:
  # print ' ',v
  #print '---'
  #print eVals
  #for v in eVects:
  # print ' ',v
  #print '-------------'
#-----------------------------------------------------------------------------
def AssignMolFeatsToPoints(pts,mol,featFactory,winRad):
  # Tag each skeleton point with the families of the pharmacophore features
  # that lie within winRad of it (each family recorded at most once per point).
  feats = featFactory.GetFeaturesForMol(mol)
  for i,pt in enumerate(pts):
    for feat in feats:
      if feat.GetPos().Distance(pt.location)<winRad:
        typ = feat.GetFamily()
        if typ not in pt.molFeatures:
          pt.molFeatures.append(typ)
    print i,pt.molFeatures
| |
import core.modules
import core.modules.module_registry
from core.modules.vistrails_module import Module, ModuleError
import numpy
class ArrayModule(object):
    # Mixin that supplies the VisTrails registry namespace shared by all
    # numpy array modules.
    my_namespace = 'numpy|array'
class NDArray(Module, ArrayModule):
    """ Container class for the numpy.ndarray class """
    def __init__(self):
        Module.__init__(self)
        self.array = None          # the wrapped numpy.ndarray
        self.names = {}            # per-row display names
        self.general_name = None   # base name applied to all rows
        self.domain = ''
        self.range = ''
    def get_shape(self):
        return self.array.shape
    # Array Operation
    def reshape(self, shape):
        self.array.shape = shape
    # Array Access
    def get_reals(self):
        return self.array.real
    # Array Access
    def get_imaginary(self):
        return self.array.imag
    # Array Access
    def get_max(self):
        return self.array.max()
    # Array Access
    def get_mean(self, axis=None):
        return self.array.mean(axis=axis)
    # Array Access
    def get_min(self):
        return self.array.min()
    # Array Operation
    def cumulative_sum(self):
        return self.array.cumsum()
    # Array Access
    def get_arg_min(self):
        return self.array.argmin()
    # Array Access
    def get_arg_max(self):
        return self.array.argmax()
    # Array Access
    def get_diagonal(self):
        return self.array.diagonal()
    # Array Naming
    def get_name(self, row):
        # dict.has_key() is Python-2-only; dict.get() behaves identically
        # here (None for a missing row) and works on Python 2 and 3.
        return self.names.get(row, None)
    # Array Naming
    def get_general_name(self):
        return self.general_name
    # Array Naming
    def set_name(self, name, index=False):
        # index=True numbers rows from 1, otherwise from 0
        self.general_name = name
        for i in range(self.array.shape[0]):
            if index:
                self.names[i] = name + " " + str(i+1)
            else:
                self.names[i] = name + " " + str(i)
    # Array Naming
    def set_row_name(self, name, row):
        self.names[row] = name
    # Array Naming
    def clear_names(self):
        self.general_name = None
        self.names = {}
    # Array Naming
    def get_domain_name(self):
        return self.domain
    # Array Naming
    def get_range_name(self):
        return self.range
    # Array Naming
    def set_domain_name(self, name):
        self.domain = name
    # Array Naming
    def set_range_name(self, name):
        self.range = name
    # Array Naming
    def clear_domain_name(self):
        self.domain = ''
    # Array Naming
    def clear_range_name(self):
        self.range = ''
    # Array Operation
    def sort_array(self, axis=-1, kind='quicksort', order=None):
        # NOTE: despite the name this returns the *sorting indices*
        # (argsort), matching the original behavior.
        return self.array.argsort(axis, kind, order)
    # Array Access
    def get_array_as_type(self, t):
        return self.array.astype(t)
    # Array Operation
    def swap_bytes(self, view=False):
        return self.array.byteswap(view)
    # Array Access
    def get_conjugate(self):
        return self.array.conjugate().copy()
    # Array Operation
    def cumulative_product(self):
        return self.array.cumprod()
    # Array Convert
    def dump_to_file(self, fn):
        self.array.dump(fn)
    # Array Convert
    def dump_to_string(self):
        return self.array.dumps()
    # Array Operation
    def fill_array(self, val=0.):
        self.array.fill(val)
    # Array Access
    def get_flattened(self):
        return self.array.flatten()
    # Array Access
    def get_field(self, dtype, offset):
        return self.array.getfield(dtype, offset)
    # Array Access
    def get_item(self):
        return self.array.item()
    # Array Access
    def get_mem_size(self):
        return self.array.nbytes
    # Array Access
    def get_num_dims(self):
        return self.array.ndim
    # Array Access
    def get_nonzero_indices(self):
        return self.array.nonzero()
    # Array Operation
    def put(self, indices, values, mode):
        self.array.put(indices, values, mode)
    # Array Operation
    def ravel(self):
        return self.array.ravel()
    # Array Operation
    def resize(self, newshape, refcheck=True, order=False):
        # numpy.resize() only accepts (array, new_shape); forwarding
        # refcheck/order (as the original did) always raised TypeError.
        # The parameters are kept for interface compatibility but ignored.
        return numpy.resize(self.array, newshape)
    # Array Operation
    def round(self, precision=0, out=None):
        return self.array.round(precision, out)
    # Array Operation
    def set_field(self, val, dtype, offset):
        # ndarray has no set_field method; the correct spelling is setfield.
        self.array.setfield(val, dtype, offset)
    # Array Access
    def get_num_elements(self):
        return self.array.size
    # Array Operation
    def squeeze(self):
        return self.array.squeeze()
    # Array Operation
    def get_standard_deviation(self, axis=None, dtype=None, out=None):
        return self.array.std(axis, dtype, out)
    # Array Operation
    def get_sum(self):
        return self.array.sum()
    # Array Operation
    def swap_axes(self, axis1, axis2):
        return self.array.swapaxes(axis1, axis2)
    # Array Convert
    def tofile(self, file, sep, format="%s"):
        self.array.tofile(file, sep, format)
    # Array Convert
    def tolist(self):
        return self.array.tolist()
    # Array Convert
    def tostring(self, order='C'):
        return self.array.tostring(order)
    # Array Operation
    def get_trace(self, offset, axis1, axis2, dtype=None, out=None):
        return self.array.trace(offset, axis1, axis2, dtype, out)
    # Array Access
    def get_transpose(self):
        return self.array.transpose()
    # Array Operation
    def get_variance(self, axis=None, dtype=None, out=None):
        return self.array.var(axis, dtype, out)
    # Helper function for assignment
    def get_array(self):
        return self.array
    # Helper function for assignment
    def set_array(self, a):
        self.array = a
    # Array Access
    def get_row_range(self, start, to):
        # inclusive row slice [start, to]
        return self.array[start:to+1,:]
    # Array Access
    def get_col_range(self, start, to):
        # inclusive column slice [start, to]
        return self.array[:, start:to+1]
| |
from __main__ import vtk, qt, ctk, slicer
import numpy
import copy
from math import *
from slicer.ScriptedLoadableModule import *
import os
import pickle
import time
from slicer.util import VTKObservationMixin
class ModelAddedClass(VTKObservationMixin):
  """Scene observer that keeps the AnglePlanes widget in sync with model
  and fiducial nodes added to / removed from the MRML scene."""
  def __init__(self, anglePlanes):
    VTKObservationMixin.__init__(self)
    self.addObserver(slicer.mrmlScene, slicer.vtkMRMLScene.NodeAddedEvent, self.nodeAddedCallback)
    self.addObserver(slicer.mrmlScene, slicer.vtkMRMLScene.NodeRemovedEvent, self.nodeRemovedCallback)
    self.anglePlanes = anglePlanes
  @vtk.calldata_type(vtk.VTK_OBJECT)
  def nodeAddedCallback(self, caller, eventId, callData):
    # Watch new model nodes for display and polydata changes.
    if isinstance(callData, slicer.vtkMRMLModelNode):
      callData.AddObserver(callData.DisplayModifiedEvent, self.anglePlanes.onChangeModelDisplay)
      self.addObserver(callData, callData.PolyDataModifiedEvent, self.onModelNodePolyDataModified)
      self.anglePlanes.updateOnSurfaceCheckBoxes()
  @vtk.calldata_type(vtk.VTK_OBJECT)
  def nodeRemovedCallback(self, caller, eventId, callData):
    # Detach observers from removed model nodes and drop their locators.
    if isinstance(callData, slicer.vtkMRMLModelNode):
      self.removeObserver(callData, callData.PolyDataModifiedEvent, self.onModelNodePolyDataModified)
      callData.RemoveObservers(callData.DisplayModifiedEvent)
      self.anglePlanes.removeModelPointLocator(callData.GetName())
      self.anglePlanes.updateOnSurfaceCheckBoxes()
    # Removing a markups node named "P<id>" tears down the matching plane.
    if isinstance(callData, slicer.vtkMRMLMarkupsFiducialNode):
      name = callData.GetName()
      planeid = name[len('P'):]
      name = "Plane " + planeid
      # membership tested directly on the dict; materializing .keys() first
      # was an unnecessary intermediate list
      if name in self.anglePlanes.planeControlsDictionary:
        self.anglePlanes.RemoveManualPlane(planeid)
  def onModelNodePolyDataModified(self, caller, eventId):
    self.anglePlanes.addModelPointLocator(caller.GetName(), caller.GetPolyData())
class AnglePlanesMiddleFiducial():
  """Plain value holder for a middle fiducial between two landmarks.

  Stores the two endpoints, the on-surface flag (presumably whether the
  midpoint is projected onto the model surface -- confirm against callers),
  and the associated node id.
  """
  def __init__(self, P1, P2, onSurface, nodeID):
    self.P1, self.P2 = P1, P2
    self.onSurface = onSurface
    self.nodeID = nodeID
class AnglePlanes(ScriptedLoadableModule):
  # Slicer module descriptor: fills in the metadata (title, category,
  # contributors, help and acknowledgement text) shown in the module panel.
  def __init__(self, parent):
    ScriptedLoadableModule.__init__(self, parent)
    parent.title = "Angle Planes"
    parent.categories = ["Shape Analysis"]
    parent.dependencies = []
    parent.contributors = ["Julia Lopinto", "Juan Carlos Prieto", "Francois Budin"]
    parent.helpText = """
    This Module is used to calculate the angle between two planes by using the normals.
    The user gets the choice to use two planes which are already implemented on Slicer
    or they can define a plane by using landmarks (at least 3 landmarks).
    Plane can also be saved to be reused for other models.
    This is an alpha version of the module.
    It can't be used for the moment.
    """
    parent.acknowledgementText = """
    This work was supported by the National
    Institutes of Dental and Craniofacial Research
    and Biomedical Imaging and Bioengineering of
    the National Institutes of Health under Award
    Number R01DE024450.
    """
    self.parent = parent
class AnglePlanesWidget(ScriptedLoadableModuleWidget):
def setup(self):
    """Build the full Angle Planes GUI and wire up all signal connections.

    Creates, in order: the scene/model tree section, the manage-planes
    section, the midpoint tools, the plane-selection and results sections,
    and the save/load buttons. Also installs point locators and display
    observers for every model already in the scene.
    """
    ScriptedLoadableModuleWidget.setup(self)
    self.moduleName = "AnglePlanes"
    self.i = 0
    self.logic = AnglePlanesLogic()
    # Monotonic id used to name planes ("Plane 1", "Plane 2", ...).
    self.planeControlsId = 0
    # Maps "Plane <id>" -> AnglePlanesWidgetPlaneControl widget.
    self.planeControlsDictionary = {}
    #self.midPointFiducialDictionaryID = {}
    # self.logic.initializePlane()
    # Helper nodes created by this module that must never be treated as user models.
    self.ignoredNodeNames = ('Red Volume Slice', 'Yellow Volume Slice', 'Green Volume Slice')
    # Maps slice color name -> ID of the hidden helper volume coloring that slice.
    self.colorSliceVolumes = dict()
    self.n_vector = numpy.matrix([[0], [0], [1], [1]])
    self.interactionNode = slicer.mrmlScene.GetNodeByID("vtkMRMLInteractionNodeSingleton")
    # Definition of the 2 planes
    # Collapsible button -- Scene Description
    self.loadCollapsibleButton = ctk.ctkCollapsibleButton()
    self.loadCollapsibleButton.text = "Scene"
    self.layout.addWidget(self.loadCollapsibleButton)
    # Layout within the laplace collapsible button
    self.loadFormLayout = qt.QFormLayout(self.loadCollapsibleButton)
    #--------------------------- List of Models --------------------------#
    treeView = slicer.qMRMLTreeView()
    treeView.setMRMLScene(slicer.app.mrmlScene())
    treeView.setSceneModelType('Displayable')
    treeView.sceneModel().setHorizontalHeaderLabels(["Models"])
    treeView.sortFilterProxyModel().nodeTypes = ['vtkMRMLModelNode']
    header = treeView.header()
    header.setResizeMode(0, qt.QHeaderView.Stretch)
    header.setVisible(True)
    self.loadFormLayout.addWidget(treeView)
    self.autoChangeLayout = qt.QCheckBox()
    self.autoChangeLayout.setCheckState(qt.Qt.Checked)
    self.autoChangeLayout.setTristate(False)
    self.autoChangeLayout.setText("Automatically change layout to 3D only")
    self.loadFormLayout.addWidget(self.autoChangeLayout)
    # Add vertical spacer
    self.layout.addStretch(1)
    #------------------------ Compute Bounding Box ----------------------#
    buttonFrameBox = qt.QFrame(self.parent)
    buttonFrameBox.setLayout(qt.QHBoxLayout())
    self.loadFormLayout.addWidget(buttonFrameBox)
    self.computeBox = qt.QPushButton("Compute Bounding Box around all models")
    buttonFrameBox.layout().addWidget(self.computeBox)
    self.computeBox.connect('clicked()', self.onComputeBox)
    # Disabled until at least one visible model exists (see updateOnSurfaceCheckBoxes).
    self.computeBox.setDisabled(True)
    self.CollapsibleButton = ctk.ctkCollapsibleButton()
    self.CollapsibleButton.text = "Manage planes"
    self.layout.addWidget(self.CollapsibleButton)
    self.managePlanesFormLayout = qt.QFormLayout(self.CollapsibleButton)
    self.CollapsibleButton.checked = True
    # Add planes and manage landmark addition to each plane
    addNewPlaneLayout = qt.QHBoxLayout()
    addPlaneLabel = qt.QLabel('Add new plane')
    self.addPlaneButton = qt.QPushButton(qt.QIcon(":/Icons/MarkupsAddFiducial.png"), " ")
    self.addPlaneButton.setFixedSize(50, 25)
    self.addPlaneButton.connect('clicked()', self.addNewPlane)
    self.addPlaneButton.setEnabled(True)
    addNewPlaneLayout.addWidget(addPlaneLabel)
    addNewPlaneLayout.addWidget(self.addPlaneButton)
    self.managePlanesFormLayout.addRow(addNewPlaneLayout)
    # ----------------- Compute Mid Point -------------
    self.midPointGroupBox = ctk.ctkCollapsibleButton()
    landmark1Layout = qt.QFormLayout()
    self.midPointGroupBox.setText('Define middle point between two landmarks')
    self.midPointGroupBox.collapsed = True
    self.parent.layout().addWidget(self.midPointGroupBox)
    self.selectPlaneForMidPoint = qt.QComboBox()
    self.selectPlaneForMidPoint.connect('currentIndexChanged(int)', self.onChangeMiddlePointFiducialNode)
    landmark1Layout.addRow('Choose plane: ', self.selectPlaneForMidPoint)
    self.landmarkComboBox1MidPoint = qt.QComboBox()
    self.landmarkComboBox2MidPoint = qt.QComboBox()
    landmark1Layout.addRow('Landmark A: ', self.landmarkComboBox1MidPoint)
    landmark1Layout.addRow('Landmark B: ', self.landmarkComboBox2MidPoint)
    self.midPointOnSurfaceCheckBox = qt.QCheckBox('On Surface')
    self.defineMiddlePointButton = qt.QPushButton(' Add middle point ')
    self.defineRemoveMiddlePointButton = qt.QPushButton(' Remove middle point ')
    middlePointLayout = qt.QHBoxLayout()
    middlePointLayout.addWidget(self.defineMiddlePointButton)
    middlePointLayout.addWidget(self.defineRemoveMiddlePointButton)
    middlePointLayout.addWidget(self.midPointOnSurfaceCheckBox)
    landmark1Layout.addRow(middlePointLayout)
    self.midPointGroupBox.setLayout(landmark1Layout)
    # Disabled until a plane exists (see addNewPlane / RemoveManualPlane).
    self.midPointGroupBox.setDisabled(True)
    self.defineMiddlePointButton.connect('clicked()', self.onAddMidPoint)
    self.defineRemoveMiddlePointButton.connect('clicked()', self.onRemoveMidPoint)
    self.landmarkComboBox1MidPoint.connect('currentIndexChanged(int)', self.onUpdateMidPoint)
    self.landmarkComboBox2MidPoint.connect('currentIndexChanged(int)', self.onUpdateMidPoint)
    self.midPointOnSurfaceCheckBox.connect('stateChanged(int)', self.onSurfaceMidPointStateChanged)
    # -------- Calculate angles between planes ------------
    self.CollapsibleButtonPlane = ctk.ctkCollapsibleButton()
    self.CollapsibleButtonPlane.text = "Choose planes"
    self.layout.addWidget(self.CollapsibleButtonPlane)
    sampleFormLayoutPlane = qt.QFormLayout(self.CollapsibleButtonPlane)
    self.planeComboBox1 = qt.QComboBox()
    self.fillColorsComboBox(self.planeComboBox1)
    sampleFormLayoutPlane.addRow("Select plane 1: ", self.planeComboBox1)
    self.planeComboBox2 = qt.QComboBox()
    self.fillColorsComboBox(self.planeComboBox2)
    sampleFormLayoutPlane.addRow("Select plane 2: ", self.planeComboBox2)
    self.CollapsibleButton2 = ctk.ctkCollapsibleButton()
    self.CollapsibleButton2.text = "Results"
    self.layout.addWidget(self.CollapsibleButton2)
    sampleFormLayout2 = qt.QFormLayout(self.CollapsibleButton2)
    self.results = qt.QPushButton("Results")
    self.results.connect('clicked()', self.angleValue)
    sampleFormLayout2.addWidget(self.results)
    # Result labels: one (angle, complementary angle) pair per view axis.
    label_RL = qt.QLabel("R-L View")
    self.getAngle_RL = qt.QLabel("0")
    label_SI = qt.QLabel("S-I View")
    self.getAngle_SI = qt.QLabel("0")
    label_AP = qt.QLabel("A-P View")
    self.getAngle_AP = qt.QLabel("0")
    self.getAngle_RL_comp = qt.QLabel("0")
    self.getAngle_SI_comp = qt.QLabel("0")
    self.getAngle_AP_comp = qt.QLabel("0")
    tableResult = qt.QTableWidget(3, 3)
    tableResult.setColumnCount(3)
    tableResult.setHorizontalHeaderLabels([' View ', 'Angle', 'Complementary angle'])
    tableResult.setColumnWidth(0, 80)
    tableResult.setColumnWidth(1, 80)
    tableResult.setColumnWidth(2, 180)
    tableResult.setRowCount(1)
    tableResult.setCellWidget(0, 0, label_RL)
    tableResult.setCellWidget(0, 1, self.getAngle_RL)
    tableResult.setCellWidget(0, 2, self.getAngle_RL_comp)
    tableResult.setRowCount(2)
    tableResult.setCellWidget(1, 0, label_SI)
    tableResult.setCellWidget(1, 1, self.getAngle_SI)
    tableResult.setCellWidget(1, 2, self.getAngle_SI_comp)
    tableResult.setRowCount(3)
    tableResult.setCellWidget(2, 0, label_AP)
    tableResult.setCellWidget(2, 1, self.getAngle_AP)
    tableResult.setCellWidget(2, 2, self.getAngle_AP_comp)
    # Add vertical spacer
    self.layout.addStretch(1)
    sampleFormLayout2.addWidget(tableResult)
    self.CollapsibleButton3 = ctk.ctkCollapsibleButton()
    self.CollapsibleButton3.text = "Save"
    self.layout.addWidget(self.CollapsibleButton3)
    sampleFormLayout3 = qt.QFormLayout(self.CollapsibleButton3)
    self.CollapsibleButton3.checked = False
    buttonFrame = qt.QFrame(self.parent)
    buttonFrame.setLayout(qt.QVBoxLayout())
    sampleFormLayout3.addWidget(buttonFrame)
    #-------------------------------- PLANES --------------------------------#
    save_plane = qt.QLabel("Save the planes you create as a txt file.")
    buttonFrame.layout().addWidget(save_plane)
    save = qt.QPushButton("Save plane")
    buttonFrame.layout().addWidget(save)
    # load_plane = qt.QLabel("Load the file with the plane you saved.")
    # buttonFrame.layout().addWidget(load_plane)
    read = qt.QPushButton("Load plane")
    buttonFrame.layout().addWidget(read)
    #-------------------------------- CONNECTIONS --------------------------------#
    self.planeComboBox1.connect('currentIndexChanged(QString)', self.valueComboBox)
    self.planeComboBox2.connect('currentIndexChanged(QString)', self.valueComboBox)
    # Setting combo boxes at different values/index otherwise infinite loop
    self.planeComboBox1.setCurrentIndex(0)
    self.planeComboBox2.setCurrentIndex(1)
    self.valueComboBox()
    save.connect('clicked(bool)', self.onSavePlanes)
    read.connect('clicked(bool)', self.onReadPlanes)
    slicer.mrmlScene.AddObserver(slicer.mrmlScene.EndCloseEvent, self.onCloseScene)
    # Build point locators and display observers for models already in the scene.
    self.pointLocatorDictionary = {}
    for i in self.getPositionOfModelNodes(False):
        modelnode = slicer.mrmlScene.GetNthNodeByClass(i, "vtkMRMLModelNode")
        self.addModelPointLocator(modelnode.GetName(), modelnode.GetPolyData())
        modelnode.AddObserver(modelnode.DisplayModifiedEvent, self.onChangeModelDisplay)
    self.middleFiducialDictionary = dict()
    # Installs scene-level observers for nodes added/removed after setup.
    ModelAddedClass(self)
    self.onUpdateMidPoint()
def canAddMiddlePoint(self):
    """Return True when both landmark combo boxes are populated and select different landmarks."""
    box1 = self.landmarkComboBox1MidPoint
    box2 = self.landmarkComboBox2MidPoint
    if box1.count == 0 or box2.count == 0:
        return False
    return box1.currentText != box2.currentText
def onUpdateMidPoint(self, remove=False):
    """Sync the add/remove middle-point buttons with the current landmark selection.

    remove: when True, an existing midpoint matching the current landmark
    pair is deleted as a side effect of the existence check.
    """
    if self.currentMidPointExists(remove):
        # A midpoint already exists: only removal makes sense.
        self.defineRemoveMiddlePointButton.setDisabled(False)
        self.defineMiddlePointButton.setDisabled(True)
    else:
        self.defineRemoveMiddlePointButton.setDisabled(True)
        self.defineMiddlePointButton.setDisabled(False)
    # (Removed unused local 'disableMiddlePointSurfaceCheckbox' — dead code.)
    if not self.canAddMiddlePoint():
        self.defineMiddlePointButton.setDisabled(True)
    self.updateOnSurfaceCheckBoxes()
def onSurfaceMidPointStateChanged(self):
    """Propagate the 'On Surface' checkbox to the current midpoint record and refresh its plane."""
    structureKey = self.getCurrentMidPointFiducialStructure()
    if structureKey == '':
        return
    record = self.middleFiducialDictionary[structureKey]
    record.onSurface = self.midPointOnSurfaceCheckBox.isChecked()
    planeName = self.selectPlaneForMidPoint.currentText
    if planeName in self.planeControlsDictionary.keys():
        self.planeControlsDictionary[planeName].update()
def onChangeMiddlePointFiducialNode(self):
    """Repopulate both midpoint landmark combo boxes from the selected plane's fiducials,
    restoring each box's previous selection when still present."""
    for x in [self.landmarkComboBox1MidPoint, self.landmarkComboBox2MidPoint]:
        # Remember the selection so it can be restored after refilling.
        current = x.currentText
        x.clear()
        node = self.selectedMiddlePointPlane()
        if not node:
            # NOTE(review): returning here leaves the first combo box cleared
            # but the second untouched — presumably acceptable; confirm.
            return
        for i in range(0, node.GetNumberOfMarkups()):
            x.addItem(node.GetNthFiducialLabel(i))
        if x.findText(current) > -1:
            x.setCurrentIndex(x.findText(current))
def onChangeModelDisplay(self, obj, event):
    """Observer callback: a model's display changed, so re-evaluate the 'On Surface' checkboxes."""
    self.updateOnSurfaceCheckBoxes()
def fillColorsComboBox(self, planeComboBox):
    """Fill a plane combo box with the three slice colors plus every fully defined manual plane."""
    planeComboBox.clear()
    planeComboBox.addItem("Red")
    planeComboBox.addItem("Yellow")
    planeComboBox.addItem("Green")
    # During setup() this can run before planeControlsDictionary exists; a
    # missing attribute raises AttributeError (the original only caught
    # NameError, which a missing 'self' attribute never raises). Guard both
    # and simply skip the manual planes in that case.
    try:
        for x in self.planeControlsDictionary.keys():
            if self.planeControlsDictionary[x].PlaneIsDefined():
                planeComboBox.addItem(x)
    except (NameError, AttributeError):
        pass
def updateOnSurfaceCheckBoxes(self):
    """Enable or disable every 'On Surface' control depending on model visibility."""
    visibleModelCount = len(self.getPositionOfModelNodes(True))
    # if they are new models and if they are visible, allow to select "on surface" to place new fiducials
    if visibleModelCount > 0:
        self.computeBox.setDisabled(False)
        if self.currentMidPointExists():
            structureKey = self.getCurrentMidPointFiducialStructure()
            self.midPointOnSurfaceCheckBox.setDisabled(False)
            self.midPointOnSurfaceCheckBox.setChecked(
                self.middleFiducialDictionary[structureKey].onSurface)
        else:
            self.midPointOnSurfaceCheckBox.setChecked(False)
            self.midPointOnSurfaceCheckBox.setDisabled(True)
        for planeControls in self.planeControlsDictionary.values():
            planeControls.surfaceDeplacementCheckBox.setDisabled(False)
    # else there are no visible models or if they are not visible, disable "on surface" to place new fiducials
    else:
        self.computeBox.setDisabled(True)
        self.midPointOnSurfaceCheckBox.setDisabled(True)
        self.midPointOnSurfaceCheckBox.setChecked(False)
        for planeControls in self.planeControlsDictionary.values():
            planeControls.surfaceDeplacementCheckBox.setChecked(False)
            planeControls.surfaceDeplacementCheckBox.setDisabled(True)
def getPositionOfModelNodes(self, onlyVisible):
    """Return the scene indices of model nodes, skipping helper nodes on the
    ignore list and, when onlyVisible is True, hidden models."""
    total = slicer.mrmlScene.GetNumberOfNodesByClass("vtkMRMLModelNode")
    positions = list()
    for index in range(total):
        candidate = slicer.mrmlScene.GetNthNodeByClass(index, "vtkMRMLModelNode")
        if candidate.GetName() in self.ignoredNodeNames:
            continue
        if onlyVisible is True and candidate.GetDisplayVisibility() == 0:
            continue
        positions.append(index)
    return positions
def enter(self):
    """Module-activation hook: switch to the 3D layout, show manual planes, fit views."""
    if self.autoChangeLayout.isChecked():
        lm = slicer.app.layoutManager()
        # Remember the current layout so exit() can restore it.
        self.currentLayout = lm.layout
        lm.setLayout(4)  # 3D-View
    # Show manual planes
    for planeControls in self.planeControlsDictionary.values():
        if planeControls.PlaneIsDefined():
            planeControls.logic.planeLandmarks(planeControls.landmark1ComboBox.currentIndex, planeControls.landmark2ComboBox.currentIndex,
                                               planeControls.landmark3ComboBox.currentIndex, planeControls.slider.value, planeControls.slideOpacity.value)
    self.valueComboBox()
    self.onComputeBox()
def exit(self):
    """Module-deactivation hook: remove helper volumes, hide planes, restore the layout."""
    # Remove hidden nodes that are created just for Angle Planes.
    for volumeID in self.colorSliceVolumes.values():
        node = slicer.mrmlScene.GetNodeByID(volumeID)
        # Unhide *before* removal — the original called SetHideFromEditors on
        # a node it had already removed from the scene.
        node.SetHideFromEditors(False)
        slicer.mrmlScene.RemoveNode(node)
    self.colorSliceVolumes = dict()
    # Hide manual planes (render them with opacity 0).
    for planeControls in self.planeControlsDictionary.values():
        if planeControls.PlaneIsDefined():
            planeControls.logic.planeLandmarks(planeControls.landmark1ComboBox.currentIndex, planeControls.landmark2ComboBox.currentIndex,
                                               planeControls.landmark3ComboBox.currentIndex, planeControls.slider.value, 0)
    # Hide the standard slice planes.
    for x in self.logic.ColorNodeCorrespondence.keys():
        compNode = slicer.util.getNode('vtkMRMLSliceCompositeNode' + x)
        compNode.SetLinkedControl(False)
        # Renamed from 'slice', which shadowed the builtin.
        sliceNode = slicer.mrmlScene.GetNodeByID(self.logic.ColorNodeCorrespondence[x])
        sliceNode.SetWidgetVisible(False)
        sliceNode.SetSliceVisible(False)
    # Reset layout
    if self.autoChangeLayout.isChecked():
        lm = slicer.app.layoutManager()
        if lm.layout == 4:  # the user has not manually changed the layout
            lm.setLayout(self.currentLayout)
def removeModelPointLocator(self, name):
    """Drop the cached point locator for the named model, if one exists."""
    locator = self.pointLocatorDictionary.pop(name, None)
    if locator is not None:
        print("Removing point locator {0}".format(name))
def addModelPointLocator(self, name, polydata):
    """Build and cache a vtkPointLocator for the named model's polydata.

    Models already cached and models on the ignore list are skipped.
    """
    if name in self.pointLocatorDictionary or name in self.ignoredNodeNames:
        return
    # Function-style print for consistency with removeModelPointLocator
    # (the original used a Python-2-only print statement here).
    print("Adding point locator: {0}".format(name))
    pointLocator = vtk.vtkPointLocator()
    pointLocator.SetDataSet(polydata)
    pointLocator.AutomaticOn()
    pointLocator.BuildLocator()
    self.pointLocatorDictionary[name] = pointLocator
def addNewPlane(self, keyLoad=-1):
    """Create a new manual-plane control widget; keyLoad (when not -1) reuses a saved plane id."""
    self.planeControlsId = keyLoad if keyLoad != -1 else self.planeControlsId + 1
    if len(self.planeControlsDictionary) >= 1:
        self.addPlaneButton.setDisabled(True)
    controls = AnglePlanesWidgetPlaneControl(self, self.planeControlsId, self.pointLocatorDictionary)
    self.managePlanesFormLayout.addRow(controls)
    planeKey = "Plane " + str(self.planeControlsId)
    self.planeControlsDictionary[planeKey] = controls
    self.updatePlanesComboBoxes()
    # Midpoint tools become usable once at least one plane exists.
    self.midPointGroupBox.setDisabled(False)
    self.selectPlaneForMidPoint.addItem(planeKey)
def RemoveManualPlane(self, id):
    """Remove the manual plane with the given id: its control widget, its combo-box
    entries, the observers on its fiducial node, and optionally the node itself."""
    key = "Plane " + str(id)
    # If the plane has already been removed (for example, when removing this plane in this function,
    # the callback on removing the nodes will be called, and therefore this function will be called again
    # We need to not do anything the second time this function is called for the same plane
    if key not in self.planeControlsDictionary.keys():
        return
    fiducialList = slicer.util.getNode('P' + str(id))
    planeControls = self.planeControlsDictionary[key]
    self.managePlanesFormLayout.removeWidget(planeControls)
    self.planeControlsDictionary[key].deleteLater()
    self.planeControlsDictionary.pop(key)
    self.addPlaneButton.setDisabled(False)
    # No planes left: the midpoint tools are meaningless.
    if len(self.planeControlsDictionary.keys()) == 0:
        self.midPointGroupBox.setDisabled(True)
        self.midPointGroupBox.collapsed = True
    self.updatePlanesComboBoxes()
    self.valueComboBox()
    if self.selectPlaneForMidPoint.findText(key) > -1:
        self.selectPlaneForMidPoint.removeItem(self.selectPlaneForMidPoint.findText(key))
    if fiducialList:
        # fiducialList.SetDisplayVisibility(False)
        # Detach every observer tag that AnglePlanesWidgetPlaneControl stored on the node.
        fiducialList.RemoveObserver(fiducialList.onFiducialAddedObserverTag)
        fiducialList.RemoveObserver(fiducialList.onFiducialRemovedObserverTag)
        fiducialList.RemoveObserver(fiducialList.setPointModifiedEventObserverTag)
        fiducialList.RemoveObserver(fiducialList.onFiducialAddedMidPointObserverTag)
        fiducialList.RemoveObserver(fiducialList.onFiducialRemovedMidPointObserverTag)
        if planeControls.removeFiducials.checkState() == qt.Qt.Checked:
            slicer.app.mrmlScene().RemoveNode(fiducialList)
def onComputeBox(self):
    """Fit a bounding box around all visible models, color the slice planes
    with helper volumes sized to it, and recenter every 2D/3D view."""
    positionOfVisibleNodes = self.getPositionOfModelNodes(True)
    if len(positionOfVisibleNodes) == 0:
        return
    maxValue = slicer.sys.float_info.max
    # bound = [xmin, xmax, ymin, ymax, zmin, zmax], seeded with sentinels so
    # any real bounds replace them.
    bound = [maxValue, -maxValue, maxValue, -maxValue, maxValue, -maxValue]
    for i in positionOfVisibleNodes:
        node = slicer.mrmlScene.GetNthNodeByClass(i, "vtkMRMLModelNode")
        polydata = node.GetPolyData()
        if polydata is None or not hasattr(polydata, "GetBounds"):
            continue
        tempbound = polydata.GetBounds()
        # Merge the per-axis minima and maxima into the running box.
        for axis in range(3):
            bound[axis * 2] = min(bound[axis * 2], tempbound[axis * 2])
            bound[axis * 2 + 1] = max(bound[axis * 2 + 1], tempbound[axis * 2 + 1])
    # --------------------------- Box around the model --------------------------#
    dim = []
    origin = []
    for x in range(0, 3):
        dim.append(bound[x * 2 + 1] - bound[x * 2])
        origin.append(bound[x * 2] + dim[x] / 2)
        dim[x] *= 1.1
    # Label values used to color each slice's helper volume.
    dictColors = {'Red': 32, 'Yellow': 15, 'Green': 1}
    for x in dictColors.keys():
        sampleVolumeNode = self.CreateNewNode(x, dictColors[x], dim, origin)
        compNode = slicer.util.getNode('vtkMRMLSliceCompositeNode' + x)
        compNode.SetLinkedControl(False)
        compNode.SetBackgroundVolumeID(sampleVolumeNode.GetID())
        # Function-style print (the original used a Python-2-only print statement).
        print("set background" + x)
    lm = slicer.app.layoutManager()
    # Reset and fit 2D-views
    lm.resetSliceViews()
    for x in dictColors.keys():
        logic = lm.sliceWidget(x)
        node = logic.mrmlSliceNode()
        node.SetSliceResolutionMode(node.SliceResolutionMatch2DView)
        logic.fitSliceToBackground()
    # Reset pink box around models
    for i in range(0, lm.threeDViewCount):
        threeDView = lm.threeDWidget(i).threeDView()
        threeDView.resetFocalPoint()
        # Reset camera in 3D view to center the models and position the camera so that all actors can be seen
        threeDView.renderWindow().GetRenderers().GetFirstRenderer().ResetCamera()
def CreateNewNode(self, colorName, color, dim, origin):
    """Return (creating it on first use) the hidden 1-voxel labelmap volume used to
    color the given slice plane, resized/repositioned to the supplied box."""
    # we add a pseudo-random number to the name of our empty volume to avoid the risk of having a volume called
    # exactly the same by the user which could be confusing. We could also have used slicer.app.sessionId()
    if colorName not in self.colorSliceVolumes.keys():
        VolumeName = "AnglePlanes_EmptyVolume_" + str(slicer.app.applicationPid()) + "_" + colorName
        # Do NOT set the spacing and the origin of imageData (vtkImageData)
        # The spacing and the origin should only be set in the vtkMRMLScalarVolumeNode!!!!!!
        # We only create an image of 1 voxel (as we only use it to color the planes
        imageData = vtk.vtkImageData()
        imageData.SetDimensions(1, 1, 1)
        imageData.AllocateScalars(vtk.VTK_UNSIGNED_CHAR, 1)
        imageData.SetScalarComponentFromDouble(0, 0, 0, 0, color)
        # Newer Slicer versions have a dedicated labelmap node class; fall back
        # to a scalar volume flagged as a labelmap on older versions (below).
        if hasattr(slicer, 'vtkMRMLLabelMapVolumeNode'):
            sampleVolumeNode = slicer.vtkMRMLLabelMapVolumeNode()
        else:
            sampleVolumeNode = slicer.vtkMRMLScalarVolumeNode()
        sampleVolumeNode = slicer.mrmlScene.AddNode(sampleVolumeNode)
        sampleVolumeNode.SetName(VolumeName)
        labelmapVolumeDisplayNode = slicer.vtkMRMLLabelMapVolumeDisplayNode()
        slicer.mrmlScene.AddNode(labelmapVolumeDisplayNode)
        colorNode = slicer.util.getNode('GenericAnatomyColors')
        labelmapVolumeDisplayNode.SetAndObserveColorNodeID(colorNode.GetID())
        sampleVolumeNode.SetAndObserveImageData(imageData)
        sampleVolumeNode.SetAndObserveDisplayNodeID(labelmapVolumeDisplayNode.GetID())
        labelmapVolumeDisplayNode.VisibilityOn()
        self.colorSliceVolumes[colorName] = sampleVolumeNode.GetID()
    # Always refresh geometry: the bounding box may have changed since creation.
    sampleVolumeNode = slicer.mrmlScene.GetNodeByID(self.colorSliceVolumes[colorName])
    sampleVolumeNode.SetOrigin(origin[0], origin[1], origin[2])
    sampleVolumeNode.SetSpacing(dim[0], dim[1], dim[2])
    if not hasattr(slicer, 'vtkMRMLLabelMapVolumeNode'):
        sampleVolumeNode.SetLabelMap(1)
    sampleVolumeNode.SetHideFromEditors(True)
    sampleVolumeNode.SetSaveWithScene(False)
    return sampleVolumeNode
def selectedMiddlePointPlane(self):
    """Return the markup fiducial node backing the plane chosen for midpoints, or None."""
    planeName = self.selectPlaneForMidPoint.currentText
    if planeName not in self.planeControlsDictionary.keys():
        return None
    planeId = self.planeControlsDictionary[planeName].id
    nodes = slicer.mrmlScene.GetNodesByClassByName('vtkMRMLMarkupsFiducialNode', 'P' + str(planeId))
    return nodes.GetItemAsObject(0)
def computeMidPointPosition(self, node, p1ID, p2ID, coord):
    """Write the midpoint of two fiducials (identified by markup ID) into coord.

    Returns 0 on success; returns 1 (leaving coord untouched) when node is
    missing or either fiducial cannot be found.
    """
    if not node:
        return 1
    positions = []
    for markupID in (p1ID, p2ID):
        index = node.GetMarkupIndexByID(markupID)
        if index == -1:
            continue
        position = numpy.zeros(3)
        node.GetNthFiducialPosition(index, position)
        positions.append(position)
    if len(positions) != 2:
        # Function-style print (the original used a Python-2-only print statement).
        print("Error: Fiducials not found in lists")
        return 1
    midpoint = (positions[0] + positions[1]) / 2
    for i in range(0, 3):
        coord[i] = midpoint[i]
    return 0
def getFiducialIDFromName(self, node, name):
    """Return the markup ID of the fiducial whose label equals name, or '' if absent."""
    for index in range(node.GetNumberOfMarkups()):
        if node.GetNthFiducialLabel(index) == name:
            return node.GetNthMarkupID(index)
    return ''
def onAddMidPoint(self):
    """Add a locked, deselected midpoint fiducial between the two selected landmarks."""
    if self.currentMidPointExists():
        # Function-style prints throughout (the originals were Python-2-only statements).
        print("Mid point already exists")
        return
    node = self.selectedMiddlePointPlane()
    names = (self.landmarkComboBox1MidPoint.currentText,
             self.landmarkComboBox2MidPoint.currentText)
    ids = [self.getFiducialIDFromName(node, name) for name in names]
    if '' in ids:
        print("Error: Fiducials not found in lists")
        return
    coordinates = numpy.zeros(3)
    self.computeMidPointPosition(node, ids[0], ids[1], coordinates)
    node.AddFiducial(coordinates[0], coordinates[1], coordinates[2], names[0] + "-" + names[1] + "-mid-pt")
    newFiducial = node.GetNumberOfMarkups() - 1
    # The midpoint is derived data: deselect it and lock it against manual moves.
    node.SetNthFiducialSelected(newFiducial, False)
    node.SetNthMarkupLocked(newFiducial, True)
    middleFiducial = AnglePlanesMiddleFiducial(ids[0], ids[1], self.midPointOnSurfaceCheckBox.isChecked(), node.GetID())
    self.middleFiducialDictionary[node.GetNthMarkupID(newFiducial)] = middleFiducial
    self.onUpdateMidPoint()
def currentMidPointExists(self, remove=False):
    """Return True when a midpoint for the currently selected landmark pair exists.

    When remove is True a matching midpoint markup is deleted and False is
    returned instead.
    """
    # Hoisted out of the loop (it is constant across iterations) and guarded:
    # the original would crash on node.GetID() when no plane was selected.
    node = self.selectedMiddlePointPlane()
    if node is None:
        return False
    L1 = self.getFiducialIDFromName(node, self.landmarkComboBox1MidPoint.currentText)
    L2 = self.getFiducialIDFromName(node, self.landmarkComboBox2MidPoint.currentText)
    for markupID in list(self.middleFiducialDictionary.keys()):
        middleFiducial = self.middleFiducialDictionary[markupID]
        if node.GetID() != middleFiducial.nodeID:
            continue
        P1 = middleFiducial.P1
        P2 = middleFiducial.P2
        # Match the landmark pair in either order.
        if P1 == L1 and P2 == L2 or P1 == L2 and P2 == L1:
            if remove is True:
                node.RemoveMarkup(node.GetMarkupIndexByID(markupID))
                return False
            else:
                return True
    return False
def getCurrentMidPointFiducialStructure(self):
    """Return the markup ID of the midpoint matching the current landmark pair, or ''."""
    if not self.currentMidPointExists():
        return ''
    # Hoisted out of the loop: the selected plane does not change per iteration.
    node = self.selectedMiddlePointPlane()
    if node is None:
        return ''
    L1 = self.getFiducialIDFromName(node, self.landmarkComboBox1MidPoint.currentText)
    L2 = self.getFiducialIDFromName(node, self.landmarkComboBox2MidPoint.currentText)
    for markupID, middleFiducial in self.middleFiducialDictionary.items():
        if node.GetID() != middleFiducial.nodeID:
            continue
        P1 = middleFiducial.P1
        P2 = middleFiducial.P2
        # Match the landmark pair in either order.
        if P1 == L1 and P2 == L2 or P1 == L2 and P2 == L1:
            return markupID
    return ''
def onRemoveMidPoint(self):
    """Button callback: delete the midpoint for the current landmark pair and refresh the buttons."""
    self.onUpdateMidPoint(remove=True)
def onFiducialChangedMidPoint(self, obj, event):
    """Observer callback: refresh the midpoint combo boxes when fiducials change
    on the plane currently selected for midpoints."""
    selectedNode = self.selectedMiddlePointPlane()
    if selectedNode and obj == selectedNode:
        self.onChangeMiddlePointFiducialNode()
def fiducialInList(self, name, fidlist):
    """Return True if a fiducial labeled name exists in fidlist."""
    return any(fidlist.GetNthFiducialLabel(i) == name
               for i in range(fidlist.GetNumberOfFiducials()))
def onCloseScene(self, obj, event):
    """Scene-close observer: tear down every manual plane and reset module state."""
    self.middleFiducialDictionary = dict()
    self.colorSliceVolumes = dict()
    # BUG FIX: the original did `for x in keys[len('Plane '):]`, slicing the
    # *list of keys* by 6 — it skipped the first six planes and passed full
    # keys where RemoveManualPlane expects a bare id. Strip the 'Plane '
    # prefix from each key instead. Iterate over a copy because
    # RemoveManualPlane mutates the dictionary.
    for key in list(self.planeControlsDictionary.keys()):
        self.RemoveManualPlane(key[len('Plane '):])
    self.planeControlsDictionary = dict()
    # globals()[self.moduleName] = slicer.util.reloadScriptedModule(self.moduleName)
def angleValue(self):
    """Recompute the angles and push every result into its display label."""
    self.valueComboBox()
    displayPairs = (
        (self.getAngle_RL, self.logic.angle_degre_RL),
        (self.getAngle_RL_comp, self.logic.angle_degre_RL_comp),
        (self.getAngle_SI, self.logic.angle_degre_SI),
        (self.getAngle_SI_comp, self.logic.angle_degre_SI_comp),
        (self.getAngle_AP, self.logic.angle_degre_AP),
        (self.getAngle_AP_comp, self.logic.angle_degre_AP_comp),
    )
    for label, value in displayPairs:
        label.setText(value)
def setFirstItemInComboBoxNotGivenString(self, comboBox, oldString, noThisString):
    """Re-select oldString if it is still present; otherwise select the first
    item that is not noThisString."""
    restoredIndex = comboBox.findText(oldString)
    if restoredIndex != -1:
        comboBox.setCurrentIndex(restoredIndex)
        return
    for position in range(comboBox.count):
        candidate = comboBox.itemText(position)
        if candidate != noThisString:
            comboBox.setCurrentIndex(comboBox.findText(candidate))
            break
def updatePlanesComboBoxes(self):
    """Refill both plane combo boxes while keeping their selections distinct."""
    # Block signals so refilling does not trigger valueComboBox recursively.
    self.planeComboBox1.blockSignals(True)
    self.planeComboBox2.blockSignals(True)
    colorPlane1 = self.planeComboBox1.currentText
    colorPlane2 = self.planeComboBox2.currentText
    # Reset Combo boxes
    self.fillColorsComboBox(self.planeComboBox1)
    self.fillColorsComboBox(self.planeComboBox2)
    # Each box must not offer the plane selected in the other one.
    self.planeComboBox2.removeItem(self.planeComboBox2.findText(colorPlane1))
    self.planeComboBox1.removeItem(self.planeComboBox1.findText(colorPlane2))
    self.setFirstItemInComboBoxNotGivenString(self.planeComboBox1, colorPlane1, colorPlane2)
    self.setFirstItemInComboBoxNotGivenString(self.planeComboBox2, colorPlane2, colorPlane1)
    self.planeComboBox1.blockSignals(False)
    self.planeComboBox2.blockSignals(False)
def valueComboBox(self):
    """Refresh the plane combo boxes, hide every slice plane, then compute
    (and show) the currently selected pair."""
    self.updatePlanesComboBoxes()
    # Hide everything before showing what is necessary
    for x in self.logic.ColorNodeCorrespondence.keys():
        compNode = slicer.util.getNode('vtkMRMLSliceCompositeNode' + x)
        compNode.SetLinkedControl(False)
        # Renamed from 'slice', which shadowed the builtin.
        sliceNode = slicer.mrmlScene.GetNodeByID(self.logic.ColorNodeCorrespondence[x])
        sliceNode.SetWidgetVisible(False)
        sliceNode.SetSliceVisible(False)
    colorPlane1 = self.planeComboBox1.currentText
    colorPlane2 = self.planeComboBox2.currentText
    self.defineAngle(colorPlane1, colorPlane2)
def modify(self, obj, event):
    """Observer callback: recompute the angle for the currently selected planes."""
    plane1 = self.planeComboBox1.currentText
    plane2 = self.planeComboBox2.currentText
    self.defineAngle(plane1, plane2)
def defineAngle(self, colorPlane1, colorPlane2):
    """Compute the angle between two planes and store it on self.logic.

    Each argument is either a standard slice color ('Red'/'Yellow'/'Green')
    or the name of a manual plane from planeControlsDictionary.
    """
    # Function-style prints (the originals were Python-2-only statements).
    print("DEFINE ANGLE")
    print(colorPlane1)
    if colorPlane1 in self.logic.ColorNodeCorrespondence:
        slice1 = slicer.util.getNode(self.logic.ColorNodeCorrespondence[colorPlane1])
        # NOTE(review): getMatrix is called twice here, as in the original —
        # presumably redundant, kept in case it has side effects; confirm.
        self.logic.getMatrix(slice1)
        slice1.SetWidgetVisible(True)
        slice1.SetSliceVisible(True)
        matrix1 = self.logic.getMatrix(slice1)
        normal1 = self.logic.defineNormal(matrix1)
    else:
        normal1 = self.planeControlsDictionary[colorPlane1].logic.N
    print(colorPlane2)
    if colorPlane2 in self.logic.ColorNodeCorrespondence:
        slice2 = slicer.util.getNode(self.logic.ColorNodeCorrespondence[colorPlane2])
        self.logic.getMatrix(slice2)
        slice2.SetWidgetVisible(True)
        slice2.SetSliceVisible(True)
        matrix2 = self.logic.getMatrix(slice2)
        normal2 = self.logic.defineNormal(matrix2)
    else:
        normal2 = self.planeControlsDictionary[colorPlane2].logic.N
    self.logic.getAngle(normal1, normal2)
def onSavePlanes(self):
    """Button callback: prompt for a destination file and save all planes."""
    self.savePlanes(None)
def savePlanes(self, filename=None):
    """Pickle the three slice matrices and all manual-plane fiducials to filename.

    Prompts with a save dialog when filename is None; silently does nothing
    when the dialog is cancelled (empty filename).
    """
    tempDictionary = {}
    # The three repeated blocks of the original are folded into one loop.
    for color in ('Red', 'Yellow', 'Green'):
        sliceNode = slicer.util.getNode(self.logic.ColorNodeCorrespondence[color])
        tempDictionary[color] = self.logic.getMatrix(sliceNode).tolist()
    tempDictionary["customPlanes"] = {}
    for key, plane in self.planeControlsDictionary.items():
        tempDictionary["customPlanes"][plane.id] = plane.getFiducials()
    # Function-style print (the original used a Python-2-only print statement).
    print(filename)
    if filename is None:
        filename = qt.QFileDialog.getSaveFileName(parent=self, caption='Save file')
    if filename != "":
        # 'with' guarantees the file is closed even if pickling fails.
        with open(filename, "wb") as fileObj:
            pickle.dump(tempDictionary, fileObj)
def onReadPlanes(self):
    """Button callback: prompt for a source file and load the planes from it."""
    self.readPlanes(None)
def readPlanes(self, filename=None):
    """Load slice matrices and manual planes previously written by savePlanes.

    Prompts with an open dialog when filename is None; does nothing when the
    dialog is cancelled (empty filename).
    """
    if filename is None:
        filename = qt.QFileDialog.getOpenFileName(parent=self, caption='Open file')
    if filename == "":
        return
    # 'with' guarantees the file is closed even if unpickling fails.
    with open(filename, "rb") as fileObj:
        tempDictionary = pickle.load(fileObj)
    # Restore each standard slice node's SliceToRAS matrix. The three
    # duplicated blocks of the original are folded into one loop.
    for color in ('Red', 'Yellow', 'Green'):
        node = slicer.mrmlScene.GetNodeByID('vtkMRMLSliceNode' + color)
        matList = tempDictionary[color]
        matNode = node.GetSliceToRAS()
        for col in range(0, len(matList)):
            for row in range(0, len(matList[col])):
                matNode.SetElement(col, row, matList[col][row])
    # Recreate each manual plane and repopulate its fiducial list.
    customPlanes = tempDictionary["customPlanes"]
    for key, fidlist in customPlanes.items():
        self.addNewPlane(key)
        tempkey = "Plane " + str(self.planeControlsId)
        currentFidList = self.planeControlsDictionary[tempkey].logic.getFiducialList()
        for f in fidlist:
            currentFidList.AddFiducial(f[0], f[1], f[2])
# This widget controls each of the planes that are added to the interface.
# The widget contains its own logic, i.e. an object of AnglePlanesLogic.
# Each plane contains a separate fiducial list. The planes are named P1, P2, ..., PN. The landmarks of
# plane P1 are named P1-1, P1-2, ..., P1-N.
class AnglePlanesWidgetPlaneControl(qt.QFrame):
def __init__(self, anglePlanes, id, pointlocatordictionary):
qt.QFrame.__init__(self)
self.id = id
self.setLayout(qt.QFormLayout())
self.pointLocatorDictionary = pointlocatordictionary
self.logic = AnglePlanesLogic(id)
landmarkLayout = qt.QVBoxLayout()
planeLabelLayout = qt.QHBoxLayout()
planeLabel = qt.QLabel('Plane ' + str(id) + ":")
planeLabelLayout.addWidget(planeLabel)
planeLabelLayout.addStretch()
addFiducialLabel = qt.QLabel('Add')
addFiducialButton = qt.QPushButton(qt.QIcon(":/Icons/AnnotationPointWithArrow.png"), " ")
addFiducialButton.setFixedSize(50, 25)
addFiducialButton.connect('clicked()', self.addLandMarkClicked)
addFiducialButton.setEnabled(True)
planeLabelLayout.addWidget(addFiducialLabel)
planeLabelLayout.addWidget(addFiducialButton)
numberOfNodes = len(anglePlanes.getPositionOfModelNodes(True))
self.surfaceDeplacementCheckBox = qt.QCheckBox("On Surface")
if numberOfNodes > 0:
self.surfaceDeplacementCheckBox.setChecked(True)
else:
self.surfaceDeplacementCheckBox.setDisabled(True)
self.surfaceDeplacementCheckBox.connect('stateChanged(int)', self.onSurfaceDeplacementStateChanged)
planeLabelLayout.addWidget(self.surfaceDeplacementCheckBox)
landmarkLayout.addLayout(planeLabelLayout)
label1Layout = qt.QHBoxLayout()
label1 = qt.QLabel(' L1:')
self.landmark1ComboBox = qt.QComboBox()
landmark1ComboBox = self.landmark1ComboBox
landmark1ComboBox.addItem("Select")
landmark1ComboBox.connect('currentIndexChanged(QString)', self.placePlaneClicked)
label1Layout.addWidget(label1)
label1Layout.addWidget(landmark1ComboBox)
landmarkLayout.addLayout(label1Layout)
label2Layout = qt.QHBoxLayout()
label2 = qt.QLabel(' L2:')
self.landmark2ComboBox = qt.QComboBox()
landmark2ComboBox = self.landmark2ComboBox
landmark2ComboBox.addItem("Select")
landmark2ComboBox.connect('currentIndexChanged(QString)', self.placePlaneClicked)
label2Layout.addWidget(label2)
label2Layout.addWidget(landmark2ComboBox)
landmarkLayout.addLayout(label2Layout)
label3Layout = qt.QHBoxLayout()
label3 = qt.QLabel(' L3:')
self.landmark3ComboBox = qt.QComboBox()
landmark3ComboBox = self.landmark3ComboBox
landmark3ComboBox.addItem("Select")
landmark3ComboBox.connect('currentIndexChanged(QString)', self.placePlaneClicked)
label3Layout.addWidget(label3)
label3Layout.addWidget(landmark3ComboBox)
landmarkLayout.addLayout(label3Layout)
# fiducial list for the plane
fidNode = self.logic.getFiducialList()
for i in range(0, fidNode.GetNumberOfFiducials()):
label = fidNode.GetNthFiducialLabel(i)
landmark1ComboBox.addItem(label)
landmark2ComboBox.addItem(label)
landmark3ComboBox.addItem(label)
anglePlanes.landmarkComboBox1MidPoint.addItem(label)
anglePlanes.landmarkComboBox2MidPoint.addItem(label)
#anglePlanes.midPointFiducialDictionaryID[label] = fidNode.GetNthMarkupID(i)
fidNode.onFiducialAddedObserverTag = fidNode.AddObserver(fidNode.MarkupAddedEvent, self.onFiducialAdded)
fidNode.onFiducialRemovedObserverTag = fidNode.AddObserver(fidNode.MarkupRemovedEvent, self.onFiducialRemoved)
fidNode.setPointModifiedEventObserverTag = fidNode.AddObserver(fidNode.PointModifiedEvent,
self.onPointModifiedEvent)
# These observers are in AnglePlaneWidgets, they listen to any fiducial being added
#
fidNode.onFiducialAddedMidPointObserverTag = fidNode.AddObserver(fidNode.MarkupAddedEvent,
anglePlanes.onFiducialChangedMidPoint)
fidNode.onFiducialRemovedMidPointObserverTag = fidNode.AddObserver(fidNode.MarkupRemovedEvent,
anglePlanes.onFiducialChangedMidPoint)
self.layout().addRow(landmarkLayout)
self.slider = ctk.ctkSliderWidget()
slider = self.slider
slider.singleStep = 0.1
slider.minimum = 0.1
slider.maximum = 10
slider.value = 1.0
slider.toolTip = "Set the size of your plane."
self.slideOpacity = ctk.ctkSliderWidget()
slideOpacity = self.slideOpacity
slideOpacity.singleStep = 0.1
slideOpacity.minimum = 0.1
slideOpacity.maximum = 1
slideOpacity.value = 1.0
slideOpacity.toolTip = "Set the opacity of your plane."
slider.connect('valueChanged(double)', self.placePlaneClicked)
slideOpacity.connect('valueChanged(double)', self.placePlaneClicked)
landmarkSliderLayout = qt.QHBoxLayout()
label = qt.QLabel(' Size:')
label2 = qt.QLabel(' Opacity:')
landmarkSliderLayout.addWidget(label)
landmarkSliderLayout.addWidget(self.slider)
landmarkSliderLayout.addWidget(label2)
landmarkSliderLayout.addWidget(self.slideOpacity)
self.HidePlaneCheckBox = qt.QCheckBox("Hide")
self.HidePlaneCheckBox.setChecked(False)
self.HidePlaneCheckBox.connect('stateChanged(int)', self.onHideSurface)
landmarkSliderLayout.addWidget(self.HidePlaneCheckBox)
self.layout().addRow(landmarkSliderLayout)
removeButtonLayout = qt.QHBoxLayout()
removeButtonLayout.addStretch(1)
removePlaneButton = qt.QPushButton("Remove")
removeButtonLayout.addWidget(removePlaneButton)
self.removeFiducials = qt.QCheckBox("Remove Fiducials")
self.removeFiducials.setChecked(True)
removeButtonLayout.addWidget(self.removeFiducials)
self.layout().addRow(removeButtonLayout)
removePlaneButton.connect('clicked(bool)', self.onRemove)
self.anglePlanes = anglePlanes
def PlaneIsDefined(self):
    """Return True when all three landmark combo boxes have a real
    landmark selected (index 0 is the "Select" placeholder)."""
    selectors = (self.landmark1ComboBox, self.landmark2ComboBox, self.landmark3ComboBox)
    return all(selector.currentIndex > 0 for selector in selectors)
def onRemove(self):
    """Handle the "Remove" button: tear down this plane's actor in the 3D
    views via the logic, then unregister the plane from the parent
    AnglePlanes widget (which may also delete the fiducials, depending on
    the "Remove Fiducials" checkbox handled by the parent)."""
    self.logic.remove()
    self.anglePlanes.RemoveManualPlane(self.id)
def onFiducialRemoved(self, obj, event):
    """Markup-removed observer callback; ``obj`` is the fiducial list node.

    Three clean-up steps, in order:
      1. drop the removed landmark's entry from the three combo boxes,
      2. forget any middle point whose own markup was the one removed,
      3. remove any middle-point markup whose two defining end points no
         longer both exist.
    """
    fidlist = obj
    # Update combo boxes.  One event removes one markup, so stop at the
    # first label no longer present in the list; the three boxes hold the
    # same items in the same order, hence the same index in each.
    for i in range(1, self.landmark1ComboBox.count):
        found = self.fiducialInList(self.landmark1ComboBox.itemText(i), fidlist)
        if not found:
            self.landmark1ComboBox.removeItem(i)
            self.landmark2ComboBox.removeItem(i)
            self.landmark3ComboBox.removeItem(i)
            break
    # Update middle point dictionary.
    # Check that the fiducial that was removed was not itself a middle
    # fiducial; if so, drop its bookkeeping entry.
    # NOTE(review): this deletes from the dict while iterating .keys() --
    # safe on Python 2 where keys() returns a list, but would raise
    # RuntimeError on Python 3; confirm the target interpreter.
    for x in self.anglePlanes.middleFiducialDictionary.keys():
        node = slicer.mrmlScene.GetNodeByID(self.anglePlanes.middleFiducialDictionary[x].nodeID)
        if node == fidlist:
            if node.GetMarkupIndexByID(x) == -1:
                print "removing fiducial from middlefiducialDictionary"
                del self.anglePlanes.middleFiducialDictionary[x]
                # continue
    # If the fiducial that was removed is one of the two fiducials defining a
    # middle point, we also remove the middle point.
    # If this loop removes a markup, this might start an asynchronous job that
    # might modify the dictionary while we iterate. That would be an issue, so
    # iterate over a deep copy instead.
    middleFiducialDictionary = copy.deepcopy(self.anglePlanes.middleFiducialDictionary)
    for x in middleFiducialDictionary.keys():
        node = slicer.mrmlScene.GetNodeByID(middleFiducialDictionary[x].nodeID)
        p1 = middleFiducialDictionary[x].P1
        p2 = middleFiducialDictionary[x].P2
        if node.GetMarkupIndexByID(p1) == -1 or node.GetMarkupIndexByID(p2) == -1:
            position = node.GetMarkupIndexByID(x)
            if position != -1:
                print "removing middle fiducial because end point has been removed"
                node.RemoveMarkup(position)
                # No need to remove it from middleFiducialDictionary here as the previous
                # call should trigger the call of this function and remove this markup
                # from middleFiducialDictionary for us
def getFiducials(self):
    """Return the positions of the three landmarks currently selected in
    the combo boxes, as a list of three numpy arrays of length 3.

    Combo-box index 0 is the "Select" placeholder, so the fiducial index
    inside the markups node is ``currentIndex - 1``.
    """
    fidNode = self.logic.getFiducialList()
    listCoord = list()
    # Bug fix: allocate a fresh array per landmark.
    # GetNthFiducialPosition() fills the array *in place*; the previous
    # code reused a single buffer, so all three list entries aliased the
    # same array and ended up equal to the last landmark's position.
    for comboBox in (self.landmark1ComboBox, self.landmark2ComboBox, self.landmark3ComboBox):
        coord = numpy.zeros(3)
        fidNode.GetNthFiducialPosition(int(comboBox.currentIndex) - 1, coord)
        listCoord.append(coord)
    return listCoord
def placePlaneClicked(self):
    """Shared slot for the landmark combo boxes and the size/opacity
    sliders: refresh the computed angles on the parent widget, then
    redraw this plane."""
    self.anglePlanes.valueComboBox()
    self.update()
def fiducialInList(self, name, fidlist):
    """Return True if the markups node ``fidlist`` contains a fiducial
    whose label equals ``name``, False otherwise."""
    labels = (fidlist.GetNthFiducialLabel(index)
              for index in range(fidlist.GetNumberOfFiducials()))
    return any(label == name for label in labels)
def projectAllFiducials(self):
    """Project every ordinary landmark of this plane's fiducial list onto
    the closest loaded surface.  Middle points are skipped: their position
    is derived from their two end points, not from a surface."""
    fidlist = self.logic.getFiducialList()
    middlePointIds = self.anglePlanes.middleFiducialDictionary
    for index in range(fidlist.GetNumberOfFiducials()):
        if fidlist.GetNthMarkupID(index) not in middlePointIds:
            self.projectFiducialOnClosestSurface(fidlist, index, self.pointLocatorDictionary)
def UpdateMiddlePointsPositions(self):
    """Recompute the position of every middle point that lives on this
    plane's fiducial list, placing it between its two end points via
    computeMidPointPosition() (and back onto the closest surface when it
    was flagged onSurface).

    The PointModified observer is detached around
    SetNthFiducialPosition() so that moving the middle point does not
    re-trigger onPointModifiedEvent() and recurse.
    """
    # Reused output buffer; computeMidPointPosition fills it in place and
    # the values are consumed before the next iteration overwrites them.
    current = numpy.zeros(3)
    for x in self.anglePlanes.middleFiducialDictionary.keys():
        middleFiducial = self.anglePlanes.middleFiducialDictionary[x]
        if middleFiducial.nodeID == self.logic.getFiducialList().GetID():
            node = slicer.mrmlScene.GetNodeByID(middleFiducial.nodeID)
            self.anglePlanes.computeMidPointPosition(node, middleFiducial.P1, middleFiducial.P2, current)
            node.RemoveObserver(node.setPointModifiedEventObserverTag)
            index = node.GetMarkupIndexByID(x)
            node.SetNthFiducialPosition(index, current[0], current[1], current[2])
            node.setPointModifiedEventObserverTag = node.AddObserver(node.PointModifiedEvent,
                                                                     self.onPointModifiedEvent)
            if middleFiducial.onSurface:
                print "middle on surface"
                self.projectFiducialOnClosestSurface(node, index, self.pointLocatorDictionary)
def onPointModifiedEvent(self, obj, event):
    """PointModified observer: when "On Surface" is checked, re-project
    all landmarks onto the nearest surface, then redraw the plane.

    NOTE(review): update() is taken as unconditional here so the plane is
    redrawn after any point move -- confirm against the original
    indentation.
    """
    if self.surfaceDeplacementCheckBox.isChecked():
        self.projectAllFiducials()
    self.update()
def onSurfaceDeplacementStateChanged(self):
    """Slot for the "On Surface" checkbox: when it becomes checked,
    snap all landmarks onto the nearest surface, then redraw.

    NOTE(review): update() is taken as unconditional here (harmless
    redraw when unchecked) -- confirm against the original indentation.
    """
    if self.surfaceDeplacementCheckBox.isChecked():
        self.projectAllFiducials()
    self.update()
def onHideSurface(self):
    """Slot for the "Hide" checkbox: redraw the plane with opacity 0 when
    hidden, or with the opacity slider's value when shown.  Does nothing
    until all three landmarks are selected."""
    if not self.PlaneIsDefined():
        return
    opacity = 0 if self.HidePlaneCheckBox.isChecked() else self.slideOpacity.value
    self.logic.planeLandmarks(self.landmark1ComboBox.currentIndex,
                              self.landmark2ComboBox.currentIndex,
                              self.landmark3ComboBox.currentIndex,
                              self.slider.value, opacity)
def update(self):
    """Recompute middle-point positions, then redraw the plane -- but only
    once all three landmark combo boxes have a real selection."""
    self.UpdateMiddlePointsPositions()
    if self.PlaneIsDefined():
        self.logic.planeLandmarks(self.landmark1ComboBox.currentIndex, self.landmark2ComboBox.currentIndex,
                                  self.landmark3ComboBox.currentIndex, self.slider.value, self.slideOpacity.value)
def projectFiducialOnClosestSurface(self, fidlist, fidid, pointLocatorDictionary):
    """Snap fiducial number ``fidid`` of markups node ``fidlist`` onto the
    closest point of the closest loaded model surface.

    ``pointLocatorDictionary`` maps model names to point locators built on
    those models (each supports FindClosestPoint).  The PointModified
    observer is detached while the position is rewritten so the move does
    not re-enter onPointModifiedEvent().
    """
    landmarkCoord = numpy.zeros(3)
    fidlist.GetNthFiducialPosition(fidid, landmarkCoord)
    # Start from the largest float so any real surface point is closer.
    minDistance = slicer.sys.float_info.max
    minClosestPoint = numpy.zeros(3)
    # NOTE(review): indexing keys by position relies on Python 2's keys()
    # returning a list; on Python 3 this would need list(...)
    keys = pointLocatorDictionary.keys()
    foundCloser = False
    for i in range(0, len(keys)):
        locator = pointLocatorDictionary[keys[i]]
        closestpointid = locator.FindClosestPoint(landmarkCoord)
        # Resolve the model node with the same name as the locator's key.
        mrmlmodelcollection = slicer.mrmlScene.GetNodesByClassByName("vtkMRMLModelNode", keys[i])
        modelnode = mrmlmodelcollection.GetItemAsObject(0)
        if not modelnode:
            continue
        poly = modelnode.GetPolyData()
        if poly is None or not hasattr(poly, 'GetPoints'):  # It will be equal to None if object does not contain a polydata
            continue
        closestpoint = poly.GetPoints().GetPoint(closestpointid)
        distance = numpy.linalg.norm(closestpoint - landmarkCoord)
        if distance < minDistance:
            foundCloser = True
            minDistance = distance
            minClosestPoint = closestpoint
    if foundCloser:
        # Only rewrite the position when it actually changed, to avoid a
        # needless modified event.
        if minClosestPoint[0] != landmarkCoord[0] or minClosestPoint[1] != landmarkCoord[1] or minClosestPoint[2] != \
                landmarkCoord[2]:
            fidlist.RemoveObserver(fidlist.setPointModifiedEventObserverTag)
            fidlist.SetNthFiducialPosition(fidid, minClosestPoint[0], minClosestPoint[1], minClosestPoint[2])
            fidlist.setPointModifiedEventObserverTag = fidlist.AddObserver(fidlist.PointModifiedEvent,
                                                                           self.onPointModifiedEvent)
def addLandMarkClicked(self):
    """Put Slicer into fiducial-placement mode targeting this plane's
    markups list, so the next click in a view drops a landmark there."""
    fidlist = self.logic.getFiducialList()
    selectionNode = slicer.mrmlScene.GetNodeByID("vtkMRMLSelectionNodeSingleton")
    selectionNode.SetReferenceActivePlaceNodeClassName("vtkMRMLMarkupsFiducialNode")
    selectionNode.SetActivePlaceNodeID(fidlist.GetID())
    interactionNode = slicer.mrmlScene.GetNodeByID("vtkMRMLInteractionNodeSingleton")
    # Mode 1 -- presumably "Place" mode; confirm against vtkMRMLInteractionNode.
    interactionNode.SetCurrentInteractionMode(1)
    # To select multiple points in the 3D view, we want to have to click
    # on the "place fiducial" button multiple times
    placeModePersistence = 0
    interactionNode.SetPlaceModePersistence(placeModePersistence)
def onFiducialAdded(self, obj, event):
    """MarkupAdded observer: append the newest fiducial's label (``obj``
    is the fiducial list node) to all three landmark combo boxes."""
    fidlist = obj
    newestLabel = fidlist.GetNthFiducialLabel(fidlist.GetNumberOfFiducials() - 1)
    for comboBox in (self.landmark1ComboBox, self.landmark2ComboBox, self.landmark3ComboBox):
        comboBox.addItem(newestLabel)
class AnglePlanesLogic(ScriptedLoadableModuleLogic):
    """Geometry/rendering backend for one manually defined plane.

    Owns the VTK pipeline (points -> plane source -> mapper -> actor) and
    the "P<id>" markups fiducial list of the plane, and computes the
    angles between two plane normals projected on the three anatomical
    views (R-L, S-I, A-P), stored as ``angle_degre_*`` attributes.
    """

    def __init__(self, id=-1):
        # Mapping from slice-view color name to its MRML slice node ID.
        self.ColorNodeCorrespondence = {'Red': 'vtkMRMLSliceNodeRed',
                                        'Yellow': 'vtkMRMLSliceNodeYellow',
                                        'Green': 'vtkMRMLSliceNodeGreen'}
        self.id = id
        self.initialize()

    def initialize(self):
        """(Re)create the VTK objects of the plane rendering pipeline."""
        self.polydata = vtk.vtkPolyData()
        self.points = vtk.vtkPoints()
        self.planeSource = vtk.vtkPlaneSource()
        self.mapper = vtk.vtkPolyDataMapper()
        self.actor = vtk.vtkActor()

    def remove(self):
        """Detach this plane's actor from every 3D view's first renderer,
        force a redraw, then release the actor."""
        renderer = list()
        renderWindow = list()
        layoutManager = slicer.app.layoutManager()
        for i in range(0, layoutManager.threeDViewCount):
            threeDWidget = layoutManager.threeDWidget(i)
            threeDView = threeDWidget.threeDView()
            renderWindow.append(threeDView.renderWindow())
            renderers = renderWindow[i].GetRenderers()
            renderer.append(renderers.GetFirstRenderer())
            renderer[i].RemoveViewProp(self.actor)
            renderWindow[i].AddRenderer(renderer[i])
            renderer[i].Render()
        self.actor.RemoveAllObservers()
        self.actor = None

    def getFiducialList(self):
        """Return this plane's markups fiducial node ("P<id>"),
        creating and adding it to the scene if it does not exist yet."""
        P = self.getFiducialListName()
        nodes = slicer.mrmlScene.GetNodesByClassByName('vtkMRMLMarkupsFiducialNode', P)
        if nodes.GetNumberOfItems() == 0:
            # The list does not exist so we create it.
            fidNode = slicer.vtkMRMLMarkupsFiducialNode()
            fidNode.SetName(P)
            slicer.mrmlScene.AddNode(fidNode)
        else:
            # The list exists but the observers must be updated.
            fidNode = nodes.GetItemAsObject(0)
        return fidNode

    def getFiducialListName(self):
        """Return the scene name of this plane's fiducial list, "P<id>"."""
        return "P" + str(self.id)

    def getMatrix(self, slice):
        """Return the 4x4 SliceToRAS transform of ``slice`` (an MRML slice
        node) as a numpy matrix."""
        self.mat = slice.GetSliceToRAS()
        print(self.mat)
        # Copy the 16 SliceToRAS elements into a numpy matrix.
        m = numpy.matrix([[self.mat.GetElement(row, col) for col in range(4)]
                          for row in range(4)])
        return m

    def defineNormal(self, matrix):
        """Return the slice-plane normal in RAS homogeneous coordinates:
        transform the slice normal (0,0,1) and origin (0,0,0) by
        ``matrix`` and subtract to remove the translation."""
        # Normal vector to the slice (homogeneous):
        n_vector = numpy.matrix([[0], [0], [1], [1]])
        # A point on the slice (its origin, homogeneous):
        A = numpy.matrix([[0], [0], [0], [1]])
        normalVector = matrix * n_vector
        A = matrix * A
        normalVector1 = normalVector
        normalVector1[0] = normalVector[0] - A[0]
        normalVector1[1] = normalVector[1] - A[1]
        normalVector1[2] = normalVector[2] - A[2]
        return normalVector1

    def getAngle(self, normalVect1, normalVect2):
        """Compute the angles between two plane normals and store them:

          angle_degre_RL / _RL_comp : projection on the YZ plane (R-L view)
          angle_degre_SI / _SI_comp : projection on the XY plane (S-I view)
          angle_degre_AP / _AP_comp : projection on the XZ plane (A-P view)

        Each ``*_comp`` value is the 180-degree complement; when a
        projected normal is degenerate (zero length) the pair is (0, 0).
        """
        norm1 = sqrt(
            normalVect1[0] * normalVect1[0] + normalVect1[1] * normalVect1[1] + normalVect1[2] * normalVect1[2])
        norm2 = sqrt(
            normalVect2[0] * normalVect2[0] + normalVect2[1] * normalVect2[1] + normalVect2[2] * normalVect2[2])
        scalar_product = (
            normalVect1[0] * normalVect2[0] + normalVect1[1] * normalVect2[1] + normalVect1[2] * normalVect2[2])
        # Full 3D angle: computed for reference only; the per-view
        # projections below are what gets stored on self.
        angle = acos(scalar_product / (norm1 * norm2))
        angle_degree = angle * 180 / pi

        # R-L view: project both normals on the YZ plane.
        norm1_RL = sqrt(normalVect1[1] * normalVect1[1] + normalVect1[2] * normalVect1[2])
        norm2_RL = sqrt(normalVect2[1] * normalVect2[1] + normalVect2[2] * normalVect2[2])
        # Bug fix: the original tested norm1_RL twice, so a degenerate
        # norm2_RL slipped through to the division below.
        if (norm1_RL == 0 or norm2_RL == 0):
            self.angle_degre_RL = 0
            self.angle_degre_RL_comp = 0
        else:
            scalar_product_RL = (normalVect1[1] * normalVect2[1] + normalVect1[2] * normalVect2[2])
            angleRL = acos(scalar_product_RL / (norm1_RL * norm2_RL))
            self.angle_degre_RL = round(angleRL * 180 / pi, 2)
            self.angle_degre_RL_comp = 180 - self.angle_degre_RL

        # S-I view: project both normals on the XY plane.
        norm1_SI = sqrt(normalVect1[0] * normalVect1[0] + normalVect1[1] * normalVect1[1])
        norm2_SI = sqrt(normalVect2[0] * normalVect2[0] + normalVect2[1] * normalVect2[1])
        if (norm1_SI == 0 or norm2_SI == 0):
            self.angle_degre_SI = 0
            self.angle_degre_SI_comp = 0
        else:
            scalar_product_SI = (normalVect1[0] * normalVect2[0] + normalVect1[1] * normalVect2[1])
            angleSI = acos(scalar_product_SI / (norm1_SI * norm2_SI))
            self.angle_degre_SI = round(angleSI * 180 / pi, 2)
            self.angle_degre_SI_comp = 180 - self.angle_degre_SI

        # A-P view: project both normals on the XZ plane.
        norm1_AP = sqrt(normalVect1[0] * normalVect1[0] + normalVect1[2] * normalVect1[2])
        norm2_AP = sqrt(normalVect2[0] * normalVect2[0] + normalVect2[2] * normalVect2[2])
        if (norm1_AP == 0 or norm2_AP == 0):
            self.angle_degre_AP = 0
            self.angle_degre_AP_comp = 0
        else:
            scalar_product_AP = (normalVect1[0] * normalVect2[0] + normalVect1[2] * normalVect2[2])
            angleAP = acos(scalar_product_AP / (norm1_AP * norm2_AP))
            self.angle_degre_AP = round(angleAP * 180 / pi, 2)
            self.angle_degre_AP_comp = 180 - self.angle_degre_AP

    def normalLandmarks(self, GA, GB):
        """Return the normalized cross product of column vectors GA and GB.

        NOTE(review): raises ZeroDivisionError when GA and GB are
        collinear (zero cross product); callers apparently never pass
        collinear vectors -- confirm.
        """
        Vn = numpy.matrix([[0], [0], [0]])
        Vn[0] = GA[1] * GB[2] - GA[2] * GB[1]
        Vn[1] = GA[2] * GB[0] - GA[0] * GB[2]
        Vn[2] = GA[0] * GB[1] - GA[1] * GB[0]
        norm_Vn = sqrt(Vn[0] * Vn[0] + Vn[1] * Vn[1] + Vn[2] * Vn[2])
        Normal = Vn / norm_Vn
        return Normal

    def _landmarkPosition(self, fidNode, landmarkValue):
        """Return (r, a, s) of the landmark selected by a 1-based
        combo-box value, or (0, 0, 0) when none is selected (value 0)."""
        coord = numpy.zeros(3)
        if landmarkValue != 0:
            fidNode.GetNthFiducialPosition(int(landmarkValue) - 1, coord)
        return coord[0], coord[1], coord[2]

    def planeLandmarks(self, Landmark1Value, Landmark2Value, Landmark3Value, slider, sliderOpacity):
        """Rebuild and render the plane defined by the three selected
        landmarks.

        ``LandmarkNValue`` are 1-based combo-box indices (0 = unselected,
        mapped to the origin), ``slider`` scales the plane about the
        triangle's centroid, ``sliderOpacity`` sets the actor opacity
        (0 hides the plane).
        """
        fidNode = self.getFiducialList()
        r1, a1, s1 = self._landmarkPosition(fidNode, Landmark1Value)
        r2, a2, s2 = self._landmarkPosition(fidNode, Landmark2Value)
        r3, a3, s3 = self._landmarkPosition(fidNode, Landmark3Value)
        points = self.points
        if points.GetNumberOfPoints() == 0:
            points.InsertNextPoint(r1, a1, s1)
            points.InsertNextPoint(r2, a2, s2)
            points.InsertNextPoint(r3, a3, s3)
        else:
            points.SetPoint(0, r1, a1, s1)
            points.SetPoint(1, r2, a2, s2)
            points.SetPoint(2, r3, a3, s3)
        polydata = self.polydata
        polydata.SetPoints(points)
        # Centroid of the three landmarks.
        centerOfMass = vtk.vtkCenterOfMass()
        centerOfMass.SetInputData(polydata)
        centerOfMass.SetUseScalarsAsWeights(False)
        centerOfMass.Update()
        G = centerOfMass.GetCenter()
        A = (r1, a1, s1)
        B = (r2, a2, s2)
        C = (r3, a3, s3)
        # Vectors from the centroid to each landmark.
        # NOTE(review): these are integer matrices, so float components
        # are truncated on assignment; the module's reference angles
        # assume this behavior, so it is preserved here.
        GA = numpy.matrix([[0], [0], [0]])
        GA[0] = A[0] - G[0]
        GA[1] = A[1] - G[1]
        GA[2] = A[2] - G[2]
        GB = numpy.matrix([[0], [0], [0]])
        GB[0] = B[0] - G[0]
        GB[1] = B[1] - G[1]
        GB[2] = B[2] - G[2]
        GC = numpy.matrix([[0], [0], [0]])
        GC[0] = C[0] - G[0]
        GC[1] = C[1] - G[1]
        GC[2] = C[2] - G[2]
        # Plane normal from two of the centroid vectors.
        self.N = self.normalLandmarks(GA, GB)
        # Scale the triangle's corners about the centroid by the slider
        # value to size the rendered plane.
        D = numpy.matrix([[0], [0], [0]])
        E = numpy.matrix([[0], [0], [0]])
        F = numpy.matrix([[0], [0], [0]])
        D[0] = slider * GA[0] + G[0]
        D[1] = slider * GA[1] + G[1]
        D[2] = slider * GA[2] + G[2]
        E[0] = slider * GB[0] + G[0]
        E[1] = slider * GB[1] + G[1]
        E[2] = slider * GB[2] + G[2]
        F[0] = slider * GC[0] + G[0]
        F[1] = slider * GC[1] + G[1]
        F[2] = slider * GC[2] + G[2]
        planeSource = self.planeSource
        planeSource.SetNormal(self.N[0], self.N[1], self.N[2])
        planeSource.SetOrigin(D[0], D[1], D[2])
        planeSource.SetPoint1(E[0], E[1], E[2])
        planeSource.SetPoint2(F[0], F[1], F[2])
        planeSource.Update()
        plane = planeSource.GetOutput()
        mapper = self.mapper
        mapper.SetInputData(plane)
        mapper.Update()
        self.actor.SetMapper(mapper)
        self.actor.GetProperty().SetColor(0, 0.4, 0.8)
        self.actor.GetProperty().SetOpacity(sliderOpacity)
        # Add (or re-add) the actor to every 3D view and force a redraw.
        renderer = list()
        renderWindow = list()
        layoutManager = slicer.app.layoutManager()
        for i in range(0, layoutManager.threeDViewCount):
            threeDWidget = layoutManager.threeDWidget(i)
            threeDView = threeDWidget.threeDView()
            renderWindow.append(threeDView.renderWindow())
            renderers = renderWindow[i].GetRenderers()
            renderer.append(renderers.GetFirstRenderer())
            renderer[i].AddViewProp(self.actor)
            renderWindow[i].AddRenderer(renderer[i])
            renderer[i].Render()
            renderWindow[i].Render()
class AnglePlanesTest(ScriptedLoadableModuleTest):
    """Scripted self-test: create two planes with known fiducials,
    exercise save/load, and compare the six per-view angles against
    reference values."""

    def setUp(self):
        # Reset the state - clear the whole scene.
        slicer.mrmlScene.Clear(0)

    def runTest(self):
        # Run all tests needed.
        self.setUp()
        self.test_AnglePlanes()

    def test_AnglePlanes(self):
        self.delayDisplay('Starting the test')
        self.delayDisplay('Adding planes')
        widget = AnglePlanesWidget()
        widget.addNewPlane()
        widget.addNewPlane()
        self.delayDisplay('Adding fiducials')
        fidlist1 = slicer.mrmlScene.GetNodesByClassByName('vtkMRMLMarkupsFiducialNode', "P1").GetItemAsObject(0)
        fidlist1.AddFiducial(10, 10, 10)
        fidlist1.AddFiducial(20, 20, 20)
        fidlist1.AddFiducial(10, 20, 30)
        fidlist2 = slicer.mrmlScene.GetNodesByClassByName('vtkMRMLMarkupsFiducialNode', "P2").GetItemAsObject(0)
        fidlist2.AddFiducial(50, 50, 50)
        fidlist2.AddFiducial(40, 20, 80)
        fidlist2.AddFiducial(10, 40, 20)
        self.delayDisplay('Saving planes')
        widget.savePlanes("test.p")
        self.delayDisplay('Loading planes')
        widget.readPlanes("test.p")
        self.delayDisplay('Selecting fiducials')
        widget.planeControlsDictionary["Plane 1"].landmark1ComboBox.setCurrentIndex(1)
        widget.planeControlsDictionary["Plane 1"].landmark2ComboBox.setCurrentIndex(2)
        widget.planeControlsDictionary["Plane 1"].landmark3ComboBox.setCurrentIndex(3)
        widget.planeControlsDictionary["Plane 2"].landmark1ComboBox.setCurrentIndex(1)
        widget.planeControlsDictionary["Plane 2"].landmark2ComboBox.setCurrentIndex(2)
        widget.planeControlsDictionary["Plane 2"].landmark3ComboBox.setCurrentIndex(3)
        self.delayDisplay('Selecting planes')
        widget.planeComboBox1.setCurrentIndex(5)
        widget.planeComboBox2.setCurrentIndex(6)
        self.delayDisplay('Calculating angle')
        widget.angleValue()
        test = widget.logic.angle_degre_RL != 59.06 or widget.logic.angle_degre_RL_comp != 120.94 or widget.logic.angle_degre_SI != 12.53 or widget.logic.angle_degre_SI_comp != 167.47 or widget.logic.angle_degre_AP != 82.56 or widget.logic.angle_degre_AP_comp != 97.44
        self.delayDisplay('Testing angles')
        if test:
            # Bug fix: the computed angles live on widget.logic; this test
            # case has no ``logic`` attribute, so the old ``self.logic``
            # references raised AttributeError instead of reporting the
            # failing values.
            print("%s %s %s" % ("", "Angle", "Complementary"))
            print("%s %s %s" % ("R-L-View", widget.logic.angle_degre_RL, widget.logic.angle_degre_RL_comp))
            print("%s %s %s" % ("S-I-View", widget.logic.angle_degre_SI, widget.logic.angle_degre_SI_comp))
            print("%s %s %s" % ("A-P-View", widget.logic.angle_degre_AP, widget.logic.angle_degre_AP_comp))
            self.delayDisplay('Test Failure!')
        else:
            self.delayDisplay('Test passed!')
        widget.parent.close()
| |
import datetime
import re
import random
import time
import redis
from utils import log as logging
from django.http import HttpResponse
from django.conf import settings
from django.db import connection
from django.template import Template, Context
from apps.profile.tasks import CleanupUser
from apps.statistics.rstats import round_time
from utils import json_functions as json
class LastSeenMiddleware(object):
    """Stamps an authenticated user's profile with the time and IP of the
    latest visit to the feed-reading endpoints, and queues a CleanupUser
    task for users returning after more than an hour away."""

    TRACKED_PREFIXES = ('/reader/refresh_feeds', '/reader/load_feeds', '/reader/feeds')

    def process_response(self, request, response):
        tracked = request.path == '/' or request.path.startswith(self.TRACKED_PREFIXES)
        if not tracked or not hasattr(request, 'user') or not request.user.is_authenticated():
            return response

        hour_ago = datetime.datetime.utcnow() - datetime.timedelta(minutes=60)
        ip = request.META.get('HTTP_X_FORWARDED_FOR', None) or request.META['REMOTE_ADDR']
        profile = request.user.profile
        if profile.last_seen_on < hour_ago:
            logging.user(request, "~FG~BBRepeat visitor: ~SB%s (%s)" % (
                profile.last_seen_on, ip))
            CleanupUser.delay(user_id=request.user.pk)
        elif settings.DEBUG:
            logging.user(request, "~FG~BBRepeat visitor (ignored): ~SB%s (%s)" % (
                profile.last_seen_on, ip))
        profile.last_seen_on = datetime.datetime.utcnow()
        profile.last_seen_ip = ip
        profile.save()
        return response
class DBProfilerMiddleware:
    """Randomly samples ~1% of feed/river requests for DB profiling and
    pushes per-backend call counts (:c) and cumulative durations (:t)
    into minute-bucketed redis keys of the form DB:<backend>:<minute>."""

    def process_request(self, request):
        request.activated_segments = []
        sampled_path = request.path.startswith(('/reader/feed', '/reader/river'))
        if sampled_path and random.random() < .01:
            request.activated_segments.append('db_profiler')
            connection.use_debug_cursor = True

    def process_exception(self, request, exception):
        # Record whatever timings were collected even when the view raised.
        if hasattr(request, 'sql_times_elapsed'):
            self._save_times(request.sql_times_elapsed)

    def process_response(self, request, response):
        if hasattr(request, 'sql_times_elapsed'):
            self._save_times(request.sql_times_elapsed)
        return response

    def _save_times(self, db_times):
        """Accumulate db_times (backend name -> seconds) into redis keys
        that expire two days after their minute bucket."""
        if not db_times:
            return
        r = redis.Redis(connection_pool=settings.REDIS_STATISTICS_POOL)
        pipe = r.pipeline()
        minute = round_time(round_to=60)
        expire_at = (minute + datetime.timedelta(days=2)).strftime("%s")
        for db, duration in db_times.items():
            key = "DB:%s:%s" % (db, minute.strftime('%s'))
            pipe.incr("%s:c" % key)
            pipe.expireat("%s:c" % key, expire_at)
            if duration:
                pipe.incrbyfloat("%s:t" % key, duration)
                pipe.expireat("%s:t" % key, expire_at)
        pipe.execute()
class SQLLogToConsoleMiddleware:
    """Pretty-prints every SQL/Mongo/Redis query of a request to the debug
    log when DEBUG_QUERIES is on (or the request was sampled by
    DBProfilerMiddleware), and attaches per-backend time totals to the
    request as ``sql_times_elapsed`` for later saving."""

    def activated(self, request):
        # Active globally via settings, or per-request via the sampler.
        return (settings.DEBUG_QUERIES or
                (hasattr(request, 'activated_segments') and
                 'db_profiler' in request.activated_segments))

    def process_response(self, request, response):
        if not self.activated(request): return response
        if connection.queries:
            time_elapsed = sum([float(q['time']) for q in connection.queries])
            queries = connection.queries
            # Rewrite each entry's 'sql' text in place, tagging it with
            # terminal color codes (~Fx/~SB) by backend/statement type.
            for query in queries:
                if query.get('mongo'):
                    query['sql'] = "~FM%s: %s" % (query['mongo']['collection'], query['mongo']['query'])
                elif query.get('redis'):
                    query['sql'] = "~FC%s" % (query['redis']['query'])
                else:
                    # Collapse the column list so the log stays readable.
                    query['sql'] = re.sub(r'SELECT (.*?) FROM', 'SELECT * FROM', query['sql'])
                    query['sql'] = re.sub(r'SELECT', '~FYSELECT', query['sql'])
                    query['sql'] = re.sub(r'INSERT', '~FGINSERT', query['sql'])
                    query['sql'] = re.sub(r'UPDATE', '~FY~SBUPDATE', query['sql'])
                    query['sql'] = re.sub(r'DELETE', '~FR~SBDELETE', query['sql'])
            t = Template("{% for sql in sqllog %}{% if not forloop.first %}                  {% endif %}[{{forloop.counter}}] ~FC{{sql.time}}s~FW: {{sql.sql|safe}}{% if not forloop.last %}\n{% endif %}{% endfor %}")
            if settings.DEBUG:
                logging.debug(t.render(Context({
                    'sqllog': queries,
                    'count': len(queries),
                    'time': time_elapsed,
                })))
            # Split the elapsed time by backend; entries tagged 'mongo' or
            # 'redis' are excluded from the plain-SQL bucket.
            times_elapsed = {
                'sql': sum([float(q['time'])
                            for q in queries if not q.get('mongo') and
                            not q.get('redis')]),
                'mongo': sum([float(q['time']) for q in queries if q.get('mongo')]),
                'redis': sum([float(q['time']) for q in queries if q.get('redis')]),
            }
            setattr(request, 'sql_times_elapsed', times_elapsed)
        return response
SIMPSONS_QUOTES = [
("Homer", "D'oh."),
("Ralph", "Me fail English? That's unpossible."),
("Lionel Hutz", "This is the greatest case of false advertising I've seen since I sued the movie \"The Never Ending Story.\""),
("Sideshow Bob", "No children have ever meddled with the Republican Party and lived to tell about it."),
("Troy McClure", "Don't kid yourself, Jimmy. If a cow ever got the chance, he'd eat you and everyone you care about!"),
("Comic Book Guy", "The Internet King? I wonder if he could provide faster nudity..."),
("Homer", "Oh, so they have Internet on computers now!"),
("Ned Flanders", "I've done everything the Bible says - even the stuff that contradicts the other stuff!"),
("Comic Book Guy", "Your questions have become more redundant and annoying than the last three \"Highlander\" movies."),
("Chief Wiggum", "Uh, no, you got the wrong number. This is 9-1...2."),
("Sideshow Bob", "I'll be back. You can't keep the Democrats out of the White House forever, and when they get in, I'm back on the streets, with all my criminal buddies."),
("Homer", "When I held that gun in my hand, I felt a surge of power...like God must feel when he's holding a gun."),
("Nelson", "Dad didn't leave... When he comes back from the store, he's going to wave those pop-tarts right in your face!"),
("Milhouse", "Remember the time he ate my goldfish? And you lied and said I never had goldfish. Then why did I have the bowl, Bart? *Why did I have the bowl?*"),
("Lionel Hutz", "Well, he's kind of had it in for me ever since I accidentally ran over his dog. Actually, replace \"accidentally\" with \"repeatedly\" and replace \"dog\" with \"son.\""),
("Comic Book Guy", "Last night's \"Itchy and Scratchy Show\" was, without a doubt, the worst episode *ever.* Rest assured, I was on the Internet within minutes, registering my disgust throughout the world."),
("Homer", "I'm normally not a praying man, but if you're up there, please save me, Superman."),
("Homer", "Save me, Jeebus."),
("Mayor Quimby", "I stand by my racial slur."),
("Comic Book Guy", "Oh, loneliness and cheeseburgers are a dangerous mix."),
("Homer", "You don't like your job, you don't strike. You go in every day and do it really half-assed. That's the American way."),
("Chief Wiggum", "Fat Tony is a cancer on this fair city! He is the cancer and I am the...uh...what cures cancer?"),
("Homer", "Bart, with $10,000 we'd be millionaires! We could buy all kinds of useful things like...love!"),
("Homer", "Fame was like a drug. But what was even more like a drug were the drugs."),
("Homer", "Books are useless! I only ever read one book, \"To Kill A Mockingbird,\" and it gave me absolutely no insight on how to kill mockingbirds! Sure it taught me not to judge a man by the color of his skin...but what good does *that* do me?"),
("Chief Wiggum", "Can't you people take the law into your own hands? I mean, we can't be policing the entire city!"),
("Homer", "Weaseling out of things is important to learn. It's what separates us from the animals...except the weasel."),
("Reverend Lovejoy", "Marge, just about everything's a sin. [holds up a Bible] Y'ever sat down and read this thing? Technically we're not supposed to go to the bathroom."),
("Homer", "You know, the one with all the well meaning rules that don't work out in real life, uh, Christianity."),
("Smithers", "Uh, no, they're saying \"Boo-urns, Boo-urns.\""),
("Hans Moleman", "I was saying \"Boo-urns.\""),
("Homer", "Kids, you tried your best and you failed miserably. The lesson is, never try."),
("Homer", "Here's to alcohol, the cause of - and solution to - all life's problems."),
("Homer", "When will I learn? The answers to life's problems aren't at the bottom of a bottle, they're on TV!"),
("Chief Wiggum", "I hope this has taught you kids a lesson: kids never learn."),
("Homer", "How is education supposed to make me feel smarter? Besides, every time I learn something new, it pushes some old stuff out of my brain. Remember when I took that home winemaking course, and I forgot how to drive?"),
("Homer", "Homer no function beer well without."),
("Duffman", "Duffman can't breathe! OH NO!"),
("Grandpa Simpson", "Dear Mr. President, There are too many states nowadays. Please, eliminate three. P.S. I am not a crackpot."),
("Homer", "Old people don't need companionship. They need to be isolated and studied so it can be determined what nutrients they have that might be extracted for our personal use."),
("Troy McClure", "Hi. I'm Troy McClure. You may remember me from such self-help tapes as \"Smoke Yourself Thin\" and \"Get Some Confidence, Stupid!\""),
("Homer", "A woman is a lot like a refrigerator. Six feet tall, 300 pounds...it makes ice."),
("Homer", "Son, a woman is like a beer. They smell good, they look good, you'd step over your own mother just to get one! But you can't stop at one. You wanna drink another woman!"),
("Homer", "Facts are meaningless. You could use facts to prove anything that's even remotely true!"),
("Mr Burns", "I'll keep it short and sweet - Family. Religion. Friendship. These are the three demons you must slay if you wish to succeed in business."),
("Kent Brockman", "...And the fluffy kitten played with that ball of string all through the night. On a lighter note, a Kwik-E-Mart clerk was brutally murdered last night."),
("Ralph", "Mrs. Krabappel and Principal Skinner were in the closet making babies and I saw one of the babies and then the baby looked at me."),
("Apu", "Please do not offer my god a peanut."),
("Homer", "You don't win friends with salad."),
("Mr Burns", "I don't like being outdoors, Smithers. For one thing, there's too many fat children."),
("Sideshow Bob", "Attempted murder? Now honestly, what is that? Do they give a Nobel Prize for attempted chemistry?"),
("Chief Wiggum", "They only come out in the night. Or in this case, the day."),
("Mr Burns", "Whoa, slow down there, maestro. There's a *New* Mexico?"),
("Homer", "He didn't give you gay, did he? Did he?!"),
("Comic Book Guy", "But, Aquaman, you cannot marry a woman without gills. You're from two different worlds... Oh, I've wasted my life."),
("Homer", "Marge, it takes two to lie. One to lie and one to listen."),
("Superintendent Chalmers", "I've had it with this school, Skinner. Low test scores, class after class of ugly, ugly children..."),
("Mr Burns", "What good is money if it can't inspire terror in your fellow man?"),
("Homer", "Oh, everything looks bad if you remember it."),
("Ralph", "Slow down, Bart! My legs don't know how to be as long as yours."),
("Homer", "Donuts. Is there anything they can't do?"),
("Frink", "Brace yourselves gentlemen. According to the gas chromatograph, the secret ingredient is... Love!? Who's been screwing with this thing?"),
("Apu", "Yes! I am a citizen! Now which way to the welfare office? I'm kidding, I'm kidding. I work, I work."),
("Milhouse", "We started out like Romeo and Juliet, but it ended up in tragedy."),
("Mr Burns", "A lifetime of working with nuclear power has left me with a healthy green glow...and left me as impotent as a Nevada boxing commissioner."),
("Homer", "Kids, kids. I'm not going to die. That only happens to bad people."),
("Milhouse", "Look out, Itchy! He's Irish!"),
("Homer", "I'm going to the back seat of my car, with the woman I love, and I won't be back for ten minutes!"),
("Smithers", "I'm allergic to bee stings. They cause me to, uh, die."),
("Barney", "Aaah! Natural light! Get it off me! Get it off me!"),
("Principal Skinner", "That's why I love elementary school, Edna. The children believe anything you tell them."),
("Sideshow Bob", "Your guilty consciences may make you vote Democratic, but secretly you all yearn for a Republican president to lower taxes, brutalize criminals, and rule you like a king!"),
("Barney", "Jesus must be spinning in his grave!"),
("Superintendent Chalmers", "\"Thank the Lord\"? That sounded like a prayer. A prayer in a public school. God has no place within these walls, just like facts don't have a place within an organized religion."),
("Mr Burns", "[answering the phone] Ahoy hoy?"),
("Comic Book Guy", "Oh, a *sarcasm* detector. Oh, that's a *really* useful invention!"),
("Marge", "Our differences are only skin deep, but our sames go down to the bone."),
("Homer", "What's the point of going out? We're just going to wind up back here anyway."),
("Marge", "Get ready, skanks! It's time for the truth train!"),
("Bill Gates", "I didn't get rich by signing checks."),
("Principal Skinner", "Fire can be our friend; whether it's toasting marshmallows or raining down on Charlie."),
("Homer", "Oh, I'm in no condition to drive. Wait a minute. I don't have to listen to myself. I'm drunk."),
("Homer", "And here I am using my own lungs like a sucker."),
("Comic Book Guy", "Human contact: the final frontier."),
("Homer", "I hope I didn't brain my damage."),
("Krusty the Clown", "And now, in the spirit of the season: start shopping. And for every dollar of Krusty merchandise you buy, I will be nice to a sick kid. For legal purposes, sick kids may include hookers with a cold."),
("Homer", "I'm a Spalding Gray in a Rick Dees world."),
("Dr Nick", "Inflammable means flammable? What a country."),
("Homer", "Beer. Now there's a temporary solution."),
("Comic Book Guy", "Stan Lee never left. I'm afraid his mind is no longer in mint condition."),
("Nelson", "Shoplifting is a victimless crime. Like punching someone in the dark."),
("Krusty the Clown", "Kids, we need to talk for a moment about Krusty Brand Chew Goo Gum Like Substance. We all knew it contained spider eggs, but the hantavirus? That came out of left field. So if you're experiencing numbness and/or comas, send five dollars to antidote, PO box..."),
("Milhouse", "I can't go to juvie. They use guys like me as currency."),
("Homer", "Son, when you participate in sporting events, it's not whether you win or lose: it's how drunk you get."),
("Homer", "I like my beer cold, my TV loud and my homosexuals flaming."),
("Apu", "Thank you, steal again."),
("Homer", "Marge, you being a cop makes you the man! Which makes me the woman - and I have no interest in that, besides occasionally wearing the underwear, which as we discussed, is strictly a comfort thing."),
("Ed Begley Jr", "I prefer a vehicle that doesn't hurt Mother Earth. It's a go-cart, powered by my own sense of self-satisfaction."),
("Bart", "I didn't think it was physically possible, but this both sucks *and* blows."),
("Homer", "How could you?! Haven't you learned anything from that guy who gives those sermons at church? Captain Whatshisname? We live in a society of laws! Why do you think I took you to all those Police Academy movies? For fun? Well, I didn't hear anybody laughing, did you? Except at that guy who made sound effects. Makes sound effects and laughs. Where was I? Oh yeah! Stay out of my booze."),
("Homer", "Lisa, vampires are make-believe, like elves, gremlins, and Eskimos."),
]
class SimpsonsMiddleware:
    """Attach a random Simpsons quote to every response.

    The quote is exposed as an ``X-<Character-Name>`` header whose value is
    the quote text; spaces in the character name become dashes.
    """

    def process_response(self, request, response):
        character, line = random.choice(SIMPSONS_QUOTES)
        header_name = "X-%s" % character.replace(' ', '-')
        response[header_name] = line
        return response
class ServerHostnameMiddleware:
    """Stamp every response with the name of the backend that served it.

    Useful for identifying a misbehaving host behind a load balancer.
    """

    def process_response(self, request, response):
        server_name = settings.SERVER_NAME
        response["X-gunicorn-server"] = server_name
        return response
class TimingMiddleware:
    """Record the wall-clock time at which each request started.

    Downstream code can read ``request.start_time`` to compute latency.
    """

    def process_request(self, request):
        request.start_time = time.time()
# User-agent substrings that are denied service.  Note that 'missing' also
# matches the default used when no User-Agent header was sent at all (see
# UserAgentBanMiddleware below).
BANNED_USER_AGENTS = (
    'feed reader-background',
    'missing',
)
class UserAgentBanMiddleware:
    """Reject requests from banned user agents with a 403 JSON error.

    Requests whose path contains one of the exempt fragments are never
    banned, and the check is skipped entirely when ``settings.TEST_DEBUG``
    is truthy (test runs).
    """

    # Path fragments exempt from user-agent banning (integrations and
    # internal endpoints that may legitimately use odd user agents).
    EXEMPT_PATH_FRAGMENTS = ('profile', 'haproxy', 'account', 'push')

    def process_request(self, request):
        # Absent User-Agent headers fall back to 'missing', which is itself
        # a banned value.
        user_agent = request.environ.get('HTTP_USER_AGENT', 'missing').lower()
        if any(fragment in request.path
               for fragment in self.EXEMPT_PATH_FRAGMENTS):
            return
        # Default to False so a missing TEST_DEBUG setting does not raise
        # AttributeError (the original two-argument getattr would).
        if getattr(settings, 'TEST_DEBUG', False):
            return
        if any(ua in user_agent for ua in BANNED_USER_AGENTS):
            data = {
                'error': 'User agent banned: %s' % user_agent,
                'code': -1
            }
            logging.user(request, "~FB~SN~BBBanned UA: ~SB%s / %s (%s)" % (user_agent, request.path, request.META))
            # NOTE(review): `mimetype` is the legacy Django kwarg; newer
            # Django versions expect `content_type` -- confirm the Django
            # version in use before changing.
            return HttpResponse(json.encode(data), status=403, mimetype='text/json')
| |
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
from oslo_config import cfg
from six.moves import http_client
from testtools.matchers import HasLength
from ironic.api.controllers import base as api_base
from ironic.api.controllers.v1 import driver
from ironic.common import exception
from ironic.conductor import rpcapi
from ironic.tests.api import base
class TestListDrivers(base.FunctionalTest):
    """Functional tests for the /drivers API resource.

    Two fake conductors are registered: h1 serves both drivers, h2 serves
    only d2, so driver->hosts mappings can be asserted.
    """

    d1 = 'fake-driver1'
    d2 = 'fake-driver2'
    h1 = 'fake-host1'
    h2 = 'fake-host2'

    def register_fake_conductors(self):
        """Register conductors: h1 with [d1, d2], h2 with [d2]."""
        self.dbapi.register_conductor({
            'hostname': self.h1,
            'drivers': [self.d1, self.d2],
        })
        self.dbapi.register_conductor({
            'hostname': self.h2,
            'drivers': [self.d2],
        })

    def test_drivers(self):
        self.register_fake_conductors()
        expected = sorted([
            {'name': self.d1, 'hosts': [self.h1]},
            {'name': self.d2, 'hosts': [self.h1, self.h2]},
        ], key=lambda d: d['name'])
        data = self.get_json('/drivers')
        self.assertThat(data['drivers'], HasLength(2))
        drivers = sorted(data['drivers'], key=lambda d: d['name'])
        # Pair expected and actual entries instead of indexing by hand.
        for expected_driver, actual in zip(expected, drivers):
            self.assertEqual(expected_driver['name'], actual['name'])
            self.assertEqual(sorted(expected_driver['hosts']),
                             sorted(actual['hosts']))
            self.validate_link(actual['links'][0]['href'])
            self.validate_link(actual['links'][1]['href'])

    def test_drivers_no_active_conductor(self):
        data = self.get_json('/drivers')
        self.assertThat(data['drivers'], HasLength(0))
        self.assertEqual([], data['drivers'])

    @mock.patch.object(rpcapi.ConductorAPI, 'get_driver_properties')
    def test_drivers_get_one_ok(self, mock_driver_properties):
        # get_driver_properties mock is required by validate_link()
        self.register_fake_conductors()
        data = self.get_json('/drivers/%s' % self.d1,
                             headers={api_base.Version.string: '1.14'})
        self.assertEqual(self.d1, data['name'])
        self.assertEqual([self.h1], data['hosts'])
        self.assertIn('properties', data.keys())
        self.validate_link(data['links'][0]['href'])
        self.validate_link(data['links'][1]['href'])
        self.validate_link(data['properties'][0]['href'])
        self.validate_link(data['properties'][1]['href'])

    def test_driver_properties_hidden_in_lower_version(self):
        """'properties' links only appear from API microversion 1.14 on."""
        self.register_fake_conductors()
        data = self.get_json('/drivers/%s' % self.d1,
                             headers={api_base.Version.string: '1.8'})
        self.assertNotIn('properties', data.keys())

    def test_drivers_get_one_not_found(self):
        response = self.get_json('/drivers/%s' % self.d1, expect_errors=True)
        self.assertEqual(http_client.NOT_FOUND, response.status_int)

    def _test_links(self, public_url=None):
        """Shared body for link checks, with or without a public endpoint."""
        cfg.CONF.set_override('public_endpoint', public_url, 'api')
        self.register_fake_conductors()
        data = self.get_json('/drivers/%s' % self.d1)
        self.assertIn('links', data.keys())
        self.assertEqual(2, len(data['links']))
        self.assertIn(self.d1, data['links'][0]['href'])
        for link in data['links']:
            bookmark = link['rel'] == 'bookmark'
            self.assertTrue(self.validate_link(link['href'],
                                               bookmark=bookmark))
        if public_url is not None:
            expected = [{'href': '%s/v1/drivers/%s' % (public_url, self.d1),
                         'rel': 'self'},
                        {'href': '%s/drivers/%s' % (public_url, self.d1),
                         'rel': 'bookmark'}]
            for item in expected:
                self.assertIn(item, data['links'])

    def test_links(self):
        self._test_links()

    def test_links_public_url(self):
        self._test_links(public_url='http://foo')

    @mock.patch.object(rpcapi.ConductorAPI, 'driver_vendor_passthru')
    def test_driver_vendor_passthru_sync(self, mocked_driver_vendor_passthru):
        self.register_fake_conductors()
        mocked_driver_vendor_passthru.return_value = {
            'return': {'return_key': 'return_value'},
            'async': False,
            'attach': False}
        response = self.post_json(
            '/drivers/%s/vendor_passthru/do_test' % self.d1,
            {'test_key': 'test_value'})
        self.assertEqual(http_client.OK, response.status_int)
        self.assertEqual(mocked_driver_vendor_passthru.return_value['return'],
                         response.json)

    @mock.patch.object(rpcapi.ConductorAPI, 'driver_vendor_passthru')
    def test_driver_vendor_passthru_async(self, mocked_driver_vendor_passthru):
        self.register_fake_conductors()
        mocked_driver_vendor_passthru.return_value = {'return': None,
                                                      'async': True,
                                                      'attach': False}
        response = self.post_json(
            '/drivers/%s/vendor_passthru/do_test' % self.d1,
            {'test_key': 'test_value'})
        self.assertEqual(http_client.ACCEPTED, response.status_int)
        self.assertIsNone(mocked_driver_vendor_passthru.return_value['return'])

    @mock.patch.object(rpcapi.ConductorAPI, 'driver_vendor_passthru')
    def test_driver_vendor_passthru_put(self, mocked_driver_vendor_passthru):
        self.register_fake_conductors()
        return_value = {'return': None, 'async': True, 'attach': False}
        mocked_driver_vendor_passthru.return_value = return_value
        response = self.put_json(
            '/drivers/%s/vendor_passthru/do_test' % self.d1,
            {'test_key': 'test_value'})
        self.assertEqual(http_client.ACCEPTED, response.status_int)
        self.assertEqual(return_value['return'], response.json)

    @mock.patch.object(rpcapi.ConductorAPI, 'driver_vendor_passthru')
    def test_driver_vendor_passthru_get(self, mocked_driver_vendor_passthru):
        self.register_fake_conductors()
        return_value = {'return': 'foo', 'async': False, 'attach': False}
        mocked_driver_vendor_passthru.return_value = return_value
        response = self.get_json(
            '/drivers/%s/vendor_passthru/do_test' % self.d1)
        self.assertEqual(return_value['return'], response)

    @mock.patch.object(rpcapi.ConductorAPI, 'driver_vendor_passthru')
    def test_driver_vendor_passthru_delete(self, mock_driver_vendor_passthru):
        self.register_fake_conductors()
        return_value = {'return': None, 'async': True, 'attach': False}
        mock_driver_vendor_passthru.return_value = return_value
        response = self.delete(
            '/drivers/%s/vendor_passthru/do_test' % self.d1)
        self.assertEqual(http_client.ACCEPTED, response.status_int)
        self.assertEqual(return_value['return'], response.json)

    def test_driver_vendor_passthru_driver_not_found(self):
        # tests when given driver is not found
        # e.g. get_topic_for_driver fails to find the driver
        response = self.post_json(
            '/drivers/%s/vendor_passthru/do_test' % self.d1,
            {'test_key': 'test_value'},
            expect_errors=True)
        self.assertEqual(http_client.NOT_FOUND, response.status_int)

    def test_driver_vendor_passthru_method_not_found(self):
        response = self.post_json(
            '/drivers/%s/vendor_passthru' % self.d1,
            {'test_key': 'test_value'},
            expect_errors=True)
        self.assertEqual(http_client.BAD_REQUEST, response.status_int)
        error = json.loads(response.json['error_message'])
        self.assertEqual('Missing argument: "method"',
                         error['faultstring'])

    @mock.patch.object(rpcapi.ConductorAPI,
                       'get_driver_vendor_passthru_methods')
    def test_driver_vendor_passthru_methods(self, get_methods_mock):
        self.register_fake_conductors()
        return_value = {'foo': 'bar'}
        get_methods_mock.return_value = return_value
        path = '/drivers/%s/vendor_passthru/methods' % self.d1
        data = self.get_json(path)
        self.assertEqual(return_value, data)
        get_methods_mock.assert_called_once_with(mock.ANY, self.d1,
                                                 topic=mock.ANY)
        # Now let's test the cache: Reset the mock
        get_methods_mock.reset_mock()
        # Call it again
        data = self.get_json(path)
        self.assertEqual(return_value, data)
        # Assert RPC method wasn't called this time
        self.assertFalse(get_methods_mock.called)

    @mock.patch.object(rpcapi.ConductorAPI, 'get_raid_logical_disk_properties')
    def test_raid_logical_disk_properties(self, disk_prop_mock):
        driver._RAID_PROPERTIES = {}
        self.register_fake_conductors()
        properties = {'foo': 'description of foo'}
        disk_prop_mock.return_value = properties
        path = '/drivers/%s/raid/logical_disk_properties' % self.d1
        data = self.get_json(path,
                             headers={api_base.Version.string: "1.12"})
        self.assertEqual(properties, data)
        disk_prop_mock.assert_called_once_with(mock.ANY, self.d1,
                                               topic=mock.ANY)

    @mock.patch.object(rpcapi.ConductorAPI, 'get_raid_logical_disk_properties')
    def test_raid_logical_disk_properties_older_version(self, disk_prop_mock):
        driver._RAID_PROPERTIES = {}
        self.register_fake_conductors()
        properties = {'foo': 'description of foo'}
        disk_prop_mock.return_value = properties
        path = '/drivers/%s/raid/logical_disk_properties' % self.d1
        ret = self.get_json(path,
                            headers={api_base.Version.string: "1.4"},
                            expect_errors=True)
        # Use the symbolic constant for consistency with the other tests.
        self.assertEqual(http_client.NOT_ACCEPTABLE, ret.status_code)

    @mock.patch.object(rpcapi.ConductorAPI, 'get_raid_logical_disk_properties')
    def test_raid_logical_disk_properties_cached(self, disk_prop_mock):
        # only one RPC-conductor call will be made and the info cached
        # for subsequent requests
        driver._RAID_PROPERTIES = {}
        self.register_fake_conductors()
        properties = {'foo': 'description of foo'}
        disk_prop_mock.return_value = properties
        path = '/drivers/%s/raid/logical_disk_properties' % self.d1
        for _ in range(3):
            data = self.get_json(path,
                                 headers={api_base.Version.string: "1.12"})
            self.assertEqual(properties, data)
        disk_prop_mock.assert_called_once_with(mock.ANY, self.d1,
                                               topic=mock.ANY)
        self.assertEqual(properties, driver._RAID_PROPERTIES[self.d1])

    @mock.patch.object(rpcapi.ConductorAPI, 'get_raid_logical_disk_properties')
    def test_raid_logical_disk_properties_iface_not_supported(
            self, disk_prop_mock):
        driver._RAID_PROPERTIES = {}
        self.register_fake_conductors()
        disk_prop_mock.side_effect = iter(
            [exception.UnsupportedDriverExtension(
                extension='raid', driver='fake')])
        path = '/drivers/%s/raid/logical_disk_properties' % self.d1
        ret = self.get_json(path,
                            headers={api_base.Version.string: "1.12"},
                            expect_errors=True)
        # Use the symbolic constant for consistency with the other tests.
        self.assertEqual(http_client.NOT_FOUND, ret.status_code)
        self.assertTrue(ret.json['error_message'])
        disk_prop_mock.assert_called_once_with(mock.ANY, self.d1,
                                               topic=mock.ANY)
@mock.patch.object(rpcapi.ConductorAPI, 'get_driver_properties')
@mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for_driver')
class TestDriverProperties(base.FunctionalTest):
    """Tests for GET /drivers/<name>/properties, including result caching.

    The class-level mock.patch decorators apply to every test method;
    note mock arguments arrive innermost-first: mock_topic (for
    get_topic_for_driver), then mock_properties (get_driver_properties).
    """

    def test_driver_properties_fake(self, mock_topic, mock_properties):
        # Can get driver properties for fake driver.
        driver._DRIVER_PROPERTIES = {}  # reset the module-level cache
        driver_name = 'fake'
        mock_topic.return_value = 'fake_topic'
        mock_properties.return_value = {'prop1': 'Property 1. Required.'}
        data = self.get_json('/drivers/%s/properties' % driver_name)
        self.assertEqual(mock_properties.return_value, data)
        mock_topic.assert_called_once_with(driver_name)
        mock_properties.assert_called_once_with(mock.ANY, driver_name,
                                                topic=mock_topic.return_value)
        # The fetched properties should now be cached.
        self.assertEqual(mock_properties.return_value,
                         driver._DRIVER_PROPERTIES[driver_name])

    def test_driver_properties_cached(self, mock_topic, mock_properties):
        # only one RPC-conductor call will be made and the info cached
        # for subsequent requests
        driver._DRIVER_PROPERTIES = {}  # reset the module-level cache
        driver_name = 'fake'
        mock_topic.return_value = 'fake_topic'
        mock_properties.return_value = {'prop1': 'Property 1. Required.'}
        # Three identical requests; only the first should hit the conductor.
        data = self.get_json('/drivers/%s/properties' % driver_name)
        data = self.get_json('/drivers/%s/properties' % driver_name)
        data = self.get_json('/drivers/%s/properties' % driver_name)
        self.assertEqual(mock_properties.return_value, data)
        mock_topic.assert_called_once_with(driver_name)
        mock_properties.assert_called_once_with(mock.ANY, driver_name,
                                                topic=mock_topic.return_value)
        self.assertEqual(mock_properties.return_value,
                         driver._DRIVER_PROPERTIES[driver_name])

    def test_driver_properties_invalid_driver_name(self, mock_topic,
                                                   mock_properties):
        # Cannot get driver properties for an invalid driver; no RPC topic
        # exists for it.
        driver._DRIVER_PROPERTIES = {}  # reset the module-level cache
        driver_name = 'bad_driver'
        mock_topic.side_effect = exception.DriverNotFound(
            driver_name=driver_name)
        mock_properties.return_value = {'prop1': 'Property 1. Required.'}
        ret = self.get_json('/drivers/%s/properties' % driver_name,
                            expect_errors=True)
        self.assertEqual(http_client.NOT_FOUND, ret.status_int)
        mock_topic.assert_called_once_with(driver_name)
        # The properties RPC must never be attempted without a topic.
        self.assertFalse(mock_properties.called)

    def test_driver_properties_cannot_load(self, mock_topic, mock_properties):
        # Cannot get driver properties for the driver. Although an RPC topic
        # exists for it, the conductor wasn't able to load it.
        driver._DRIVER_PROPERTIES = {}  # reset the module-level cache
        driver_name = 'driver'
        mock_topic.return_value = 'driver_topic'
        mock_properties.side_effect = exception.DriverNotFound(
            driver_name=driver_name)
        ret = self.get_json('/drivers/%s/properties' % driver_name,
                            expect_errors=True)
        self.assertEqual(http_client.NOT_FOUND, ret.status_int)
        mock_topic.assert_called_once_with(driver_name)
        mock_properties.assert_called_once_with(mock.ANY, driver_name,
                                                topic=mock_topic.return_value)
| |
# -*- coding: utf-8 -*-
"""
Holds functions that compute implication covers for a given context
"""
import copy
import closure_operators
from fca.implication import Implication
import fca
def compute_implication_cover(cxt, close=closure_operators.closure):
    """
    Compute an implication cover for a given *cxt* using
    an object-incremental algorithm
    """
    # NOTE(review): the *close* parameter is accepted but never used in this
    # body -- confirm whether it was meant to be threaded into updated_basis().
    attributes = set(cxt.attributes)
    # Start from the trivial basis: empty premise implies all attributes.
    basis = [Implication(set(), attributes.copy())]
    i = 0
    for intent in cxt.examples():
        i += 1
        print 'object ', i  # progress: one line per object processed
        # print_basis(basis)
        # print 'Adding ', intent
        # raw_input()
        basis = updated_basis(intent, basis, attributes)
    print len(basis), 'implications'
    return basis
def print_basis(basis):
    """Print every implication in *basis*, delimited by marker lines."""
    print '***'
    for imp in basis:
        print imp
    print '+++'
def is_redundant(imp, basis, close=closure_operators.simple_closure):
    """Return True if *imp* already follows from *basis* under *close*."""
    closed_premise = close(imp.premise, basis)
    return imp.conclusion <= closed_premise
def is_new(imp, implications):
    """Return True if *imp* adds information not present in *implications*.

    Trivial implications (conclusion inside premise) and implications simply
    subsumed by an existing one are rejected.  The remaining branches only
    print diagnostics for subsumption cases that should never occur.
    """
    if imp.conclusion <= imp.premise:
        return False
    for i in implications:
        if is_subsumed_simply(imp, i):
            return False
        elif is_subsumed(imp, i):
            print 'ALERT: %s is sumbsumed by %s' %(imp, i)
            #raise 'ALERT: %s is sumbsumed by %s' %(imp, i)
        elif is_subsumed(i, imp):
            print 'ALERT: %s sumbsumes %s' % (imp, i)
            #raise 'ALERT: %s sumbsumes %s' % (imp, i)
    return True
def is_subsumed_simply(imp, by_imp):
    """Check whether *by_imp* makes *imp* redundant.

    *imp* is simply subsumed when *by_imp* has a weaker (smaller) premise
    and a stronger (larger) conclusion.
    """
    weaker_premise = by_imp.premise <= imp.premise
    stronger_conclusion = imp.conclusion <= by_imp.conclusion
    return weaker_premise and stronger_conclusion
def is_subsumed(imp, by_imp):
    """Check subsumption, crediting attributes already in *imp*'s premise.

    *by_imp* subsumes *imp* when its premise is weaker and its conclusion
    together with *imp*'s own premise covers *imp*'s conclusion.
    """
    if not (by_imp.premise <= imp.premise):
        return False
    return imp.conclusion <= (by_imp.conclusion | imp.premise)
def remove_subsumed_plus(imp, implications):
    """Merge *imp* into *implications*, dropping anything it subsumes.

    Mutates both *imp* and *implications* in place.  Returns True when the
    caller should still append *imp*, False when it is trivial or was
    absorbed into an existing implication.
    """
    # A trivial implication (conclusion inside premise) adds nothing.
    if imp.conclusion <= imp.premise:
        return False
    # Iterate over a snapshot since we may remove from the live list.
    for existing in implications[:]:
        if existing.premise == imp.premise:
            # Same premise: fold imp's conclusion into the existing one.
            existing.conclusion.update(imp.conclusion)
            return False
        elif existing.premise <= imp.premise:
            if imp.conclusion <= existing.conclusion:
                # Fully covered by a weaker-premise implication.
                return False
            imp.conclusion.update(existing.conclusion)
        elif imp.premise <= existing.premise:
            if existing.conclusion <= imp.conclusion:
                implications.remove(existing)
            else:
                existing.conclusion.update(imp.conclusion)
    return True
def remove_subsumed(imp, implications):
    """Absorb *imp* against *implications*, pruning what it now subsumes.

    Mutates *imp* and *implications* in place.  Returns True when the
    (possibly enlarged) *imp* should be kept, False when it turns out to
    be trivial or already covered.
    """
    if imp.conclusion <= imp.premise:
        return False
    may_be_covered = True
    for existing in implications[:]:
        if may_be_covered and existing.premise <= imp.premise:
            if imp.conclusion <= existing.conclusion:
                return False
            imp.conclusion.update(existing.conclusion)
        elif is_subsumed_simply(existing, imp):
            implications.remove(existing)
            # Once imp has pruned something it can no longer be covered.
            may_be_covered = False
    return True
def remove_subsumed_simply(imp, implications):
    """Drop from *implications* everything simply subsumed by *imp*.

    Returns False if *imp* is trivial or itself simply subsumed by an
    existing implication, True otherwise.  Mutates *implications*.
    """
    if imp.conclusion <= imp.premise:
        return False
    may_be_covered = True
    for existing in implications[:]:
        if may_be_covered and is_subsumed_simply(imp, existing):
            return False
        elif is_subsumed_simply(existing, imp):
            implications.remove(existing)
            # Once imp has pruned something it can no longer be covered.
            may_be_covered = False
    return True
def add_smartly(imp, new_valid, valid):
    """Append *imp* to *new_valid* if it is new w.r.t. *valid* and survives
    simple-subsumption filtering against *new_valid*."""
    if not is_new(imp, valid):
        return
    if remove_subsumed_simply(imp, new_valid):
        new_valid.append(imp)
def updated_basis(intent, basis, attributes):
    """Return *basis* updated to respect the new object intent *intent*.

    Implications violated by *intent* are replaced by a weakened version
    (conclusion restricted to *intent*) plus strengthened versions
    (premise extended by one attribute outside *intent*).
    """
    valid = []
    invalid = []
    # Partition: an implication is invalid iff its premise holds in *intent*
    # but its conclusion does not.
    for imp in basis:
        if not imp.premise <= intent or imp.conclusion <= intent:
            valid.append(imp)
        else:
            invalid.append(imp)
    new_valid = []
    # Weaken each invalid implication so it holds on *intent*.
    for imp in invalid:
        new_imp = Implication(imp.premise, imp.conclusion & intent)
        add_smartly(new_imp, new_valid, valid)
    valid += new_valid
    new_valid = []
    # Strengthen premises with attributes missing from *intent*.
    for imp in invalid:
        for a in attributes - intent:
            aset = set([a])
            new_imp = Implication(imp.premise | aset, imp.conclusion | aset)
            if (remove_subsumed(new_imp, valid) and remove_subsumed_plus(new_imp, new_valid)):
                new_valid.append(new_imp)
    return valid + new_valid
def minimize(cover, close=closure_operators.simple_closure):
    """Minimize *cover* in place by closing each premise against the rest.

    An implication whose conclusion ends up inside its closed premise is
    redundant and is removed.  Mutates *cover* and its implications.
    """
    # i = 0
    # for imp in cover:
    #     i += 1
    #     print 'maximizing conclusion ', i
    #     imp._conclusion = close(imp.premise | imp.conclusion, cover)
    i = 0
    for imp in cover[:]:
        i += 1
        print 'maximizing premise ', i  # progress output
        # Remove imp so it does not participate in closing its own premise.
        cover.remove(imp)
        imp._premise = close(imp.premise, cover)
        if not imp.conclusion <= imp.premise:
            cover.append(imp)
    print len(cover), 'implications'
if __name__ == "__main__":
    # Demo 1: airline alliance context (objects x destinations served).
    objects = ['Air Canada', 'Air New Zeland', 'All Nippon Airways',
               'Ansett Australia', 'The Australian Airlines Group',
               'British Midland', 'Lufthansa', 'Mexicana',
               'Scandinavian Airlines', 'Singapore Airlines',
               'Thai Airways International', 'United Airlines',
               'VARIG']
    attributes = ['Latin America', 'Europe', 'Canada', 'Asia Pasific',
                  'Middle East', 'Africa', 'Mexico', 'Carribean',
                  'United States']
    table = [[True, True, True, True, True, False, True, True, True],
             [False, True, False, True, False, False, False, False, True],
             [False, True, False, True, False, False, False, False, True],
             [False, False, False, True, False, False, False, False, False],
             [False, True, True, True, True, True, False, False, True],
             [False, True, False, False, False, False, False, False, False],
             [True, True, True, True ,True, True, True, False, True],
             [True, False, True, False, False, False, True, True, True],
             [True, True, False, True, False, True, False, False, True],
             [False, True, True, True, True, True, False, False, True],
             [True, True, False, True, False, False, False, True, True],
             [True, True, True, True, False, False, True, True, True],
             [True, True, False, True, False, True, True, False, True]]
    cxt = fca.Context(table, objects, attributes)
    imp_basis = compute_implication_cover(cxt, closure_operators.closure)
    print_basis(imp_basis)
    minimize(imp_basis)
    print(len(imp_basis))
    for imp in imp_basis:
        print imp
    print '***'
    # Demo 2: small synthetic 4x4 context.
    objects = [1, 2, 3, 4]
    attributes = ['a', 'b', 'c', 'd']
    table = [[True, False, True, True],
             [True, False, True, False],
             [False, True, True, False],
             [False, True, False, True]]
    cxt = fca.Context(table, objects, attributes)
    imp_basis = compute_implication_cover(cxt, closure_operators.closure)
    print_basis(imp_basis)
    minimize(imp_basis)
    print(len(imp_basis))
    for imp in imp_basis:
        print imp
| |
import collections
import pandas as pd
import numpy as np
from openpathsampling.netcdfplus import StorableNamedObject
class ChannelAnalysis(StorableNamedObject):
"""Analyze path sampling simulation for multiple channels.
User defines several channels (e.g., mechanisms) as :class:`.Ensemble`
objects. This checks which channels each path satisfies, and provides
analysis of switching and residence.
Parameters
----------
steps : iterable of :class:`.MCStep`
the steps to analyze
channels: dict of {string: :class:`.Ensemble`}
names (keys) and ensembles (values) representing subtrajectories of the
channels of interest
replica: int
replica ID to analyze from the steps, default is 0.
"""
def __init__(self, steps, channels, replica=0):
super(ChannelAnalysis, self).__init__()
self.channels = channels
if steps is None:
steps = []
self.replica = replica
self._treat_multiples = 'all'
self._results = {c: [] for c in list(self.channels.keys()) + [None]}
if len(steps) > 0:
self._analyze(steps)
def to_dict(self):
return {
'results': self._results,
'treat_multiples': self._treat_multiples,
'channels': self.channels,
'replica': self.replica
}
@classmethod
def from_dict(cls, dct):
obj = cls(steps=None,
channels=dct['channels'],
replica=dct['replica'])
obj._results = dct['results']
obj._treat_multiples = dct['treat_multiples']
return obj
# separate this because I think much of the code might be generalized
# later where step_num could be something else
    @staticmethod
    def _step_num(step):
        """Return ordinal number for the given input object.

        Abstracted so that other things might replace it.

        Parameters
        ----------
        step : :class:`.MCStep`
            the step

        Returns
        -------
        int :
            MC cycle number
        """
        # Currently just the MC cycle; kept as a hook for other orderings.
        return step.mccycle
    def _analyze(self, steps):
        """Primary analysis routine.

        Converts the input steps to an internal ._results dictionary of
        channel name to list of (start, end) tuples for when that channel is
        occupied.

        Parameters
        ----------
        steps : iterable of :class:`.MCStep`
            the steps to analyze
        """
        # for now, this assumes only one ensemble per channel
        # (would like that to change in the future)
        prev_traj = None
        # last_start[c] is the step number where channel c most recently
        # became active, or None while it is inactive.
        last_start = {c: None for c in self._results}
        for step in steps:
            step_num = self._step_num(step)
            traj = step.active[self.replica].trajectory
            if prev_traj is None:
                # First step: initialize occupancy for every channel.
                prev_result = {c: len(self.channels[c].split(traj)) > 0
                               for c in self.channels}
                prev_result[None] = not any(prev_result.values())
                for c in last_start:
                    if prev_result[c] is True:
                        last_start[c] = step_num
            # re-use previous if the trajectory hasn't changed
            if traj is prev_traj:
                result = prev_result
            else:
                result = {c: len(self.channels[c].split(traj)) > 0
                          for c in self.channels}
                result[None] = not any(result.values())
            changed = [c for c in result if result[c] != prev_result[c]]
            for c in changed:
                if result[c] is True:
                    # switched from False to True: entered this label
                    last_start[c] = step_num
                else:
                    # switched from True to False: exited this label
                    finish = step_num
                    self._results[c] += [(last_start[c], finish)]
                    last_start[c] = None
            prev_traj = traj
            prev_result = result
        # finish off any extras
        # NOTE: step_num from the loop above is only defined because callers
        # invoke _analyze with a non-empty steps iterable (see __init__).
        next_step = step_num + 1  # again, this can be changed
        for c in self._results:
            if last_start[c] is not None:
                if len(self._results[c]) > 0:
                    # don't do double it if it's already there
                    if self._results[c][-1][1] != step_num:
                        self._results[c] += [(last_start[c], next_step)]
                    # note: is the else: of the above even possible?
                    # namely, do we need the if statement? should test that
                else:
                    self._results[c] += [(last_start[c], next_step)]
    @property
    def treat_multiples(self):
        """
        string : method for handling paths that match multiple channels

        Allowed values are:

        * 'newest': use the most recent channel entered
        * 'oldest': use the least recent channel entered
        * 'multiple': treat multiple channels as a new type of channel, e.g.,
          'a' and 'b' becomes 'a,b'
        * 'all': treat each channel individually, despite overlaps. For
          switching, this is the same as ???. For status, this is the
          same as 'multiple'
        """
        # Backed by _treat_multiples; validation happens in the setter.
        return self._treat_multiples
@treat_multiples.setter
def treat_multiples(self, value):
value = value.lower()
if value not in ['all', 'newest', 'oldest', 'multiple']:
raise ValueError("Invalid value for treat_multiples: " +
str(value))
self._treat_multiples = value
@staticmethod
def _expand_results(results):
"""
Takes ._results dict and makes it into chronological list of events
Note
----
The output of this is in terms of "channel events." It doesn't
do anything to ensure that only one channel is defined at a
given time --- so subsequent events can include overlapping
times. Other functions relabel by time.
See also
--------
_labels_by_step_newest
_labels_by_step_oldest
_labels_by_step_multiple
Parameters
----------
results : dict of {str: [(int, int), ...]}
the results dictionary. The keys are the channel names, and the
values are a list of tuples representing the start and finish
step numbers for the range of steps while in this channel
Returns
-------
list of 3-tuples (int, int, frozenset) :
the "events": each event is the tuple of start step, finish
step, and channel name (as a frozenset containing one string),
sorted according to the start step.
"""
expanded = [(domain[0], domain[1], frozenset([channel]))
for channel in results for domain in results[channel]]
return sorted(expanded, key=lambda tup: tup[0])
@staticmethod
def _labels_by_step_newest(expanded_results):
"""
Makes one channel per step, based on most recent channel entered.
See also
--------
_expand_results
_labels_by_step_oldest
_labels_by_step_multiple
labels_by_step
Parameters
----------
expanded_results : list of 3-tuples (int, int, frozenset)
input events; the output of _expand_results (see details there)
Returns
-------
list of 3-tuples (int, int, frozenset)
events with ranges such that there is only one channel at any
given step number, and that channel is the most recent entered
"""
relabeled = []
previous = expanded_results[0]
for current in expanded_results[1:]:
relabeled += [(previous[0], current[0], previous[2])]
previous = current
relabeled += [expanded_results[-1]]
return relabeled
    @staticmethod
    def _labels_by_step_oldest(expanded_results):
        """
        Makes one channel per step, based on least recent channel entered.

        See also
        --------
        _expand_results
        _labels_by_step_newest
        _labels_by_step_multiple
        labels_by_step

        Parameters
        ----------
        expanded_results : list of 3-tuples (int, int, frozenset)
            input events; the output of _expand_results (see details there)

        Returns
        -------
        list of 3-tuples (int, int, frozenset) :
            events with ranges such that there is only one channel at any
            given step number, and that channel is the least recent entered
        """
        relabeled = []
        previous = expanded_results[0]
        for current in expanded_results[1:]:
            if current[1] > previous[1]:
                # ends after last one ended
                # if this isn't true, this one gets skipped
                # if it is true, then previous is used
                relabeled += [previous]
                # save the new starting point
                # (current's label, but starting where previous ended)
                previous = (previous[1], current[1], current[2])
            # NOTE: Tests include a case for the implicit "else" here, but
            # for some reason an empty `else: pass` doesn't show up as
            # covering the `pass` line; so removed them
        # Emit the dangling event unless it is already the last one emitted.
        if relabeled[-1] != previous:
            relabeled += [previous]
        else:
            pass  # for testing
        return relabeled
@staticmethod
def _labels_by_step_multiple(expanded_results):
"""Makes one channel label per step, combining all active channels.
See also
--------
_expand_results
_labels_by_step_newest
_labels_by_step_oldest
labels_by_step
Parameters
----------
expanded_results : list of 3-tuples (int, int, frozenset)
input events; the output of _expand_results (see details there)
Returns
-------
list of 3-tuples (int, int, frozenset) :
events such that there is only one event at any given step
number, with the channel label as the set of all active channels
"""
relabeled = []
# start events are times when a channel is added to the active
# finish events are when channel is removed from the active
# both are dicts of time to a set of channels
start_events = collections.defaultdict(set)
finish_events = collections.defaultdict(set)
for event in expanded_results:
start_events[event[0]] |= set(event[2])
finish_events[event[1]] |= set(event[2])
all_event_steps = set(start_events.keys()) | set(finish_events.keys())
active_channels = set([])
prev_step_num = None
# note to self: this is some elegant freaking code
for step_num in sorted(list(all_event_steps)):
if prev_step_num is not None:
relabeled += [(prev_step_num, step_num,
frozenset(active_channels))]
# defaultdict gives empty if doesn't exist
active_channels -= finish_events[step_num]
active_channels |= start_events[step_num]
prev_step_num = step_num
return relabeled
def labels_by_step(self, treat_multiples=None):
    """Prepare internally stored results for primary analysis routines.

    Note
    ----
    The results of this depend on the value of ``treat_multiples``.
    This method is only a switch over the specific ``treat_multiples``
    values; the real work happens in the per-mode helpers.

    See also
    --------
    _labels_by_step_newest
    _labels_by_step_oldest
    _labels_by_step_multiple

    Parameters
    ----------
    treat_multiples : 'all', 'newest', 'oldest', 'multiple', or None
        method to prepare output; see documentation on
        ``treat_multiples`` for details. Default is `None`, which
        uses the value in ``self.treat_multiples``.

    Returns
    -------
    list of 3-tuples (int, int, frozenset) :
        events such that there is only one event at any give step
    """
    if treat_multiples is None:
        treat_multiples = self.treat_multiples
    expanded = self._expand_results(self._results)
    # 'all' means "no post-processing": hand back the expanded events
    if treat_multiples == 'all':
        return expanded
    # unknown modes raise KeyError here, matching dict-dispatch behavior
    handler = {
        'newest': self._labels_by_step_newest,
        'oldest': self._labels_by_step_oldest,
        'multiple': self._labels_by_step_multiple,
    }[treat_multiples]
    return handler(expanded)
@staticmethod
def _labels_as_sets_sort_function(label):
    """Sort key for set-valued labels.

    The sort order is first by the number of non-None items in the
    label, then by the sorted items themselves. A label containing
    None sorts before any real channel of the same size (and the
    pure-None label sorts first overall, since its length is 0).

    Parameters
    ----------
    label: frozenset of list of (string or None)
        input label

    Returns
    -------
    list:
        length of the label (excluding None), optionally followed by
        None, followed by the remaining items in sorted order
    """
    items = sorted(x for x in label if x is not None)
    none_prefix = [None] if None in label else []
    return [len(items)] + none_prefix + items
@staticmethod
def label_to_string(label):
    """Convert a set of string/None to a comma-separated string.

    For example, frozenset(['c', 'a']) becomes 'a,c' (no space).
    Kept as its own helper so the formatting stays consistent across
    all reporting code paths.

    Parameters
    ----------
    label: frozenset of list of (string or None)
        input label

    Returns
    -------
    string:
        the string for this label
    """
    parts = sorted(str(item) for item in label)
    return ",".join(parts)
@property
def switching_matrix(self):
    """
    pandas.DataFrame :
        number of switches from one channel to another. Depends on
        ``treat_multiples``, see details there.
    """
    steps = self.labels_by_step()
    ordered_labels = [step[2] for step in steps]
    # axis labels: every distinct label, in canonical (size, items) order
    axis = [self.label_to_string(lbl)
            for lbl in sorted(set(ordered_labels),
                              key=self._labels_as_sets_sort_function)]
    # count consecutive (from, to) transitions between steps
    transitions = collections.Counter(
        (self.label_to_string(src), self.label_to_string(dst))
        for src, dst in zip(ordered_labels, ordered_labels[1:]))
    matrix = pd.DataFrame(index=axis, columns=axis)
    for (src, dst), count in transitions.items():
        matrix.at[src, dst] = count
    # cells with no observed transition become 0 instead of NaN
    return matrix.fillna(0)
@property
def residence_times(self):
    """
    Dict[string, List[int]] :
        number of steps spent in each channel for each "stay" in that
        channel; allows calculations of distribution properties. Depends
        on ``treat_multiples``, see details there.
    """
    stays = collections.defaultdict(list)
    for start, finish, channels in self.labels_by_step():
        # one entry per contiguous stay; length is the step span
        stays[self.label_to_string(channels)].append(finish - start)
    return stays
@property
def total_time(self):
    """
    Dict[string, int] :
        total number of steps spent in each channel for each "stay" in
        that channel. Depends on ``treat_multiples``, see details there.
    """
    # keep the defaultdict return type: absent channels read as 0
    totals = collections.defaultdict(int)
    for channel, stays in self.residence_times.items():
        totals[channel] = sum(stays)
    return totals
def status(self, step_number):
    """Report which channel(s) are associated with a given step number.

    Note
    ----
    Results will depend on the value of ``treat_multiples``. See
    details in the documentation for that.

    Parameters
    ----------
    step_number : int
        the step number of interest

    Returns
    -------
    string :
        the string label for the channel(s)

    Raises
    ------
    RuntimeError
        if ``step_number`` is outside the recorded step range
    """
    mode = self.treat_multiples
    # 'all' may report several overlapping events for one step;
    # collapse them into a single combined label instead
    if mode == 'all':
        mode = 'multiple'
    steps = self.labels_by_step(mode)
    for start, finish, channels in steps:
        # intervals are half-open: [start, finish)
        if start <= step_number < finish:
            return self.label_to_string(channels)
    raise RuntimeError("Step " + str(step_number) + " outside of range."
                       + " Max step: " + str(steps[-1][1]))
| |
from __future__ import absolute_import, division, print_function
import locale
import re
import os
import sys
import stat
from glob import glob
from os.path import (basename, dirname, join, splitext, isdir, isfile, exists,
islink, realpath, relpath)
try:
from os import readlink
except ImportError:
readlink = False
import io
from subprocess import call, Popen, PIPE
from collections import defaultdict
from conda_build.config import config
from conda_build import external
from conda_build import environ
from conda_build import utils
from conda_build import source
from conda.compat import lchmod
from conda.misc import walk_prefix
from conda.utils import md5_file
if sys.platform.startswith('linux'):
from conda_build import elf
elif sys.platform == 'darwin':
from conda_build import macho
SHEBANG_PAT = re.compile(r'^#!.+$', re.M)
def is_obj(path):
    """Return True if *path* is a native object file for this platform
    (an ELF binary on Linux, a Mach-O binary on macOS).

    Must not be called on Windows; relocation logic does not apply there.
    """
    assert sys.platform != 'win32'
    if sys.platform.startswith('linux'):
        return bool(elf.is_elf(path))
    if sys.platform == 'darwin':
        return bool(macho.is_macho(path))
    return False
def fix_shebang(f, osx_is_app=False):
    """Rewrite the shebang of script *f* (prefix-relative path) so it
    points at the build prefix's Python interpreter.

    Skips binaries, symlinks, undecodable (binary) files, and scripts
    whose shebang does not mention python. On macOS app bundles the
    interpreter is launched through /bin/bash + python.app.
    """
    path = join(config.build_prefix, f)
    # native objects and symlinks are left untouched
    if is_obj(path) or os.path.islink(path):
        return
    encoding = locale.getpreferredencoding()
    with io.open(path, encoding=encoding) as fi:
        try:
            data = fi.read()
        except UnicodeDecodeError:  # file is binary
            return
    m = SHEBANG_PAT.match(data)
    if m is None or 'python' not in m.group():
        return
    if sys.platform == 'darwin' and osx_is_app:
        py_exec = '/bin/bash ' + config.build_prefix + '/bin/python.app'
    else:
        py_exec = config.build_prefix + '/bin/' + basename(config.build_python)
    # only the first shebang-looking line is replaced
    new_data = SHEBANG_PAT.sub('#!' + py_exec, data, count=1)
    if new_data == data:
        return
    print("updating shebang:", f)
    with io.open(path, 'w', encoding=encoding) as fo:
        fo.write(new_data)
    # ensure the script stays executable after rewriting
    os.chmod(path, int('755', 8))
def write_pth(egg_path):
    """Create a .pth file in site-packages that points at *egg_path*,
    so the egg stays importable without easy-install.pth."""
    fn = basename(egg_path)
    # name the .pth after the distribution (text before the first '-')
    pth_name = '%s.pth' % (fn.split('-')[0])
    with open(join(environ.get_sp_dir(), pth_name), 'w') as fo:
        fo.write('./%s\n' % fn)
def remove_easy_install_pth(files, preserve_egg_dir=False):
    """
    remove the need for easy-install.pth and finally remove easy-install.pth
    itself

    Egg directories are unpacked into site-packages (unless
    preserve_egg_dir is set, or the egg contains files not tracked in
    *files*, in which case a per-egg .pth is written instead); egg files
    tracked in *files* get a per-egg .pth. Finally easy-install.pth is
    deleted.
    """
    absfiles = [join(config.build_prefix, f) for f in files]
    sp_dir = environ.get_sp_dir()
    for egg_path in glob(join(sp_dir, '*-py*.egg')):
        if isdir(egg_path):
            # Keep the egg directory intact (and reachable via a .pth)
            # when asked to, or when none of its contents belong to the
            # package being built.
            if preserve_egg_dir or not any(join(egg_path, i) in absfiles for i
                    in walk_prefix(egg_path, False)):
                write_pth(egg_path)
                continue
            print('found egg dir:', egg_path)
            # Preserve the egg metadata as <egg>-info before removing
            # the EGG-INFO directory.
            try:
                os.rename(join(egg_path, 'EGG-INFO/PKG-INFO'),
                          egg_path + '-info')
            except OSError:
                pass
            utils.rm_rf(join(egg_path, 'EGG-INFO'))
            # Move the egg's contents up into site-packages.
            for fn in os.listdir(egg_path):
                if fn == '__pycache__':
                    utils.rm_rf(join(egg_path, fn))
                else:
                    # this might be a name-space package
                    # so the package directory already exists
                    # from another installed dependency
                    if os.path.exists(join(sp_dir, fn)):
                        utils.copy_into(join(egg_path, fn), join(sp_dir, fn))
                        utils.rm_rf(join(egg_path, fn))
                    else:
                        os.rename(join(egg_path, fn), join(sp_dir, fn))
        elif isfile(egg_path):
            # Zipped egg: only handle eggs that belong to this package.
            if not egg_path in absfiles:
                continue
            print('found egg:', egg_path)
            write_pth(egg_path)
    utils.rm_rf(join(sp_dir, 'easy-install.pth'))
def rm_py_along_so():
    """Delete .py and .pyc files that sit next to a same-named .so/.pyd.

    The compiled extension module shadows the pure-Python source, so the
    source copies are redundant in the built package.
    """
    for root, _dirs, files in os.walk(config.build_prefix):
        for fn in files:
            if not fn.endswith(('.so', '.pyd')):
                continue
            stem = splitext(fn)[0]
            for sibling in (stem + '.py', stem + '.pyc'):
                if sibling in files:
                    os.unlink(join(root, sibling))
def compile_missing_pyc():
    """Byte-compile site-packages if any .py file is missing its .pyc.

    Scans site-packages for a .py without a matching .pyc in the same
    directory; if one is found, runs the interpreter's compileall over
    the whole site-packages tree ('port_v3' directories excluded).
    """
    sp_dir = environ.get_sp_dir()
    stdlib_dir = environ.get_stdlib_dir()
    need_compile = False
    for root, dirs, files in os.walk(sp_dir):
        for fn in files:
            if fn.endswith('.py') and fn + 'c' not in files:
                need_compile = True
                break
        if need_compile:
            # BUG FIX: the inner break only left the per-directory loop,
            # so the whole tree kept being walked even after a missing
            # .pyc was found; one hit is enough to decide.
            break
    if need_compile:
        print('compiling .pyc files...')
        utils._check_call([config.build_python, '-Wi',
                           join(stdlib_dir, 'compileall.py'),
                           '-q', '-x', 'port_v3', sp_dir])
def post_process(files, preserve_egg_dir=False):
    """Run the Python-specific post-install fixups on the built prefix.

    Unpacks/registers eggs, removes .py sources shadowed by compiled
    extensions, and (on Python 2) byte-compiles any missing .pyc files.
    """
    remove_easy_install_pth(files, preserve_egg_dir=preserve_egg_dir)
    rm_py_along_so()
    # Python 3 manages __pycache__ itself; only py2 needs explicit .pyc
    if config.CONDA_PY < 30:
        compile_missing_pyc()
def find_lib(link, path=None):
    """Resolve a dynamic-library install name *link* to a path relative
    to the build prefix.

    Returns the prefix-relative path of the library, *path* itself when
    the link refers to the object's own file, or None when the link
    needs no rewriting (absolute system path or @rpath). Exits the
    build on unresolvable or ambiguous links.
    """
    from conda_build.build import prefix_files
    files = prefix_files()
    if link.startswith(config.build_prefix):
        # Absolute path inside the prefix: strip the prefix and verify
        # the file is actually part of the package.
        link = link[len(config.build_prefix) + 1:]
        if link not in files:
            sys.exit("Error: Could not find %s" % link)
        return link
    if link.startswith('/'):  # but doesn't start with the build prefix
        return
    if link.startswith('@rpath/'):
        # Assume the rpath already points to lib, so there is no need to
        # change it.
        return
    if '/' not in link or link.startswith('@executable_path/'):
        # Bare library name: search the whole prefix for candidates
        # with a matching basename.
        link = basename(link)
        file_names = defaultdict(list)
        for f in files:
            file_names[basename(f)].append(f)
        if link not in file_names:
            sys.exit("Error: Could not find %s" % link)
        if len(file_names[link]) > 1:
            if path and basename(path) == link:
                # The link is for the file itself, just use it
                return path
            # Allow for the possibility of the same library appearing in
            # multiple places.
            md5s = set()
            for f in file_names[link]:
                md5s.add(md5_file(join(config.build_prefix, f)))
            if len(md5s) > 1:
                # Same name but different contents: genuinely ambiguous.
                sys.exit("Error: Found multiple instances of %s: %s" % (link, file_names[link]))
            else:
                # Identical copies: deterministically pick the first.
                file_names[link].sort()
                print("Found multiple instances of %s (%s). "
                      "Choosing the first one." % (link, file_names[link]))
        return file_names[link][0]
    # Fall-through: unrecognized link form; returns None implicitly.
    print("Don't know how to find %s, skipping" % link)
def osx_ch_link(path, link):
    """Callback for macho.install_name_change: compute the replacement
    install name for dependency *link* of object file *path*.

    Returns the new '@rpath/...' install name, or None when the link
    should be left alone (system library, already @rpath, unresolvable).
    """
    print("Fixing linking of %s in %s" % (link, path))
    link_loc = find_lib(link, path)
    if not link_loc:
        return
    # Directory of the dependency relative to the prefix's lib/ dir,
    # e.g. link_loc 'lib/things/libthings.dylib' -> 'things'.
    lib_to_link = relpath(dirname(link_loc), 'lib')
    # @rpath always resolves to the prefix's lib/, so the dependency is
    # reachable at @rpath/<lib_to_link>/<basename>, e.g.
    # '@rpath/things/libthings.dylib'.
    # (If @rpath ever proves unusable, the @loader_path form
    # '@loader_path/<path_to_lib>/<lib_to_link>/<basename>' would work,
    # where path_to_lib climbs from this object back up to lib/ -- that
    # variant may carry redundant '..' segments, which is why it is not
    # used by default.)
    new_name = '@rpath/%s/%s' % (lib_to_link, basename(link))
    # relpath of a sibling dir can yield '.' components; squash them
    return new_name.replace('/./', '/')
def mk_relative_osx(path, build_prefix=None):
    '''
    Make a macOS Mach-O object at *path* relocatable.

    if build_prefix is None, then this is a standard conda build. The path
    and all dependencies are in the build_prefix.

    if package is built in develop mode, build_prefix is specified. Object
    specified by 'path' needs to relink runtime dependences to libs found in
    build_prefix/lib/. Also, in develop mode, 'path' is not in 'build_prefix'

    Rewrites dependency install names to @rpath form (via osx_ch_link),
    resets the object's own id, and adds an rpath entry pointing back at
    the prefix's lib directory.

    Raises
    ------
    RuntimeError
        if install_name_tool exits non-zero (other than for stub files).
    '''
    if build_prefix is None:
        assert path.startswith(config.build_prefix + '/')
    else:
        config.short_build_prefix = build_prefix
    assert sys.platform == 'darwin' and is_obj(path)
    # Rewrite each dependency's install name; s records whether anything
    # actually changed.
    s = macho.install_name_change(path, osx_ch_link)
    names = macho.otool(path)
    if names:
        # Strictly speaking, not all object files have install names (e.g.,
        # bundles and executables do not). In that case, the first name here
        # will not be the install name (i.e., the id), but it isn't a problem,
        # because in that case it will be a no-op (with the exception of stub
        # files, which give an error, which is handled below).
        args = [
            'install_name_tool',
            '-id',
            join('@rpath', relpath(dirname(path),
                join(config.build_prefix, 'lib')), basename(names[0])),
            path,
        ]
        print(' '.join(args))
        p = Popen(args, stderr=PIPE)
        stdout, stderr = p.communicate()
        stderr = stderr.decode('utf-8')
        # Stub files cannot be modified; skip them entirely.
        if "Mach-O dynamic shared library stub file" in stderr:
            print("Skipping Mach-O dynamic shared library stub file %s" % path)
            return
        else:
            print(stderr, file=sys.stderr)
            if p.returncode:
                raise RuntimeError("install_name_tool failed with exit status %d"
                    % p.returncode)
        # Add an rpath to every executable to increase the chances of it
        # being found.
        args = [
            'install_name_tool',
            '-add_rpath',
            join('@loader_path', relpath(join(config.build_prefix, 'lib'),
                dirname(path)), '').replace('/./', '/'),
            path,
        ]
        print(' '.join(args))
        p = Popen(args, stderr=PIPE)
        stdout, stderr = p.communicate()
        stderr = stderr.decode('utf-8')
        if "Mach-O dynamic shared library stub file" in stderr:
            print("Skipping Mach-O dynamic shared library stub file %s\n" % path)
            return
        elif "would duplicate path, file already has LC_RPATH for:" in stderr:
            # Re-running on an already-processed object is fine.
            print("Skipping -add_rpath, file already has LC_RPATH set")
            return
        else:
            print(stderr, file=sys.stderr)
            if p.returncode:
                raise RuntimeError("install_name_tool failed with exit status %d"
                    % p.returncode)
    if s:
        # Skip for stub files, which have to use binary_has_prefix_files to be
        # made relocatable.
        assert_relative_osx(path)
def mk_relative_linux(f, rpaths=('lib',)):
    """Set the ELF RPATH of prefix-relative file *f* to $ORIGIN-relative
    paths for each directory in *rpaths*, using patchelf."""
    path = join(config.build_prefix, f)
    origin_entries = ['$ORIGIN/' + utils.relative(f, d) for d in rpaths]
    rpath = ':'.join(origin_entries)
    patchelf = external.find_executable('patchelf')
    print('patchelf: file: %s\n setting rpath to: %s' % (path, rpath))
    # --force-rpath writes DT_RPATH rather than DT_RUNPATH
    call([patchelf, '--force-rpath', '--set-rpath', rpath, path])
def assert_relative_osx(path):
    """Assert no install name in *path* still references the build prefix."""
    names = macho.otool(path)
    for name in names:
        assert not name.startswith(config.build_prefix), path
def mk_relative(m, f):
    """Make prefix-relative file *f* relocatable on the current platform.

    Dispatches to the Linux (patchelf) or macOS (install_name_tool)
    implementation; non-object files are ignored.
    """
    assert sys.platform != 'win32'
    path = join(config.build_prefix, f)
    if not is_obj(path):
        return
    if sys.platform.startswith('linux'):
        # rpaths may be customized in the recipe's build section
        mk_relative_linux(f, rpaths=m.get_value('build/rpaths', ['lib']))
    elif sys.platform == 'darwin':
        mk_relative_osx(path)
def fix_permissions(files):
    """Normalize permissions: directories become 755, and every packaged
    file gains owner-write permission."""
    print("Fixing permissions")
    for root, dirs, _files in os.walk(config.build_prefix):
        for dn in dirs:
            lchmod(join(root, dn), int('755', 8))
    for f in files:
        path = join(config.build_prefix, f)
        mode = stat.S_IMODE(os.lstat(path).st_mode)
        lchmod(path, mode | stat.S_IWUSR)  # chmod u+w
def post_build(m, files):
    """Run post-build fixups over the packaged *files* for metadata *m*:
    permissions, shebang rewriting, binary relocation, symlink checks.

    On Windows only permissions are fixed; everything else is Unix-only.
    """
    print('number of files:', len(files))
    fix_permissions(files)
    if sys.platform == 'win32':
        return
    osx_is_app = bool(m.get_value('build/osx_is_app', False))
    binary_relocation = bool(m.get_value('build/binary_relocation', True))
    if not binary_relocation:
        print("Skipping binary relocation logic")
    for f in files:
        # only scripts installed into bin/ can carry a shebang
        if f.startswith('bin/'):
            fix_shebang(f, osx_is_app=osx_is_app)
        if binary_relocation:
            mk_relative(m, f)
    check_symlinks(files)
def check_symlinks(files):
    """Validate symlinks among packaged *files*.

    Absolute links that stay inside the build prefix are rewritten to
    relative links; links escaping into the build root (config.croot)
    abort the build, since their targets will not exist after install.
    """
    if readlink is False:
        return  # Not on Unix system
    msgs = []
    real_build_prefix = realpath(config.build_prefix)
    for f in files:
        path = join(real_build_prefix, f)
        if islink(path):
            link_path = readlink(path)
            real_link_path = realpath(path)
            if real_link_path.startswith(real_build_prefix):
                # If the path is in the build prefix, this is fine, but
                # the link needs to be relative
                if not link_path.startswith('.'):
                    # Don't change the link structure if it is already a
                    # relative link. It's possible that ..'s later in the path
                    # can result in a broken link still, but we'll assume that
                    # such crazy things don't happen.
                    print("Making absolute symlink %s -> %s relative" % (f, link_path))
                    os.unlink(path)
                    os.symlink(relpath(real_link_path, dirname(path)), path)
            else:
                # Symlinks to absolute paths on the system (like /usr) are fine.
                if real_link_path.startswith(config.croot):
                    # Target lives in the transient build area: it will
                    # vanish once the build directory is cleaned up.
                    msgs.append("%s is a symlink to a path that may not "
                                "exist after the build is completed (%s)" % (f, link_path))
    if msgs:
        # Report every offending link before failing the build.
        for msg in msgs:
            print("Error: %s" % msg, file=sys.stderr)
        sys.exit(1)
def get_build_metadata(m):
    """Apply metadata overrides written by the build into the source dir.

    A recipe's build step may drop __conda_version__.txt,
    __conda_buildnum__.txt, or __conda_buildstr__.txt into the source
    directory to override the package version, build number, or build
    string in metadata *m*.
    """
    src_dir = source.get_dir()
    if exists(join(src_dir, '__conda_version__.txt')):
        with open(join(src_dir, '__conda_version__.txt')) as f:
            version = f.read().strip()
            print("Setting version from __conda_version__.txt: %s" % version)
            m.meta['package']['version'] = version
    if exists(join(src_dir, '__conda_buildnum__.txt')):
        with open(join(src_dir, '__conda_buildnum__.txt')) as f:
            build_number = f.read().strip()
            print("Setting build number from __conda_buildnum__.txt: %s" %
                  build_number)
            m.meta['build']['number'] = build_number
    if exists(join(src_dir, '__conda_buildstr__.txt')):
        with open(join(src_dir, '__conda_buildstr__.txt')) as f:
            buildstr = f.read().strip()
            # BUG FIX: this message previously said "version", copied
            # from the first branch, while actually setting the build
            # string.
            print("Setting build string from __conda_buildstr__.txt: %s" % buildstr)
            m.meta['build']['string'] = buildstr
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Baidu.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
"""
This module for blb test.
"""
import os
import sys
import unittest
import uuid
file_path = os.path.normpath(os.path.dirname(__file__))
sys.path.append(file_path + '/../../')
if sys.version < '3':
reload(sys)
sys.setdefaultencoding('utf-8')
import baidubce
from baidubce.auth.bce_credentials import BceCredentials
from baidubce.bce_client_configuration import BceClientConfiguration
from baidubce.services.blb import app_blb_client
"""
# sandbox
vpc_id = b''
subnetId = b''
HOST = b''
AK = b''
SK = b''
blbId = b''
bccId = b''
bccIP = ''
appServerGroupId = ''
policyId = ''
"""
# online
# Connection settings and resource identifiers for the live (online)
# environment. These are intentionally blank placeholders: fill them in
# before running the tests. A parallel "sandbox" set is kept in the
# docstring above for reference.
vpc_id = b''            # VPC that will host the load balancer
subnetId = b''          # subnet within the VPC
HOST = b''              # BLB service endpoint
AK = b''                # access key id
SK = b''                # secret access key
blbId = b''             # id of an existing app load balancer
bccId = ''              # id of a BCC instance used as backend server
appServerGroupId = ''   # id of an existing app server group
policyId = ''           # id of an existing forwarding policy
portId = ''             # id of an existing server-group port
def generate_client_token_by_uuid():
    """Produce a random client-token string.

    This is the default generator used when the caller does not pass an
    explicit ``client_token`` to a request.

    :return: a random UUID4 in its canonical string form
    :rtype: string
    """
    return str(uuid.uuid4())


# default token generator used by the test cases below
generate_client_token = generate_client_token_by_uuid
class TestAppBlbClient(unittest.TestCase):
    """
    Unit tests for AppBlbClient.

    NOTE: these tests call a live BLB endpoint and depend on the
    module-level placeholder constants (HOST/AK/SK/blbId/...) being
    filled in; they are meant to be run selectively via the suite in
    __main__.
    """

    def setUp(self):
        """
        set up: build a client against the configured endpoint/credentials
        """
        config = BceClientConfiguration(
            credentials=BceCredentials(AK, SK), endpoint=HOST)
        self.the_client = app_blb_client.AppBlbClient(config)

    def test_create_app_loadbalancer(self):
        """
        test case for create_app_loadbalancer
        """
        client_token = generate_client_token()
        self.assertEqual(
            type(self.the_client.create_app_loadbalancer(
                name='test_blb_hzf111', vpc_id=vpc_id, subnet_id=subnetId,
                client_token=client_token)),
            baidubce.bce_response.BceResponse)

    def test_update_app_loadbalancer(self):
        """
        test case for update_app_loadbalancer
        """
        client_token = generate_client_token()
        self.assertEqual(
            type(self.the_client.update_app_loadbalancer(
                blbId, name=b'blb_test_hzf_new',
                client_token=client_token)),
            baidubce.bce_response.BceResponse)

    def test_describe_app_loadbalancers(self):
        """
        test case for describe_app_loadbalancers
        """
        print(self.the_client.describe_app_loadbalancers())

    def test_describe_app_loadbalancer_detail(self):
        """
        test case for describe_app_loadbalancer_detail
        """
        print(self.the_client.describe_app_loadbalancer_detail(blbId))

    def test_delete_app_loadbalancer(self):
        """
        test case for delete_app_loadbalancer
        """
        client_token = generate_client_token()
        self.assertEqual(
            type(self.the_client.delete_app_loadbalancer(
                blbId, client_token=client_token)),
            baidubce.bce_response.BceResponse)

    def test_create_app_tcp_listener(self):
        """
        test case for create_app_tcp_listener
        """
        client_token = generate_client_token()
        #test_token = str.encode(test_token)
        self.assertEqual(
            type(self.the_client.create_app_tcp_listener(
                blbId, 1900, 'Hash', client_token=client_token)),
            baidubce.bce_response.BceResponse)

    def test_create_app_udp_listener(self):
        """
        test case for create_app_udp_listener
        """
        client_token = generate_client_token()
        self.assertEqual(
            type(self.the_client.create_app_udp_listener(
                blbId, 30000, 'Hash',
                client_token=client_token)),
            baidubce.bce_response.BceResponse)

    def test_create_app_http_listener(self):
        """
        test case for create_app_http_listener
        """
        client_token = generate_client_token()
        self.assertEqual(
            type(self.the_client.create_app_http_listener(
                blbId, 600, 'LeastConnection',
                client_token=client_token)),
            baidubce.bce_response.BceResponse)

    def test_create_app_https_listener(self):
        """
        test case for create_app_https_listener
        """
        client_token = generate_client_token()
        cert_ids = []
        cert_ids.append('cert-6nszzxe4kj6i')
        self.assertEqual(
            type(self.the_client.create_app_https_listener(
                blbId, 800, 'LeastConnection', cert_ids,
                client_token=client_token)),
            baidubce.bce_response.BceResponse)

    def test_create_app_ssl_listener(self):
        """
        test case for create_ssl_listener
        """
        client_token = generate_client_token()
        cert_ids = []
        cert_ids.append('cert-6nszzxe4kj6i')
        self.assertEqual(
            type(self.the_client.create_app_ssl_listener(
                blbId, 1100, 'LeastConnection', cert_ids,
                client_token=client_token)),
            baidubce.bce_response.BceResponse)

    def test_update_app_tcp_listener(self):
        """
        test case for app tcp listener
        """
        self.assertEqual(
            type(self.the_client.update_app_tcp_listener(
                blbId, 1900, scheduler='RoundRobin')),
            baidubce.bce_response.BceResponse)

    def test_update_app_udp_listener(self):
        """
        test case for app udp listener
        """
        self.assertEqual(
            type(self.the_client.update_app_udp_listener(
                blbId, 30000, 'RoundRobin')),
            baidubce.bce_response.BceResponse)

    def test_update_app_http_listener(self):
        """
        test case for app http listener
        """
        self.assertEqual(
            type(self.the_client.update_app_http_listener(
                blbId, 600, server_timeout=750)),
            baidubce.bce_response.BceResponse)

    def test_update_app_https_listener(self):
        """
        test case for app https listener
        """
        self.assertEqual(
            type(self.the_client.update_app_https_listener(
                blbId, 800, server_timeout=800)),
            baidubce.bce_response.BceResponse)

    def test_update_app_ssl_listener(self):
        """
        test case for app ssl listener
        """
        cert_ids = []
        cert_ids.append('cert-f10dqrtjxyb7')
        # NOTE(review): cert_ids is built but never passed to
        # update_app_ssl_listener below -- presumably it was meant to be
        # forwarded as a cert_ids argument; confirm against the client API.
        self.assertEqual(
            type(self.the_client.update_app_ssl_listener(
                blbId, 1100, scheduler='RoundRobin', dual_auth=False)),
            baidubce.bce_response.BceResponse)

    def test_describe_app_tcp_listener(self):
        """
        test case for describe_app_tcp_listener
        """
        print(self.the_client.describe_app_tcp_listener(blbId))

    def test_describe_app_udp_listener(self):
        """
        test case for describe_app_udp_listener
        """
        print(self.the_client.describe_app_udp_listener(blbId))

    def test_describe_app_http_listener(self):
        """
        test case for describe_app_http_listener
        """
        print(self.the_client.describe_app_http_listener(blbId))

    def test_describe_app_https_listener(self):
        """
        test case for describe_app_https_listener
        """
        print(self.the_client.describe_app_https_listener(blbId))

    def test_describe_app_ssl_listener(self):
        """
        test case for describe_app_ssl_listener
        """
        print(self.the_client.describe_app_ssl_listener(blbId))

    def test_delete_app_listeners(self):
        """
        test case for delete app listener
        """
        client_token = generate_client_token()
        portlist = []
        portlist.append(1900)
        self.assertEqual(
            type(self.the_client.delete_app_listeners(
                blbId, portlist, client_token=client_token)),
            baidubce.bce_response.BceResponse)

    def test_create_policys(self):
        """
        test case for create policys
        """
        client_token = generate_client_token()
        app_policy_vos = []
        rule_list = []
        # wildcard rule: match every request
        app_rule = {
            'key': '*',
            'value': '*'
        }
        rule_list.append(app_rule)
        app_policy = {
            'desc': 'for test',
            'appServerGroupId': appServerGroupId,
            'backendPort': 666,
            'priority': 2334,
            'ruleList': rule_list
        }
        app_policy_vos.append(app_policy)
        self.assertEqual(
            type(self.the_client.create_policys(
                blbId, 1900, app_policy_vos)),
            baidubce.bce_response.BceResponse)

    def test_describe_policys(self):
        """
        test case for describe policys
        """
        print(self.the_client.describe_policys(blbId, 1900))

    def test_delete_policys(self):
        """
        test case for delete policys
        """
        client_token = generate_client_token()
        policyid_list = []
        policyid_list.append(policyId)
        self.assertEqual(
            type(self.the_client.delete_policys(
                blbId, 1900, policyid_list, client_token=client_token)),
            baidubce.bce_response.BceResponse)

    def test_create_app_server_group(self):
        """
        test case for create app server group
        """
        client_token = generate_client_token()
        app_backserver = {
            'instanceId': bccId,
            'weight': 50
            #'privateIp': bccIP,
            #'portList': port_list
        }
        backserver_list = []
        backserver_list.append(app_backserver)
        # NOTE(review): backserver_list is built but never passed to
        # create_app_server_group below; verify whether it should be
        # supplied as a backend-server-list argument.
        self.assertEqual(
            type(self.the_client.create_app_server_group(
                blbId,
                client_token=client_token)),
            baidubce.bce_response.BceResponse)

    def test_update_app_server_group(self):
        """
        test case for update backend servers
        """
        client_token = generate_client_token()
        new_name = 'updated111'
        self.assertEqual(
            type(self.the_client.update_app_server_group(
                blbId, appServerGroupId,
                name=new_name,
                client_token=client_token)),
            baidubce.bce_response.BceResponse)

    def test_describe_app_server_group(self):
        """
        test case for describe app server group
        """
        print(self.the_client.describe_app_server_group(blbId))

    def test_delete_app_server_group(self):
        """
        test case for delete app server group
        """
        client_token = generate_client_token()
        self.assertEqual(
            type(self.the_client.delete_app_server_group(
                blbId, appServerGroupId,
                client_token=client_token)),
            baidubce.bce_response.BceResponse)

    def test_create_app_server_group_port(self):
        """
        test case for create app server group port
        """
        client_token = generate_client_token()
        self.assertEqual(
            type(self.the_client.create_app_server_group_port(
                blbId, appServerGroupId, 6700, 'TCP',
                client_token=client_token)),
            baidubce.bce_response.BceResponse)

    def test_update_app_server_group_port(self):
        """
        test case for update backend servers port
        """
        client_token = generate_client_token()
        self.assertEqual(
            type(self.the_client.update_app_server_group_port(
                blbId, appServerGroupId, portId,
                health_check_timeout_insecond=10,
                client_token=client_token)),
            baidubce.bce_response.BceResponse)

    def test_delete_app_server_group_port(self):
        """
        test case for delete app server group port
        """
        client_token = generate_client_token()
        port_list = [portId]
        self.assertEqual(
            type(self.the_client.delete_app_server_group_port(
                blbId, appServerGroupId, port_list,
                client_token=client_token)),
            baidubce.bce_response.BceResponse)

    def test_create_app_blb_rs(self):
        """
        test case for create app blb rs
        """
        client_token = generate_client_token()
        backend_server_list = [{
            'instanceId': bccId,
            'weight': 56
        }]
        self.assertEqual(
            type(self.the_client.create_app_blb_rs(
                blbId, appServerGroupId, backend_server_list,
                client_token=client_token)),
            baidubce.bce_response.BceResponse)

    def test_update_app_blb_rs(self):
        """
        test case for update app blb rs
        """
        client_token = generate_client_token()
        backend_server_list = [{
            'instanceId': bccId,
            'weight': 57
        }]
        self.assertEqual(
            type(self.the_client.update_app_blb_rs(
                blbId, appServerGroupId, backend_server_list,
                client_token=client_token)),
            baidubce.bce_response.BceResponse)

    def test_describe_app_blb_rs(self):
        """
        test case for app blb rs
        """
        print(self.the_client.describe_app_blb_rs(blbId, appServerGroupId))

    def test_delete_app_blb_rs(self):
        """
        test case for delete app blb rs
        """
        client_token = generate_client_token()
        backend_server_list = []
        backend_server_list.append(bccId)
        self.assertEqual(
            type(self.the_client.delete_app_blb_rs(
                blbId, appServerGroupId, backend_server_list,
                client_token=client_token)),
            baidubce.bce_response.BceResponse)

    def test_describe_rs_mount(self):
        """
        test case for describe rs mount
        """
        print(self.the_client.describe_rs_mount(blbId, appServerGroupId))

    def test_describe_rs_unmount(self):
        """
        test case for describe rs mount (unmounted servers)
        """
        print(self.the_client.describe_rs_unmount(blbId, appServerGroupId))
if __name__ == "__main__":
    # Tests hit a live service, so they are opt-in: uncomment the
    # addTest lines for the cases you want to run. With everything
    # commented out the suite is empty and the runner is a no-op.
    suite = unittest.TestSuite()
    #suite.addTest(TestAppBlbClient("test_create_app_loadbalancer"))
    #suite.addTest(TestAppBlbClient("test_update_app_loadbalancer"))
    #suite.addTest(TestAppBlbClient("test_describe_app_loadbalancers"))
    #suite.addTest(TestAppBlbClient("test_describe_app_loadbalancer_detail"))
    #suite.addTest(TestAppBlbClient("test_delete_app_loadbalancer"))
    #suite.addTest(TestAppBlbClient("test_create_app_tcp_listener"))
    #suite.addTest(TestAppBlbClient("test_create_app_udp_listener"))
    #suite.addTest(TestAppBlbClient("test_create_app_http_listener"))
    #suite.addTest(TestAppBlbClient("test_create_app_https_listener"))
    #suite.addTest(TestAppBlbClient("test_create_app_ssl_listener"))
    #suite.addTest(TestAppBlbClient("test_update_app_tcp_listener"))
    #suite.addTest(TestAppBlbClient("test_update_app_udp_listener"))
    #suite.addTest(TestAppBlbClient("test_update_app_http_listener"))
    #suite.addTest(TestAppBlbClient("test_update_app_https_listener"))
    #suite.addTest(TestAppBlbClient("test_update_app_ssl_listener"))
    #suite.addTest(TestAppBlbClient("test_describe_app_tcp_listener"))
    #suite.addTest(TestAppBlbClient("test_describe_app_udp_listener"))
    #suite.addTest(TestAppBlbClient("test_describe_app_http_listener"))
    #suite.addTest(TestAppBlbClient("test_describe_app_https_listener"))
    #suite.addTest(TestAppBlbClient("test_describe_app_ssl_listener"))
    #suite.addTest(TestAppBlbClient("test_delete_app_listeners"))
    #suite.addTest(TestAppBlbClient("test_create_policys"))
    #suite.addTest(TestAppBlbClient("test_describe_policys"))
    #suite.addTest(TestAppBlbClient("test_delete_policys"))
    #suite.addTest(TestAppBlbClient("test_create_app_server_group"))
    #suite.addTest(TestAppBlbClient("test_update_app_server_group"))
    #suite.addTest(TestAppBlbClient("test_describe_app_server_group"))
    #suite.addTest(TestAppBlbClient("test_delete_app_server_group"))
    #suite.addTest(TestAppBlbClient("test_create_app_server_group_port"))
    #suite.addTest(TestAppBlbClient("test_update_app_server_group_port"))
    #suite.addTest(TestAppBlbClient("test_delete_app_server_group_port"))
    #suite.addTest(TestAppBlbClient("test_create_app_blb_rs"))
    #suite.addTest(TestAppBlbClient("test_update_app_blb_rs"))
    #suite.addTest(TestAppBlbClient("test_describe_app_blb_rs"))
    #suite.addTest(TestAppBlbClient("test_delete_app_blb_rs"))
    #suite.addTest(TestAppBlbClient("test_describe_rs_mount"))
    #suite.addTest(TestAppBlbClient("test_describe_rs_unmount"))
    runner = unittest.TextTestRunner()
    runner.run(suite)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.